Merge branch 'dev' into dev

Tom Foster 2024-04-22 12:52:12 +01:00 committed by GitHub
commit 5231ba08d8
76 changed files with 3472 additions and 5078 deletions


@ -129,9 +129,9 @@ artifacts:
.push-oci-image:
stage: publish
image: docker:26.0.1
image: docker:26.0.2
services:
- docker:26.0.1-dind
- docker:26.0.2-dind
variables:
IMAGE_SUFFIX_AMD64: amd64
IMAGE_SUFFIX_ARM64V8: arm64v8

Cargo.lock generated

@ -399,12 +399,13 @@ dependencies = [
[[package]]
name = "cc"
version = "1.0.94"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7"
checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b"
dependencies = [
"jobserver",
"libc",
"once_cell",
]
[[package]]
@ -536,6 +537,7 @@ dependencies = [
"reqwest",
"ring",
"ruma",
"ruma-identifiers-validation",
"rusqlite",
"rust-rocksdb",
"sd-notify",
@ -2994,9 +2996,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "signal-hook-registry"
version = "1.4.1"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
dependencies = [
"libc",
]
@ -3163,18 +3165,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.58"
version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.58"
version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66"
dependencies = [
"proc-macro2",
"quote",
@ -3437,6 +3439,7 @@ dependencies = [
"bitflags 2.5.0",
"bytes",
"futures-core",
"futures-util",
"http",
"http-body",
"http-body-util",


@ -21,7 +21,7 @@ rust-version = "1.75.0"
rand = "0.8.5"
# Used for conduit::Error type
thiserror = "1.0.58"
thiserror = "1.0.59"
# Used to encode server public key
base64 = "0.22.0"
@ -105,7 +105,14 @@ features = ["util"]
[dependencies.tower-http]
version = "0.5.2"
features = ["add-extension", "cors", "sensitive-headers", "trace", "util"]
features = [
"add-extension",
"cors",
"sensitive-headers",
"trace",
"util",
"catch-panic",
]
[dependencies.hyper]
version = "1.3.1"
@ -122,7 +129,7 @@ features = ["rustls-tls-native-roots", "socks", "hickory-dns"]
# all the serde stuff
# Used for pdu definition
[dependencies.serde]
version = "1.0.197"
version = "1.0.198"
features = ["rc"]
# Used for appservice registration files
[dependencies.serde_yaml]
@ -245,7 +252,7 @@ default-features = false
# Used for reading the configuration from conduit.toml & environment variables
[dependencies.figment]
version = "0.10.17"
version = "0.10.18"
features = ["env", "toml"]
# Used for matrix spec type definitions and helpers
@ -275,6 +282,10 @@ features = [
"unstable-extensible-events",
]
[dependencies.ruma-identifiers-validation]
git = "https://github.com/girlbossceo/ruma"
branch = "conduwuit-changes"
[dependencies.hickory-resolver]
version = "0.24.1"
default-features = false

bin/complement Executable file

@ -0,0 +1,39 @@
#!/usr/bin/env bash
set -euo pipefail
# Path to Complement's source code
#
# The `COMPLEMENT_SRC` environment variable is set in the Nix dev shell, which
# points to a store path containing the Complement source code. It's likely you
# want to just pass that as the first argument to use it here.
COMPLEMENT_SRC="$1"
# A `.jsonl` file to write test logs to
LOG_FILE="$2"
# A `.jsonl` file to write test results to
RESULTS_FILE="$3"
OCI_IMAGE="complement-conduit:dev"
pushd "$(git rev-parse --show-toplevel)" > /dev/null
nix build .#complement
docker load < result
popd > /dev/null
# It's okay (likely, even) that `go test` exits nonzero
set +o pipefail
env \
-C "$COMPLEMENT_SRC" \
COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
go test -timeout 1h -json ./tests | tee "$LOG_FILE"
set -o pipefail
# Post-process the results into an easy-to-compare format
cat "$LOG_FILE" | jq -c '
select(
(.Action == "pass" or .Action == "fail" or .Action == "skip")
and .Test != null
) | {Action: .Action, Test: .Test}
' | sort > "$RESULTS_FILE"


@ -288,8 +288,8 @@ allow_profile_lookup_federation_requests = true
# For release builds, the tracing crate is configured to only implement levels higher than error to avoid unnecessary overhead in the compiled binary from trace macros.
# For debug builds, this restriction is not applied.
#
# Defaults to "warn"
#log = "warn"
# Defaults to "info"
#log = "info"
# controls whether encrypted rooms and events are allowed (default true)
#allow_encryption = false
@ -512,23 +512,24 @@ allow_profile_lookup_federation_requests = true
##
## Generally these defaults are the best, but if you find a reason to need to change these they are here.
# Default/base connection timeout
# Default/base connection timeout.
# This is used only by URL previews and update/news endpoint checks
#
# Defaults to 10 seconds
#request_conn_timeout = 10
# Default/base request timeout
# This is used only by URL previews and update/news endpoint checks
# Default/base request timeout. The time waiting to receive more data from another server.
# This is used only by URL previews, update/news, and misc endpoint checks
#
# Defaults to 35 seconds
#request_timeout = 35
# Default/base max idle connections per host
# Default/base request total timeout. The time limit for a whole request. This is set very high to not
# cancel healthy requests while serving as a backstop.
# This is used only by URL previews and update/news endpoint checks
#
# Defaults to 1 as generally the same open connection can be re-used
#request_idle_per_host = 1
# Defaults to 320 seconds
#request_total_timeout = 320
# Default/base idle connection pool timeout
# This is used only by URL previews and update/news endpoint checks
@ -536,6 +537,12 @@ allow_profile_lookup_federation_requests = true
# Defaults to 5 seconds
#request_idle_timeout = 5
# Default/base max idle connections per host
# This is used only by URL previews and update/news endpoint checks
#
# Defaults to 1 as generally the same open connection can be re-used
#request_idle_per_host = 1
# Federation well-known resolution connection timeout
#
# Defaults to 6 seconds
@ -546,21 +553,32 @@ allow_profile_lookup_federation_requests = true
# Defaults to 10 seconds
#well_known_timeout = 10
# Federation client/server request timeout
# Federation client request timeout
# You most definitely want this to be high to account for extremely large room joins, slow homeservers, your own resources etc.
#
# Defaults to 300 seconds
#federation_timeout = 300
# Federation client/sender max idle connections per host
# Federation client idle connection pool timeout
#
# Defaults to 25 seconds
#federation_idle_timeout = 25
# Federation client max idle connections per host
#
# Defaults to 1 as generally the same open connection can be re-used
#federation_idle_per_host = 1
# Federation client/sender idle connection pool timeout
# Federation sender request timeout
# Remote servers can take a while to process sent transactions.
#
# Defaults to 25 seconds
#federation_idle_timeout = 25
# Defaults to 180 seconds
#sender_timeout = 180
# Federation sender idle connection pool timeout
#
# Defaults to 180 seconds
#sender_idle_timeout = 180
# Appservice URL request connection timeout
#
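The renamed and reordered timeout knobs above can also be set without editing the file; a minimal sketch, assuming the usual `CONDUIT_`-prefixed environment mapping provided by the figment config loader (variable names are inferred from the field names and not taken from this diff):

```bash
# Assumed environment-variable form of the timeout options documented above;
# names are inferred (upper-cased field name with a CONDUIT_ prefix).
export CONDUIT_REQUEST_TIMEOUT=35         # per-request timeout for URL previews/update checks
export CONDUIT_REQUEST_TOTAL_TIMEOUT=320  # whole-request backstop
export CONDUIT_FEDERATION_TIMEOUT=300     # federation client requests
export CONDUIT_SENDER_TIMEOUT=180         # federation sender transactions
```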


@ -11,3 +11,5 @@
- [NixOS](deploying/nixos.md)
- [TURN](turn.md)
- [Appservices](appservices.md)
- [Development](development.md)
- [Testing](development/testing.md)

docs/development.md Normal file

@ -0,0 +1,4 @@
# Development
Information about developing the project. If you are only interested in using
it, you can safely ignore this section.


@ -0,0 +1,17 @@
# Testing
## Complement
Have a look at [Complement's repository][complement] for an explanation of what
it is.
To test against Complement, with Nix and direnv installed and set up, you can
either:
* Run `complement "$COMPLEMENT_SRC" ./path/to/logs.jsonl ./path/to/results.jsonl`
to build a Complement image, run the tests, and output the logs and results
to the specified paths
* Run `nix build .#complement` from the root of the repository to just build a
Complement image
[complement]: https://github.com/matrix-org/complement
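Because the `complement` wrapper sorts the post-processed results, two runs can be compared directly; a small sketch (file names are illustrative):

```bash
# Compare a baseline Complement run against a new one. Each line is a compact
# JSON object like {"Action":"pass","Test":"..."}, already sorted by the
# wrapper script, so a plain diff gives a stable comparison.
diff baseline-results.jsonl new-results.jsonl
```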

flake.lock generated

@ -9,11 +9,11 @@
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1707922053,
"narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=",
"lastModified": 1711742460,
"narHash": "sha256-0O4v6e4a1toxXZ2gf5INhg4WPE5C5T+SVvsBt+45Mcc=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "6eabc3f02fae3683bffab483e614bebfcd476b21",
"rev": "4dbdbee45728d8ce5788db6461aaaa89d98081f0",
"type": "github"
},
"original": {
@ -23,6 +23,22 @@
"type": "github"
}
},
"complement": {
"flake": false,
"locked": {
"lastModified": 1713458251,
"narHash": "sha256-hom/Lt0gZzLWqFhUJG0X2i88CAMIILInO5w0tPj6G3s=",
"owner": "matrix-org",
"repo": "complement",
"rev": "d73c81a091604b0fc5b6b0617dcac58c25763f57",
"type": "github"
},
"original": {
"owner": "matrix-org",
"repo": "complement",
"type": "github"
}
},
"crane": {
"inputs": {
"nixpkgs": [
@ -51,17 +67,17 @@
]
},
"locked": {
"lastModified": 1707685877,
"narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=",
"lastModified": 1713738183,
"narHash": "sha256-qd/MuLm7OfKQKyd4FAMqV4H6zYyOfef5lLzRrmXwKJM=",
"owner": "ipetkov",
"repo": "crane",
"rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
"rev": "f6c6a2fb1b8bd9b65d65ca9342dd0eb180a63f11",
"type": "github"
},
"original": {
"owner": "ipetkov",
"ref": "master",
"repo": "crane",
"rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
"type": "github"
}
},
@ -73,11 +89,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1711606966,
"narHash": "sha256-nTaO7ZDL4D02dVC5ktqnXNiNuODBUHyE4qEcFjAUCQY=",
"lastModified": 1713680591,
"narHash": "sha256-3pbv7UgAgetwz9YdjzIT/lZ6Rgj6wj6MR4mphBLyDjU=",
"owner": "nix-community",
"repo": "fenix",
"rev": "aa45c3e901ea42d6633af083c0c555efaf948b17",
"rev": "19aaa94a73cc670a4d87e84f0909966cd8f8cd79",
"type": "github"
},
"original": {
@ -168,11 +184,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1702539185,
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
"lastModified": 1711401922,
"narHash": "sha256-QoQqXoj8ClGo0sqD/qWKFWezgEwUL0SUh37/vY2jNhc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
"rev": "07262b18b97000d16a4bdb003418bd2fb067a932",
"type": "github"
},
"original": {
@ -184,11 +200,11 @@
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1702780907,
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
"lastModified": 1711460390,
"narHash": "sha256-akSgjDZL6pVHEfSE6sz1DNSXuYX6hq+P/1Z5IoYWs7E=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
"rev": "44733514b72e732bd49f5511bd0203dea9b9a434",
"type": "github"
},
"original": {
@ -200,11 +216,11 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1711523803,
"narHash": "sha256-UKcYiHWHQynzj6CN/vTcix4yd1eCu1uFdsuarupdCQQ=",
"lastModified": 1713537308,
"narHash": "sha256-XtTSSIB2DA6tOv+l0FhvfDMiyCmhoRbNB+0SeInZkbk=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2726f127c15a4cc9810843b96cad73c7eb39e443",
"rev": "5c24cf2f0a12ad855f444c30b2421d044120c66f",
"type": "github"
},
"original": {
@ -214,25 +230,44 @@
"type": "github"
}
},
"rocksdb": {
"flake": false,
"locked": {
"lastModified": 1713310517,
"narHash": "sha256-vRPyrXkXVVhP56n5FVYef8zbIsnnanQSpElmQLZ7mh8=",
"owner": "facebook",
"repo": "rocksdb",
"rev": "bcf88d48ce8aa8b536aee4dd305533b3b83cf435",
"type": "github"
},
"original": {
"owner": "facebook",
"ref": "v9.1.0",
"repo": "rocksdb",
"type": "github"
}
},
"root": {
"inputs": {
"attic": "attic",
"complement": "complement",
"crane": "crane_2",
"fenix": "fenix",
"flake-compat": "flake-compat_2",
"flake-utils": "flake-utils_2",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_2"
"nixpkgs": "nixpkgs_2",
"rocksdb": "rocksdb"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1711562745,
"narHash": "sha256-s/YOyBM0vumhkqCFi8CnV5imFlC5JJrGia8CmEXyQkM=",
"lastModified": 1713628977,
"narHash": "sha256-iN5QUlUq527lswmBC+RopfXdu6Xx7mmTaBSH2l59FtM=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "ad51a17c627b4ca57f83f0dc1f3bb5f3f17e6d0b",
"rev": "55d9a533b309119c8acd13061581b43ae8840823",
"type": "github"
},
"original": {

flake.nix

@ -1,353 +1,68 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
attic.url = "github:zhaofengli/attic?ref=main";
complement = { url = "github:matrix-org/complement"; flake = false; };
crane = { url = "github:ipetkov/crane?ref=master"; inputs.nixpkgs.follows = "nixpkgs"; };
fenix = { url = "github:nix-community/fenix"; inputs.nixpkgs.follows = "nixpkgs"; };
flake-compat = { url = "github:edolstra/flake-compat"; flake = false; };
flake-utils.url = "github:numtide/flake-utils";
nix-filter.url = "github:numtide/nix-filter";
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
};
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
crane = {
# Pin latest crane that's not affected by the following bugs:
#
# * <https://github.com/ipetkov/crane/issues/527#issuecomment-1978079140>
# * <https://github.com/toml-rs/toml/issues/691>
# * <https://github.com/toml-rs/toml/issues/267>
url = "github:ipetkov/crane?rev=2c653e4478476a52c6aa3ac0495e4dea7449ea0e";
inputs.nixpkgs.follows = "nixpkgs";
};
attic.url = "github:zhaofengli/attic?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
rocksdb = { url = "github:facebook/rocksdb?ref=v9.1.0"; flake = false; };
};
outputs =
{ self
, nixpkgs
, flake-utils
, nix-filter
, fenix
, crane
, ...
}: flake-utils.lib.eachDefaultSystem (system:
outputs = inputs:
inputs.flake-utils.lib.eachDefaultSystem (system:
let
pkgsHost = nixpkgs.legacyPackages.${system};
allocator = null;
rocksdb' = pkgs:
let
version = "9.1.0";
in
(pkgs.rocksdb.overrideAttrs (old: {
inherit version;
src = pkgs.fetchFromGitHub {
owner = "facebook";
repo = "rocksdb";
rev = "bcf88d48ce8aa8b536aee4dd305533b3b83cf435";
hash = "sha256-vRPyrXkXVVhP56n5FVYef8zbIsnnanQSpElmQLZ7mh8";
};
}));
# Nix-accessible `Cargo.toml`
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
pkgsHost = inputs.nixpkgs.legacyPackages.${system};
# The Rust toolchain to use
toolchain = fenix.packages.${system}.fromToolchainFile {
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8=";
};
builder = pkgs:
((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;
scope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
book = self.callPackage ./nix/pkgs/book {};
complement = self.callPackage ./nix/pkgs/complement {};
craneLib = ((inputs.crane.mkLib pkgs).overrideToolchain toolchain);
inherit inputs;
main = self.callPackage ./nix/pkgs/main {};
oci-image = self.callPackage ./nix/pkgs/oci-image {};
rocksdb = pkgs.rocksdb.overrideAttrs (old: {
src = inputs.rocksdb;
version = pkgs.lib.removePrefix
"v"
(builtins.fromJSON (builtins.readFile ./flake.lock))
.nodes.rocksdb.original.ref;
});
});
nativeBuildInputs = pkgs: let
darwin = if pkgs.stdenv.isDarwin then [ pkgs.libiconv ] else [];
in [
# bindgen needs the build platform's libclang. Apparently due to
# "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
# quite do the right thing here.
pkgs.pkgsBuildHost.rustPlatform.bindgenHook
] ++ darwin;
env = pkgs: {
CONDUIT_VERSION_EXTRA = self.shortRev or self.dirtyShortRev;
ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include";
ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib";
}
// pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = "";
}
// {
CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
lib.concatStringsSep " " ([ ]
++ lib.optionals
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
[ "-l" "c" ]
++ lib.optionals
# This check has to match the one [here][0]. We only need to set
# these flags when using a different linker. Don't ask me why,
# though, because I don't know. All I know is it breaks otherwise.
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
(
# Nixpkgs doesn't check for x86_64 here but we do, because I
# observed a failure building statically for x86_64 without
# including it here. Linkers are weird.
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
&& stdenv.hostPlatform.isStatic
&& !stdenv.isDarwin
&& !stdenv.cc.bintools.isLLVM
)
[
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
// (
let
inherit (pkgs.rust.lib) envVars;
in
pkgs.lib.optionalAttrs
(pkgs.stdenv.targetPlatform.rust.rustcTarget
!= pkgs.stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
envVars.linkerForTarget;
}
)
// (
let
inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
// (
let
inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++";
}
)
);
mkPackage = pkgs: allocator: cargoArgs: profile: builder pkgs {
src = nix-filter {
root = ./.;
include = [
"src"
"Cargo.toml"
"Cargo.lock"
];
};
rocksdb' = (if allocator == "jemalloc" then (pkgs.rocksdb.override { enableJemalloc = true; }) else (rocksdb' pkgs));
# This is redundant with CI
doCheck = false;
env = env pkgs;
nativeBuildInputs = nativeBuildInputs pkgs;
cargoExtraArgs = cargoArgs
+ (if allocator == "jemalloc" then " --features jemalloc" else "")
+ (if allocator == "hmalloc" then " --features hardened_malloc" else "")
;
meta.mainProgram = cargoToml.package.name;
CARGO_PROFILE = profile;
};
mkOciImage = pkgs: package:
pkgs.dockerTools.buildLayeredImage {
name = package.pname;
tag = "main";
# Debian makes builds reproducible through using the HEAD commit's date
created = "@${toString self.lastModified}";
contents = [
pkgs.dockerTools.caCertificates
];
config = {
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
Entrypoint = if !pkgs.stdenv.isDarwin then [
"${pkgs.lib.getExe' pkgs.tini "tini"}"
"--"
] else [];
Cmd = [
"${pkgs.lib.getExe package}"
];
};
};
createComplementRuntime = pkgs: image: let
complement = pkgs.fetchFromGitHub {
owner = "matrix-org";
repo = "complement";
rev = "d73c81a091604b0fc5b6b0617dcac58c25763f57";
hash = "sha256-hom/Lt0gZzLWqFhUJG0X2i88CAMIILInO5w0tPj6G3s";
};
script = pkgs.writeShellScriptBin "run.sh"
''
export PATH=${pkgs.lib.makeBinPath [ pkgs.olm pkgs.gcc ]}
${pkgs.lib.getExe pkgs.docker} load < ${image}
set +o pipefail
/usr/bin/env -C "${complement}" COMPLEMENT_BASE_IMAGE="complement-conduit:dev" ${pkgs.lib.getExe pkgs.go} test -json ${complement}/tests | ${pkgs.toybox}/bin/tee $1
set -o pipefail
# Post-process the results into an easy-to-compare format
${pkgs.coreutils}/bin/cat "$1" | ${pkgs.lib.getExe pkgs.jq} -c '
select(
(.Action == "pass" or .Action == "fail" or .Action == "skip")
and .Test != null
) | {Action: .Action, Test: .Test}
' | ${pkgs.coreutils}/bin/sort > "$2"
'';
in script;
createComplementImage = pkgs: let
conduwuit = mkPackage pkgs "jemalloc" "--features=axum_dual_protocol" "dev";
in pkgs.dockerTools.buildImage {
name = "complement-conduit";
tag = "dev";
copyToRoot = pkgs.stdenv.mkDerivation {
name = "complement_data";
src = nix-filter {
root = ./.;
include = [
"tests/complement/conduwuit-complement.toml"
"tests/complement/v3.ext"
];
};
phases = [ "unpackPhase" "installPhase" ];
installPhase = ''
mkdir -p $out/conduwuit/data
cp $src/tests/complement/conduwuit-complement.toml $out/conduwuit/conduit.toml
cp $src/tests/complement/v3.ext $out/v3.ext
'';
};
config = {
Cmd = [
"${pkgs.bash}/bin/sh"
"-c"
''
echo "Starting server as $SERVER_NAME" &&
export CONDUIT_SERVER_NAME=$SERVER_NAME CONDUIT_WELL_KNOWN_SERVER="$SERVER_NAME:8448" CONDUIT_WELL_KNOWN_SERVER="$SERVER_NAME:8008" &&
${pkgs.lib.getExe pkgs.openssl} genrsa -out /conduwuit/private_key.key 2048 &&
${pkgs.lib.getExe pkgs.openssl} req -new -sha256 -key /conduwuit/private_key.key -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=$SERVER_NAME" -out /conduwuit/signing_request.csr &&
echo "DNS.1 = $SERVER_NAME" >> /v3.ext &&
echo "IP.1 = $(${pkgs.lib.getExe pkgs.gawk} 'END{print $1}' /etc/hosts)" >> /v3.ext &&
${pkgs.lib.getExe pkgs.openssl} x509 -req -extfile /v3.ext -in /conduwuit/signing_request.csr -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -CAcreateserial -out /conduwuit/certificate.crt -days 1 -sha256 &&
${pkgs.lib.getExe conduwuit}
''
];
Entrypoint = if !pkgs.stdenv.isDarwin then [
"${pkgs.lib.getExe' pkgs.tini "tini"}"
"--"
] else [];
Env = [
"SSL_CERT_FILE=/complement/ca/ca.crt"
"SERVER_NAME=localhost"
"CONDUIT_CONFIG=/conduwuit/conduit.toml"
];
ExposedPorts = {
"8008/tcp" = {};
"8448/tcp" = {};
};
};
};
scopeHost = (scope pkgsHost);
in
{
packages = {
default = mkPackage pkgsHost null "" "release";
jemalloc = mkPackage pkgsHost "jemalloc" "" "release";
hmalloc = mkPackage pkgsHost "hmalloc" "" "release";
oci-image = mkOciImage pkgsHost self.packages.${system}.default;
oci-image-jemalloc = mkOciImage pkgsHost self.packages.${system}.jemalloc;
oci-image-hmalloc = mkOciImage pkgsHost self.packages.${system}.hmalloc;
default = scopeHost.main;
jemalloc = scopeHost.main.override { features = ["jemalloc"]; };
hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; };
book =
let
package = self.packages.${system}.default;
in
pkgsHost.stdenv.mkDerivation {
pname = "${package.pname}-book";
version = package.version;
src = nix-filter {
root = ./.;
include = [
"book.toml"
"conduwuit-example.toml"
"README.md"
"debian/README.md"
"docs"
];
};
nativeBuildInputs = (with pkgsHost; [
mdbook
]);
buildPhase = ''
mdbook build
mv public $out
'';
oci-image = scopeHost.oci-image;
oci-image-jemalloc = scopeHost.oci-image.override {
main = scopeHost.main.override {
features = ["jemalloc"];
};
complement-image = createComplementImage pkgsHost;
complement-runtime = createComplementRuntime pkgsHost self.outputs.packages.${system}.complement-image;
};
oci-image-hmalloc = scopeHost.oci-image.override {
main = scopeHost.main.override {
features = ["hardened_malloc"];
};
};
book = scopeHost.book;
complement = scopeHost.complement;
}
//
builtins.listToAttrs
@ -357,54 +72,61 @@
let
binaryName = "static-${crossSystem}";
pkgsCrossStatic =
(import nixpkgs {
(import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
scopeCrossStatic = scope pkgsCrossStatic;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = mkPackage pkgsCrossStatic null "" "release";
value = scopeCrossStatic.main;
}
# An output for a statically-linked binary with jemalloc
{
name = "${binaryName}-jemalloc";
value = mkPackage pkgsCrossStatic "jemalloc" "" "release";
value = scopeCrossStatic.main.override {
features = ["jemalloc"];
};
}
# An output for a statically-linked binary with hardened_malloc
{
name = "${binaryName}-hmalloc";
value = mkPackage pkgsCrossStatic "hmalloc" "" "release";
value = scopeCrossStatic.main.override {
features = ["hardened_malloc"];
};
}
# An output for an OCI image based on that binary
{
name = "oci-image-${crossSystem}";
value = mkOciImage
pkgsCrossStatic
self.packages.${system}.${binaryName};
value = scopeCrossStatic.oci-image;
}
# An output for an OCI image based on that binary with jemalloc
{
name = "oci-image-${crossSystem}-jemalloc";
value = mkOciImage
pkgsCrossStatic
self.packages.${system}."${binaryName}-jemalloc";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
features = ["jemalloc"];
};
};
}
# An output for an OCI image based on that binary with hardened_malloc
{
name = "oci-image-${crossSystem}-hmalloc";
value = mkOciImage
pkgsCrossStatic
self.packages.${system}."${binaryName}-hmalloc";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
features = ["hardened_malloc"];
};
};
}
]
)
@ -416,24 +138,30 @@
);
devShells.default = pkgsHost.mkShell {
env = env pkgsHost // {
env = scopeHost.main.env // {
# Rust Analyzer needs to be able to find the path to default crate
# sources, and it can read this environment variable to do so. The
# `rust-src` component is required in order for this to work.
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
# Convenient way to access a pinned version of Complement's source
# code.
COMPLEMENT_SRC = inputs.complement.outPath;
};
# Development tools
nativeBuildInputs = nativeBuildInputs pkgsHost ++ [
packages = [
# Always use nightly rustfmt because most of its options are unstable
#
# This needs to come before `toolchain` in this list, otherwise
# `$PATH` will have stable rustfmt instead.
fenix.packages.${system}.latest.rustfmt
inputs.fenix.packages.${system}.latest.rustfmt
toolchain
] ++ (with pkgsHost; [
]
++ (with pkgsHost; [
engage
cargo-audit
# Needed for producing Debian packages
cargo-deb
@ -450,7 +178,9 @@
# Useful for editing the book locally
mdbook
]);
])
++
scopeHost.main.nativeBuildInputs;
};
});
}
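For reference, a hedged sketch of building the reworked outputs above locally; the non-cross names are taken from the `packages` set in this hunk, while the static/OCI cross names depend on the `crossSystem` list outside it, so the target triple is illustrative only:

```bash
# Native outputs from the new scope-based packaging.
nix build .#default             # release build of the main package
nix build .#oci-image-jemalloc  # OCI image wrapping main with the jemalloc feature
nix build .#complement          # Complement test image (new output)
nix build .#book                # documentation book

# Cross/static outputs follow the "static-<target>" / "oci-image-<target>"
# naming; the exact target triple below is an assumption, not from this diff.
nix build .#static-x86_64-unknown-linux-musl
```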

nix/pkgs/book/default.nix Normal file

@ -0,0 +1,31 @@
{ inputs
# Dependencies
, main
, mdbook
, stdenv
}:
stdenv.mkDerivation {
inherit (main) pname version;
src = inputs.nix-filter {
root = inputs.self;
include = [
"book.toml"
"conduwuit-example.toml"
"README.md"
"debian/README.md"
"docs"
];
};
nativeBuildInputs = [
mdbook
];
buildPhase = ''
mdbook build
mv public $out
'';
}


@ -0,0 +1,19 @@
[global]
address = "0.0.0.0"
allow_device_name_federation = true
allow_guest_registration = true
allow_public_room_directory_over_federation = true
allow_public_room_directory_without_auth = true
allow_registration = true
allow_unstable_room_versions = true
database_backend = "rocksdb"
database_path = "/database"
log = "trace"
port = [8008, 8448]
trusted_servers = []
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true
[global.tls]
certs = "/certificate.crt"
dual_protocol = true
key = "/private_key.key"


@ -0,0 +1,92 @@
# Dependencies
{ bashInteractive
, buildEnv
, coreutils
, dockerTools
, gawk
, lib
, main
, openssl
, stdenv
, tini
, writeShellScriptBin
}:
let
main' = main.override {
profile = "dev";
features = ["axum_dual_protocol"];
};
start = writeShellScriptBin "start" ''
set -euxo pipefail
${lib.getExe openssl} genrsa -out private_key.key 2048
${lib.getExe openssl} req \
-new \
-sha256 \
-key private_key.key \
-subj "/C=US/ST=CA/O=MyOrg, Inc./CN=$SERVER_NAME" \
-out signing_request.csr
cp ${./v3.ext} v3.ext
echo "DNS.1 = $SERVER_NAME" >> v3.ext
echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \
>> v3.ext
${lib.getExe openssl} x509 \
-req \
-extfile v3.ext \
-in signing_request.csr \
-CA /complement/ca/ca.crt \
-CAkey /complement/ca/ca.key \
-CAcreateserial \
-out certificate.crt \
-days 1 \
-sha256
${lib.getExe' coreutils "env"} \
CONDUIT_SERVER_NAME="$SERVER_NAME" \
CONDUIT_WELL_KNOWN_SERVER="$SERVER_NAME:8448" \
CONDUIT_WELL_KNOWN_SERVER="$SERVER_NAME:8008" \
${lib.getExe main'}
'';
in
dockerTools.buildImage {
name = "complement-${main.pname}";
tag = "dev";
copyToRoot = buildEnv {
name = "root";
pathsToLink = [
"/bin"
];
paths = [
bashInteractive
coreutils
main'
start
];
};
config = {
Cmd = [
"${lib.getExe start}"
];
Entrypoint = if !stdenv.isDarwin
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
then [ "${lib.getExe' tini "tini"}" "--" ]
else [];
Env = [
"SSL_CERT_FILE=/complement/ca/ca.crt"
"CONDUIT_CONFIG=${./config.toml}"
];
ExposedPorts = {
"8008/tcp" = {};
"8448/tcp" = {};
};
};
}


@ -0,0 +1,100 @@
{ lib
, pkgsBuildHost
, rust
, stdenv
}:
lib.optionalAttrs stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = "";
}
//
{
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
([]
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
++ lib.optionals
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
[ "-l" "c" ]
++ lib.optionals
# This check has to match the one [here][0]. We only need to set
# these flags when using a different linker. Don't ask me why,
# though, because I don't know. All I know is it breaks otherwise.
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
(
# Nixpkgs doesn't check for x86_64 here but we do, because I
# observed a failure building statically for x86_64 without
# including it here. Linkers are weird.
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
&& stdenv.hostPlatform.isStatic
&& !stdenv.isDarwin
&& !stdenv.cc.bintools.isLLVM
)
[
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
//
(
let
inherit (rust.lib) envVars;
in
lib.optionalAttrs
(stdenv.targetPlatform.rust.rustcTarget
!= stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
envVars.linkerForTarget;
}
)
//
(
let
inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
//
(
let
inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
}
)
)

nix/pkgs/main/default.nix Normal file

@ -0,0 +1,71 @@
{ inputs
# Dependencies
, craneLib
, lib
, libiconv
, pkgsBuildHost
, rocksdb
, rust
, stdenv
# Options
, features ? []
, profile ? "release"
}:
craneLib.buildPackage rec {
src = inputs.nix-filter {
root = inputs.self;
include = [
"src"
"Cargo.toml"
"Cargo.lock"
];
};
# This is redundant with CI
doCheck = false;
env =
let
rocksdb' = rocksdb.override {
enableJemalloc = builtins.elem "jemalloc" features;
};
in
{
CARGO_PROFILE = profile;
CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
inherit
lib
pkgsBuildHost
rust
stdenv;
});
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgsBuildHost.rustPlatform.bindgenHook
]
++ lib.optionals stdenv.isDarwin [ libiconv ];
cargoExtraArgs = ""
+ lib.optionalString
(features != [])
"--features " + (builtins.concatStringsSep "," features);
meta.mainProgram = (craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
}).pname;
passthru = {
inherit env;
};
}


@ -0,0 +1,28 @@
{ inputs
# Dependencies
, dockerTools
, lib
, main
, stdenv
, tini
}:
dockerTools.buildLayeredImage {
name = main.pname;
tag = "main";
created = "@${toString inputs.self.lastModified}";
contents = [
dockerTools.caCertificates
];
config = {
Entrypoint = if !stdenv.isDarwin
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
then [ "${lib.getExe' tini "tini"}" "--" ]
else [];
Cmd = [
"${lib.getExe main}"
];
};
}


@ -10,8 +10,9 @@ use ruma::{
},
OwnedRoomAliasId, OwnedServerName,
};
use tracing::debug;
use crate::{services, Error, Result, Ruma};
use crate::{debug_info, debug_warn, services, Error, Result, Ruma};
/// # `PUT /_matrix/client/v3/directory/room/{roomAlias}`
///
@ -118,12 +119,20 @@ pub async fn delete_alias_route(body: Ruma<delete_alias::v3::Request>) -> Result
///
/// Resolve an alias locally or over federation.
pub async fn get_alias_route(body: Ruma<get_alias::v3::Request>) -> Result<get_alias::v3::Response> {
get_alias_helper(body.body.room_alias).await
get_alias_helper(body.body.room_alias, None).await
}
pub(crate) async fn get_alias_helper(room_alias: OwnedRoomAliasId) -> Result<get_alias::v3::Response> {
if room_alias.server_name() != services().globals.server_name() {
let response = services()
pub(crate) async fn get_alias_helper(
room_alias: OwnedRoomAliasId, servers: Option<Vec<OwnedServerName>>,
) -> Result<get_alias::v3::Response> {
debug!("get_alias_helper servers: {servers:?}");
if room_alias.server_name() != services().globals.server_name()
&& (!servers
.as_ref()
.is_some_and(|servers| servers.contains(&services().globals.server_name().to_owned()))
|| servers.as_ref().is_none())
{
let mut response = services()
.sending
.send_federation_request(
room_alias.server_name(),
@ -131,47 +140,89 @@ pub(crate) async fn get_alias_helper(room_alias: OwnedRoomAliasId) -> Result<get
room_alias: room_alias.clone(),
},
)
.await?;
.await;
let room_id = response.room_id;
debug_info!("room alias server_name get_alias_helper response: {response:?}");
let mut servers = response.servers;
// since the room alias server_name responded, insert it into the list
servers.push(room_alias.server_name().into());
// find active servers in room state cache to suggest
servers.extend(
services()
.rooms
.state_cache
.room_servers(&room_id)
.filter_map(Result::ok),
);
servers.sort_unstable();
servers.dedup();
// shuffle list of servers randomly after sort and dedupe
servers.shuffle(&mut rand::thread_rng());
// prefer the very first server to be ourselves if available, else prefer the
// room alias server first
if let Some(server_index) = servers
.iter()
.position(|server| server == services().globals.server_name())
{
servers.remove(server_index);
servers.insert(0, services().globals.server_name().to_owned());
} else if let Some(alias_server_index) = servers
.iter()
.position(|server| server == room_alias.server_name())
{
servers.remove(alias_server_index);
servers.insert(0, room_alias.server_name().into());
if let Err(ref e) = response {
debug_info!(
"Server {} of the original room alias failed to assist in resolving room alias: {e}",
room_alias.server_name()
);
}
return Ok(get_alias::v3::Response::new(room_id, servers));
if response.as_ref().is_ok_and(|resp| resp.servers.is_empty()) || response.as_ref().is_err() {
if let Some(servers) = servers {
for server in servers {
response = services()
.sending
.send_federation_request(
&server,
federation::query::get_room_information::v1::Request {
room_alias: room_alias.clone(),
},
)
.await;
debug_info!("Got response from server {server} for room aliases: {response:?}");
if let Ok(ref response) = response {
if !response.servers.is_empty() {
break;
}
debug_warn!(
"Server {server} responded with room aliases, but was empty? Response: {response:?}"
);
}
}
}
}
if let Ok(response) = response {
let room_id = response.room_id;
let mut servers = response.servers;
// since the room alias server_name responded, insert it into the list
servers.push(room_alias.server_name().into());
// find active servers in room state cache to suggest
servers.extend(
services()
.rooms
.state_cache
.room_servers(&room_id)
.filter_map(Result::ok),
);
servers.sort_unstable();
servers.dedup();
// shuffle list of servers randomly after sort and dedupe
servers.shuffle(&mut rand::thread_rng());
// prefer the very first server to be ourselves if available, else prefer the
// room alias server first
if let Some(server_index) = servers
.iter()
.position(|server| server == services().globals.server_name())
{
servers.remove(server_index);
servers.insert(0, services().globals.server_name().to_owned());
} else if let Some(alias_server_index) = servers
.iter()
.position(|server| server == room_alias.server_name())
{
servers.remove(alias_server_index);
servers.insert(0, room_alias.server_name().into());
}
return Ok(get_alias::v3::Response::new(room_id, servers));
}
return Err(Error::BadRequest(
ErrorKind::Unknown,
"No servers could assist in resolving the room alias",
));
}
let mut room_id = None;


@ -692,20 +692,8 @@ async fn download_html(client: &reqwest::Client, url: &str) -> Result<UrlPreview
async fn request_url_preview(url: &str) -> Result<UrlPreviewData> {
if let Ok(ip) = IPAddress::parse(url) {
let cidr_ranges_s = services().globals.ip_range_denylist().to_vec();
let mut cidr_ranges: Vec<IPAddress> = Vec::new();
for cidr in cidr_ranges_s {
cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup"));
}
for cidr in cidr_ranges {
if cidr.includes(&ip) {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Requesting from this address is forbidden",
));
}
if !services().globals.valid_cidr_range(&ip) {
return Err(Error::BadServerResponse("Requesting from this address is forbidden"));
}
}
@ -714,20 +702,8 @@ async fn request_url_preview(url: &str) -> Result<UrlPreviewData> {
if let Some(remote_addr) = response.remote_addr() {
if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) {
let cidr_ranges_s = services().globals.ip_range_denylist().to_vec();
let mut cidr_ranges: Vec<IPAddress> = Vec::new();
for cidr in cidr_ranges_s {
cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup"));
}
for cidr in cidr_ranges {
if cidr.includes(&ip) {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Requesting from this address is forbidden",
));
}
if !services().globals.valid_cidr_range(&ip) {
return Err(Error::BadServerResponse("Requesting from this address is forbidden"));
}
}
}


@ -115,8 +115,9 @@ pub async fn join_room_by_id_route(body: Ruma<join_room_by_id::v3::Request>) ->
///
/// - If the server knows about this room: creates the join event and does auth
/// rules locally
/// - If the server does not know about the room: asks other servers over
/// federation
/// - If the server does not know about the room: uses the server name query
/// param if specified; if not specified, asks other servers over federation
/// via the room alias server name and room ID server name
pub async fn join_room_by_id_or_alias_route(
body: Ruma<join_room_by_id_or_alias::v3::Request>,
) -> Result<join_room_by_id_or_alias::v3::Response> {
@ -152,7 +153,6 @@ pub async fn join_room_by_id_or_alias_route(
}
let mut servers = body.server_name.clone();
servers.extend(
services()
.rooms
@ -181,7 +181,24 @@ pub async fn join_room_by_id_or_alias_route(
(servers, room_id)
},
Err(room_alias) => {
let response = get_alias_helper(room_alias.clone()).await?;
if services()
.globals
.config
.forbidden_remote_server_names
.contains(&room_alias.server_name().to_owned())
&& !services().users.is_admin(sender_user)?
{
warn!(
"User {sender_user} tried joining room alias {room_alias} which has a server name that is \
globally forbidden. Rejecting.",
);
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"This remote server is banned on this homeserver.",
));
}
let response = get_alias_helper(room_alias.clone(), Some(body.server_name.clone())).await?;
if services().rooms.metadata.is_banned(&response.room_id)? && !services().users.is_admin(sender_user)? {
return Err(Error::BadRequest(
@ -198,9 +215,9 @@ pub async fn join_room_by_id_or_alias_route(
&& !services().users.is_admin(sender_user)?
{
warn!(
"User {sender_user} tried joining room alias {} with room ID {} which has a server name that is \
globally forbidden. Rejecting.",
&room_alias, &response.room_id
"User {sender_user} tried joining room alias {room_alias} with room ID {}, which the alias has a \
server name that is globally forbidden. Rejecting.",
&response.room_id
);
return Err(Error::BadRequest(
ErrorKind::forbidden(),
@ -217,9 +234,9 @@ pub async fn join_room_by_id_or_alias_route(
&& !services().users.is_admin(sender_user)?
{
warn!(
"User {sender_user} tried joining room alias {} with room ID {} which has a server name that \
is globally forbidden. Rejecting.",
&room_alias, &response.room_id
"User {sender_user} tried joining room alias {room_alias} with room ID {}, which has a server \
name that is globally forbidden. Rejecting.",
&response.room_id
);
return Err(Error::BadRequest(
ErrorKind::forbidden(),
@ -228,7 +245,30 @@ pub async fn join_room_by_id_or_alias_route(
}
}
(response.servers, response.room_id)
let mut servers = body.server_name;
servers.extend(response.servers);
servers.extend(
services()
.rooms
.state_cache
.servers_invite_via(&response.room_id)?
.unwrap_or(
services()
.rooms
.state_cache
.invite_state(sender_user, &response.room_id)?
.unwrap_or_default()
.iter()
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(ToOwned::to_owned))
.filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned())
.collect(),
),
);
(servers, response.room_id)
},
};
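As a rough illustration of the `server_name` query parameter the updated doc comment refers to, here is a hypothetical join-by-alias request (homeserver URL, alias, and token are placeholders):

```bash
# Join via alias while suggesting servers to join through; these become the
# body.server_name values the handler folds into its `servers` list above.
curl -X POST \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{}' \
  "https://conduwuit.example.com/_matrix/client/v3/join/%23room%3Aexample.org?server_name=example.org&server_name=matrix.org"
```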


@ -8,6 +8,7 @@ use ruma::{
events::{
room::{
canonical_alias::RoomCanonicalAliasEventContent,
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
join_rules::{JoinRule, RoomJoinRulesEventContent},
},
AnyStateEventContent, StateEventType,
@ -251,6 +252,23 @@ async fn send_state_event_for_key_helper(
}
}
},
// The admin room is sensitive; it should never be made world readable
StateEventType::RoomHistoryVisibility => {
if let Some(admin_room_id) = service::admin::Service::get_admin_room()? {
if admin_room_id == room_id {
if let Ok(visibility_content) =
serde_json::from_str::<RoomHistoryVisibilityEventContent>(json.json().get())
{
if visibility_content.history_visibility == HistoryVisibility::WorldReadable {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Admin room is not allowed to be made world readable (public room history).",
));
}
}
}
}
},
// TODO: allow alias if it previously existed
StateEventType::RoomCanonicalAlias => {
if let Ok(canonical_alias) = serde_json::from_str::<RoomCanonicalAliasEventContent>(json.json().get()) {


@ -5,7 +5,7 @@ use tracing::{debug, error, info, warn};
use crate::{utils::error::Error, Config};
pub fn check(config: &Config) -> Result<(), Error> {
pub(super) fn check(config: &Config) -> Result<(), Error> {
config.warn_deprecated();
config.warn_unknown_key();


@ -22,10 +22,10 @@ use serde::{de::IgnoredAny, Deserialize};
use tracing::{debug, error, warn};
use url::Url;
use self::proxy::ProxyConfig;
use self::{check::check, proxy::ProxyConfig};
use crate::utils::error::Error;
mod check;
pub(crate) mod check;
mod proxy;
#[derive(Deserialize, Clone, Debug)]
@ -112,20 +112,22 @@ pub struct Config {
pub request_conn_timeout: u64,
#[serde(default = "default_request_timeout")]
pub request_timeout: u64,
#[serde(default = "default_request_idle_per_host")]
pub request_idle_per_host: u16,
#[serde(default = "default_request_total_timeout")]
pub request_total_timeout: u64,
#[serde(default = "default_request_idle_timeout")]
pub request_idle_timeout: u64,
#[serde(default = "default_request_idle_per_host")]
pub request_idle_per_host: u16,
#[serde(default = "default_well_known_conn_timeout")]
pub well_known_conn_timeout: u64,
#[serde(default = "default_well_known_timeout")]
pub well_known_timeout: u64,
#[serde(default = "default_federation_timeout")]
pub federation_timeout: u64,
#[serde(default = "default_federation_idle_per_host")]
pub federation_idle_per_host: u16,
#[serde(default = "default_federation_idle_timeout")]
pub federation_idle_timeout: u64,
#[serde(default = "default_federation_idle_per_host")]
pub federation_idle_per_host: u16,
#[serde(default = "default_sender_timeout")]
pub sender_timeout: u64,
#[serde(default = "default_sender_idle_timeout")]
@ -371,8 +373,6 @@ impl Config {
Ok(config) => config,
};
check::check(&config)?;
// don't start if we're listening on both UNIX sockets and TCP at same time
if config.is_dual_listening(&raw_config) {
return Err(Error::bad_config("dual listening on UNIX and TCP sockets not allowed."));
@ -452,6 +452,8 @@ impl Config {
.collect::<Vec<_>>(),
}
}
pub fn check(&self) -> Result<(), Error> { check(self) }
}
impl fmt::Display for Config {
@ -502,6 +504,7 @@ impl fmt::Display for Config {
("Maximum concurrent requests", &self.max_concurrent_requests.to_string()),
("Request connect timeout", &self.request_conn_timeout.to_string()),
("Request timeout", &self.request_timeout.to_string()),
("Request total timeout", &self.request_total_timeout.to_string()),
("Idle connections per host", &self.request_idle_per_host.to_string()),
("Request pool idle timeout", &self.request_idle_timeout.to_string()),
("Well_known connect timeout", &self.well_known_conn_timeout.to_string()),
@ -869,20 +872,22 @@ fn default_request_conn_timeout() -> u64 { 10 }
fn default_request_timeout() -> u64 { 35 }
fn default_request_idle_per_host() -> u16 { 1 }
fn default_request_total_timeout() -> u64 { 320 }
fn default_request_idle_timeout() -> u64 { 5 }
fn default_request_idle_per_host() -> u16 { 1 }
fn default_well_known_conn_timeout() -> u64 { 6 }
fn default_well_known_timeout() -> u64 { 10 }
fn default_federation_timeout() -> u64 { 300 }
fn default_federation_idle_per_host() -> u16 { 1 }
fn default_federation_idle_timeout() -> u64 { 25 }
fn default_federation_idle_per_host() -> u16 { 1 }
fn default_sender_timeout() -> u64 { 180 }
fn default_sender_idle_timeout() -> u64 { 180 }
@ -902,7 +907,7 @@ fn default_log() -> String {
if cfg!(debug_assertions) {
"debug".to_owned()
} else {
"warn,ruma_state_res=warn".to_owned()
"info".to_owned()
}
}


@ -3,7 +3,7 @@ use std::fs::Permissions; // not unix specific, just only for UNIX sockets stuff
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt as _; /* not unix specific, just only for UNIX sockets stuff and *nix
* container checks */
use std::{io, net::SocketAddr, sync::atomic, time::Duration};
use std::{any::Any, io, net::SocketAddr, sync::atomic, time::Duration};
use axum::{
extract::{DefaultBodyLimit, MatchedPath},
@ -30,6 +30,7 @@ use tokio::{
};
use tower::ServiceBuilder;
use tower_http::{
catch_panic::CatchPanicLayer,
cors::{self, CorsLayer},
trace::{DefaultOnFailure, TraceLayer},
ServiceBuilderExt as _,
@ -76,7 +77,7 @@ async fn async_main(server: &Server) -> Result<(), Error> {
if let Err(error) = run(server).await {
error!("Critical error running server: {error}");
return Err(Error::Error(format!("{error}")));
};
}
if let Err(error) = stop(server).await {
error!("Critical error stopping server: {error}");
@ -288,7 +289,8 @@ async fn build(server: &Server) -> io::Result<axum::routing::IntoMakeService<Rou
.max_request_size
.try_into()
.expect("failed to convert max request size"),
));
))
.layer(CatchPanicLayer::custom(catch_panic_layer));
#[cfg(any(feature = "zstd_compression", feature = "gzip_compression", feature = "brotli_compression"))]
{
@ -438,6 +440,8 @@ fn init(args: clap::Args) -> Result<Server, Error> {
tracing_reload_handle = init_tracing_sub(&config);
};
config.check()?;
info!(
server_name = ?config.server_name,
database_path = ?config.database_path,
@ -586,3 +590,27 @@ fn maximize_fd_limit() -> Result<(), nix::errno::Errno> {
Ok(())
}
#[allow(clippy::needless_pass_by_value)]
fn catch_panic_layer(err: Box<dyn Any + Send + 'static>) -> http::Response<http_body_util::Full<bytes::Bytes>> {
let details = if let Some(s) = err.downcast_ref::<String>() {
s.clone()
} else if let Some(s) = err.downcast_ref::<&str>() {
s.to_string()
} else {
"Unknown internal server error occurred.".to_owned()
};
let body = serde_json::json!({
"errcode": "M_UNKNOWN",
"error": "M_UNKNOWN: Internal server error occurred",
"details": details,
})
.to_string();
http::Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.header(header::CONTENT_TYPE, "application/json")
.body(http_body_util::Full::from(body))
.expect("Failed to create response for our panic catcher?")
}


@ -1,100 +0,0 @@
use clap::Subcommand;
use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent};
use crate::{service::admin::escape_html, services, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum AppserviceCommand {
/// - Register an appservice using its registration YAML
///
/// This command needs a YAML generated by an appservice (such as a bridge),
/// which must be provided in a Markdown code block below the command.
///
/// Registering a new bridge using the ID of an existing bridge will replace
/// the old one.
Register,
/// - Unregister an appservice using its ID
///
/// You can find the ID using the `list-appservices` command.
Unregister {
/// The appservice to unregister
appservice_identifier: String,
},
/// - Show an appservice's config using its ID
///
/// You can find the ID using the `list-appservices` command.
Show {
/// The appservice to show
appservice_identifier: String,
},
/// - List all the currently registered appservices
List,
}
pub(crate) async fn process(command: AppserviceCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
AppserviceCommand::Register => {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let appservice_config = body[1..body.len() - 1].join("\n");
let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config);
match parsed_config {
Ok(yaml) => match services().appservice.register_appservice(yaml).await {
Ok(id) => Ok(RoomMessageEventContent::text_plain(format!(
"Appservice registered with ID: {id}."
))),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Failed to register appservice: {e}"
))),
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Could not parse appservice config: {e}"
))),
}
} else {
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
},
AppserviceCommand::Unregister {
appservice_identifier,
} => match services()
.appservice
.unregister_appservice(&appservice_identifier)
.await
{
Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Failed to unregister appservice: {e}"
))),
},
AppserviceCommand::Show {
appservice_identifier,
} => match services()
.appservice
.get_registration(&appservice_identifier)
.await
{
Some(config) => {
let config_str = serde_yaml::to_string(&config).expect("config should've been validated on register");
let output = format!("Config for {}:\n\n```yaml\n{}\n```", appservice_identifier, config_str,);
let output_html = format!(
"Config for {}:\n\n<pre><code class=\"language-yaml\">{}</code></pre>",
escape_html(&appservice_identifier),
escape_html(&config_str),
);
Ok(RoomMessageEventContent::text_html(output, output_html))
},
None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")),
},
AppserviceCommand::List => {
let appservices = services().appservice.iter_ids().await;
let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", "));
Ok(RoomMessageEventContent::text_plain(output))
},
}
}


@ -0,0 +1,66 @@
use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent};
use crate::{service::admin::escape_html, services, Result};
pub(super) async fn register(body: Vec<&str>) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let appservice_config = body[1..body.len() - 1].join("\n");
let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config);
match parsed_config {
Ok(yaml) => match services().appservice.register_appservice(yaml).await {
Ok(id) => Ok(RoomMessageEventContent::text_plain(format!(
"Appservice registered with ID: {id}."
))),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Failed to register appservice: {e}"
))),
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Could not parse appservice config: {e}"
))),
}
} else {
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
}
pub(super) async fn unregister(_body: Vec<&str>, appservice_identifier: String) -> Result<RoomMessageEventContent> {
match services()
.appservice
.unregister_appservice(&appservice_identifier)
.await
{
Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Failed to unregister appservice: {e}"
))),
}
}
pub(super) async fn show(_body: Vec<&str>, appservice_identifier: String) -> Result<RoomMessageEventContent> {
match services()
.appservice
.get_registration(&appservice_identifier)
.await
{
Some(config) => {
let config_str = serde_yaml::to_string(&config).expect("config should've been validated on register");
let output = format!("Config for {}:\n\n```yaml\n{}\n```", appservice_identifier, config_str,);
let output_html = format!(
"Config for {}:\n\n<pre><code class=\"language-yaml\">{}</code></pre>",
escape_html(&appservice_identifier),
escape_html(&config_str),
);
Ok(RoomMessageEventContent::text_html(output, output_html))
},
None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")),
}
}
pub(super) async fn list(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let appservices = services().appservice.iter_ids().await;
let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", "));
Ok(RoomMessageEventContent::text_plain(output))
}


@ -0,0 +1,52 @@
use clap::Subcommand;
use ruma::events::room::message::RoomMessageEventContent;
use self::appservice_command::{list, register, show, unregister};
use crate::Result;
pub(crate) mod appservice_command;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum AppserviceCommand {
/// - Register an appservice using its registration YAML
///
/// This command needs a YAML generated by an appservice (such as a bridge),
/// which must be provided in a Markdown code block below the command.
///
/// Registering a new bridge using the ID of an existing bridge will replace
/// the old one.
Register,
/// - Unregister an appservice using its ID
///
/// You can find the ID using the `list-appservices` command.
Unregister {
/// The appservice to unregister
appservice_identifier: String,
},
/// - Show an appservice's config using its ID
///
/// You can find the ID using the `list-appservices` command.
Show {
/// The appservice to show
appservice_identifier: String,
},
/// - List all the currently registered appservices
List,
}
pub(crate) async fn process(command: AppserviceCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
AppserviceCommand::Register => register(body).await?,
AppserviceCommand::Unregister {
appservice_identifier,
} => unregister(body, appservice_identifier).await?,
AppserviceCommand::Show {
appservice_identifier,
} => show(body, appservice_identifier).await?,
AppserviceCommand::List => list(body).await?,
})
}

View file

@ -1,432 +0,0 @@
use std::{collections::BTreeMap, sync::Arc, time::Instant};
use clap::Subcommand;
use ruma::{
api::client::error::ErrorKind, events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId,
RoomId, RoomVersionId, ServerName,
};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
use tracing_subscriber::EnvFilter;
use crate::{api::server_server::parse_incoming_pdu, services, utils::HtmlEscape, Error, PduEvent, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum DebugCommand {
/// - Get the auth_chain of a PDU
GetAuthChain {
/// An event ID (the $ character followed by the base64 reference hash)
event_id: Box<EventId>,
},
/// - Parse and print a PDU from a JSON
///
/// The PDU event is only checked for validity and is not added to the
/// database.
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.
ParsePdu,
/// - Retrieve and print a PDU by ID from the conduwuit database
GetPdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: Box<EventId>,
},
/// - Attempts to retrieve a PDU from a remote server. Inserts it into our
/// database/timeline if found and we do not have this PDU already
/// (following normal event auth rules, handles it as an incoming PDU).
GetRemotePdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: Box<EventId>,
/// Argument for us to attempt to fetch the event from the
/// specified remote server.
server: Box<ServerName>,
},
/// - Gets all the room state events for the specified room.
///
/// This is functionally equivalent to `GET
/// /_matrix/client/v3/rooms/{roomid}/state`, except the admin command does
/// *not* check if the sender user is allowed to see state events. This is
/// done because it's implied that server admins here have database access
/// and can see/get room info themselves anyways if they were malicious
/// admins.
///
/// Of course the check is still done on the actual client API.
GetRoomState {
/// Room ID
room_id: Box<RoomId>,
},
/// - Sends a federation request to the remote server's
/// `/_matrix/federation/v1/version` endpoint and measures the latency it
/// took for the server to respond
Ping {
server: Box<ServerName>,
},
/// - Forces device lists for all local and remote users to be updated (as
/// having new keys available)
ForceDeviceListUpdates,
/// - Change tracing log level/filter on the fly
///
/// This accepts the same format as the `log` config option.
ChangeLogLevel {
/// Log level/filter
filter: Option<String>,
/// Resets the log level/filter to the one in your config
#[arg(short, long)]
reset: bool,
},
}
pub(crate) async fn process(command: DebugCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
DebugCommand::GetAuthChain {
event_id,
} => {
let event_id = Arc::<EventId>::from(event_id);
if let Some(event) = services().rooms.timeline.get_pdu_json(&event_id)? {
let room_id_str = event
.get("room_id")
.and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let room_id = <&RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
let start = Instant::now();
let count = services()
.rooms
.auth_chain
.event_ids_iter(room_id, vec![event_id])
.await?
.count();
let elapsed = start.elapsed();
RoomMessageEventContent::text_plain(format!("Loaded auth chain with length {count} in {elapsed:?}"))
} else {
RoomMessageEventContent::text_plain("Event not found.")
}
},
DebugCommand::ParsePdu => {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) {
Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) {
Ok(hash) => {
let event_id = EventId::parse(format!("${hash}"));
match serde_json::from_value::<PduEvent>(
serde_json::to_value(value).expect("value is json"),
) {
Ok(pdu) => {
RoomMessageEventContent::text_plain(format!("EventId: {event_id:?}\n{pdu:#?}"))
},
Err(e) => RoomMessageEventContent::text_plain(format!(
"EventId: {event_id:?}\nCould not parse event: {e}"
)),
}
},
Err(e) => RoomMessageEventContent::text_plain(format!("Could not parse PDU JSON: {e:?}")),
},
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json in command body: {e}")),
}
} else {
RoomMessageEventContent::text_plain("Expected code block in command body.")
}
},
DebugCommand::GetPdu {
event_id,
} => {
let mut outlier = false;
let mut pdu_json = services()
.rooms
.timeline
.get_non_outlier_pdu_json(&event_id)?;
if pdu_json.is_none() {
outlier = true;
pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?;
}
match pdu_json {
Some(json) => {
let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json");
return Ok(RoomMessageEventContent::text_html(
format!(
"{}\n```json\n{}\n```",
if outlier {
"Outlier PDU found in our database"
} else {
"PDU found in our database"
},
json_text
),
format!(
"<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>\n",
if outlier {
"Outlier PDU found in our database"
} else {
"PDU found in our database"
},
HtmlEscape(&json_text)
),
));
},
None => {
return Ok(RoomMessageEventContent::text_plain("PDU not found locally."));
},
}
},
DebugCommand::GetRemotePdu {
event_id,
server,
} => {
if !services().globals.config.allow_federation {
return Ok(RoomMessageEventContent::text_plain(
"Federation is disabled on this homeserver.",
));
}
if server == services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for fetching local \
PDUs.",
));
}
// TODO: use Futures as some requests may take a while so we dont block the
// admin room
match services()
.sending
.send_federation_request(
&server,
ruma::api::federation::event::get_event::v1::Request {
event_id: event_id.clone().into(),
},
)
.await
{
Ok(response) => {
let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| {
warn!(
"Requested event ID {event_id} from server but failed to convert from RawValue to \
CanonicalJsonObject (malformed event/response?): {e}"
);
Error::BadRequest(ErrorKind::Unknown, "Received response from server but failed to parse PDU")
})?;
debug!("Attempting to parse PDU: {:?}", &response.pdu);
let parsed_pdu = {
let parsed_result = parse_incoming_pdu(&response.pdu);
let (event_id, value, room_id) = match parsed_result {
Ok(t) => t,
Err(e) => {
warn!("Failed to parse PDU: {e}");
info!("Full PDU: {:?}", &response.pdu);
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to parse PDU remote server {server} sent us: {e}"
)));
},
};
vec![(event_id, value, room_id)]
};
let pub_key_map = RwLock::new(BTreeMap::new());
debug!("Attempting to fetch homeserver signing keys for {server}");
services()
.rooms
.event_handler
.fetch_required_signing_keys(
parsed_pdu.iter().map(|(_event_id, event, _room_id)| event),
&pub_key_map,
)
.await
.unwrap_or_else(|e| {
warn!("Could not fetch all signatures for PDUs from {server}: {e:?}");
});
info!("Attempting to handle event ID {event_id} as backfilled PDU");
services()
.rooms
.timeline
.backfill_pdu(&server, response.pdu, &pub_key_map)
.await?;
let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json");
return Ok(RoomMessageEventContent::text_html(
format!(
"{}\n```json\n{}\n```",
"Got PDU from specified server and handled as backfilled PDU successfully. Event body:",
json_text
),
format!(
"<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>\n",
"Got PDU from specified server and handled as backfilled PDU successfully. Event body:",
HtmlEscape(&json_text)
),
));
},
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Remote server did not have PDU or failed sending request to remote server: {e}"
)));
},
}
},
DebugCommand::GetRoomState {
room_id,
} => {
let room_state = services()
.rooms
.state_accessor
.room_state_full(&room_id)
.await?
.values()
.map(|pdu| pdu.to_state_event())
.collect::<Vec<_>>();
if room_state.is_empty() {
return Ok(RoomMessageEventContent::text_plain(
"Unable to find room state in our database (vector is empty)",
));
}
let json_text = serde_json::to_string_pretty(&room_state).map_err(|e| {
error!("Failed converting room state vector in our database to pretty JSON: {e}");
Error::bad_database(
"Failed to convert room state events to pretty JSON, possible invalid room state events in our \
database",
)
})?;
return Ok(RoomMessageEventContent::text_html(
format!("{}\n```json\n{}\n```", "Found full room state", json_text),
format!(
"<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>\n",
"Found full room state",
HtmlEscape(&json_text)
),
));
},
DebugCommand::Ping {
server,
} => {
if server == services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to send federation requests to ourselves.",
));
}
let timer = tokio::time::Instant::now();
match services()
.sending
.send_federation_request(&server, ruma::api::federation::discovery::get_server_version::v1::Request {})
.await
{
Ok(response) => {
let ping_time = timer.elapsed();
let json_text_res = serde_json::to_string_pretty(&response.server);
if let Ok(json) = json_text_res {
return Ok(RoomMessageEventContent::text_html(
format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```"),
format!(
"<p>Got response which took {ping_time:?} time:</p>\n<pre><code \
class=\"language-json\">{}\n</code></pre>\n",
HtmlEscape(&json)
),
));
}
return Ok(RoomMessageEventContent::text_plain(format!(
"Got non-JSON response which took {ping_time:?} time:\n{0:?}",
response
)));
},
Err(e) => {
error!("Failed sending federation request to specified server from ping debug command: {e}");
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed sending federation request to specified server:\n\n{e}",
)));
},
}
},
DebugCommand::ForceDeviceListUpdates => {
// Force E2EE device list updates for all users
for user_id in services().users.iter().filter_map(Result::ok) {
services().users.mark_device_key_update(&user_id)?;
}
RoomMessageEventContent::text_plain("Marked all devices for all users as having new keys to update")
},
DebugCommand::ChangeLogLevel {
filter,
reset,
} => {
if reset {
let old_filter_layer = match EnvFilter::try_new(&services().globals.config.log) {
Ok(s) => s,
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Log level from config appears to be invalid now: {e}"
)));
},
};
match services()
.globals
.tracing_reload_handle
.modify(|filter| *filter = old_filter_layer)
{
Ok(()) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Successfully changed log level back to config value {}",
services().globals.config.log
)));
},
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to modify and reload the global tracing log level: {e}"
)));
},
}
}
if let Some(filter) = filter {
let new_filter_layer = match EnvFilter::try_new(filter) {
Ok(s) => s,
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Invalid log level filter specified: {e}"
)));
},
};
match services()
.globals
.tracing_reload_handle
.modify(|filter| *filter = new_filter_layer)
{
Ok(()) => {
return Ok(RoomMessageEventContent::text_plain("Successfully changed log level"));
},
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to modify and reload the global tracing log level: {e}"
)));
},
}
}
return Ok(RoomMessageEventContent::text_plain("No log level was specified."));
},
})
}

View file

@ -0,0 +1,430 @@
use std::{collections::BTreeMap, sync::Arc, time::Instant};
use ruma::{
api::client::error::ErrorKind, events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId,
RoomId, RoomVersionId, ServerName,
};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
use tracing_subscriber::EnvFilter;
use crate::{api::server_server::parse_incoming_pdu, services, utils::HtmlEscape, Error, PduEvent, Result};
pub(super) async fn get_auth_chain(_body: Vec<&str>, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
let event_id = Arc::<EventId>::from(event_id);
if let Some(event) = services().rooms.timeline.get_pdu_json(&event_id)? {
let room_id_str = event
.get("room_id")
.and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let room_id = <&RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
let start = Instant::now();
let count = services()
.rooms
.auth_chain
.event_ids_iter(room_id, vec![event_id])
.await?
.count();
let elapsed = start.elapsed();
Ok(RoomMessageEventContent::text_plain(format!(
"Loaded auth chain with length {count} in {elapsed:?}"
)))
} else {
Ok(RoomMessageEventContent::text_plain("Event not found."))
}
}
pub(super) async fn parse_pdu(body: Vec<&str>) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) {
Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) {
Ok(hash) => {
let event_id = EventId::parse(format!("${hash}"));
match serde_json::from_value::<PduEvent>(serde_json::to_value(value).expect("value is json")) {
Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!("EventId: {event_id:?}\n{pdu:#?}"))),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"EventId: {event_id:?}\nCould not parse event: {e}"
))),
}
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Could not parse PDU JSON: {e:?}"))),
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Invalid json in command body: {e}"
))),
}
} else {
Ok(RoomMessageEventContent::text_plain("Expected code block in command body."))
}
}
pub(super) async fn get_pdu(_body: Vec<&str>, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
let mut outlier = false;
let mut pdu_json = services()
.rooms
.timeline
.get_non_outlier_pdu_json(&event_id)?;
if pdu_json.is_none() {
outlier = true;
pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?;
}
match pdu_json {
Some(json) => {
let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json");
Ok(RoomMessageEventContent::text_html(
format!(
"{}\n```json\n{}\n```",
if outlier {
"Outlier PDU found in our database"
} else {
"PDU found in our database"
},
json_text
),
format!(
"<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>\n",
if outlier {
"Outlier PDU found in our database"
} else {
"PDU found in our database"
},
HtmlEscape(&json_text)
),
))
},
None => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
}
}
pub(super) async fn get_remote_pdu_list(
body: Vec<&str>, server: Box<ServerName>, force: bool,
) -> Result<RoomMessageEventContent> {
if !services().globals.config.allow_federation {
return Ok(RoomMessageEventContent::text_plain(
"Federation is disabled on this homeserver.",
));
}
if server == services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for fetching local PDUs.",
));
}
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let list = body
.clone()
.drain(1..body.len() - 1)
.filter_map(|pdu| EventId::parse(pdu).ok())
.collect::<Vec<_>>();
for pdu in list {
if force {
_ = get_remote_pdu(Vec::new(), Box::from(pdu), server.clone()).await;
} else {
get_remote_pdu(Vec::new(), Box::from(pdu), server.clone()).await?;
}
}
return Ok(RoomMessageEventContent::text_plain("Fetched list of remote PDUs."));
}
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
pub(super) async fn get_remote_pdu(
_body: Vec<&str>, event_id: Box<EventId>, server: Box<ServerName>,
) -> Result<RoomMessageEventContent> {
if !services().globals.config.allow_federation {
return Ok(RoomMessageEventContent::text_plain(
"Federation is disabled on this homeserver.",
));
}
if server == services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for fetching local PDUs.",
));
}
match services()
.sending
.send_federation_request(
&server,
ruma::api::federation::event::get_event::v1::Request {
event_id: event_id.clone().into(),
},
)
.await
{
Ok(response) => {
let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| {
warn!(
"Requested event ID {event_id} from server but failed to convert from RawValue to \
CanonicalJsonObject (malformed event/response?): {e}"
);
Error::BadRequest(ErrorKind::Unknown, "Received response from server but failed to parse PDU")
})?;
debug!("Attempting to parse PDU: {:?}", &response.pdu);
let parsed_pdu = {
let parsed_result = parse_incoming_pdu(&response.pdu);
let (event_id, value, room_id) = match parsed_result {
Ok(t) => t,
Err(e) => {
warn!("Failed to parse PDU: {e}");
info!("Full PDU: {:?}", &response.pdu);
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to parse PDU remote server {server} sent us: {e}"
)));
},
};
vec![(event_id, value, room_id)]
};
let pub_key_map = RwLock::new(BTreeMap::new());
debug!("Attempting to fetch homeserver signing keys for {server}");
services()
.rooms
.event_handler
.fetch_required_signing_keys(parsed_pdu.iter().map(|(_event_id, event, _room_id)| event), &pub_key_map)
.await
.unwrap_or_else(|e| {
warn!("Could not fetch all signatures for PDUs from {server}: {e:?}");
});
info!("Attempting to handle event ID {event_id} as backfilled PDU");
services()
.rooms
.timeline
.backfill_pdu(&server, response.pdu, &pub_key_map)
.await?;
let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json");
Ok(RoomMessageEventContent::text_html(
format!(
"{}\n```json\n{}\n```",
"Got PDU from specified server and handled as backfilled PDU successfully. Event body:", json_text
),
format!(
"<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>\n",
"Got PDU from specified server and handled as backfilled PDU successfully. Event body:",
HtmlEscape(&json_text)
),
))
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Remote server did not have PDU or failed sending request to remote server: {e}"
))),
}
}
pub(super) async fn get_room_state(_body: Vec<&str>, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
let room_state = services()
.rooms
.state_accessor
.room_state_full(&room_id)
.await?
.values()
.map(|pdu| pdu.to_state_event())
.collect::<Vec<_>>();
if room_state.is_empty() {
return Ok(RoomMessageEventContent::text_plain(
"Unable to find room state in our database (vector is empty)",
));
}
let json_text = serde_json::to_string_pretty(&room_state).map_err(|e| {
error!("Failed converting room state vector in our database to pretty JSON: {e}");
Error::bad_database(
"Failed to convert room state events to pretty JSON, possible invalid room state events in our database",
)
})?;
Ok(RoomMessageEventContent::text_html(
format!("{}\n```json\n{}\n```", "Found full room state", json_text),
format!(
"<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>\n",
"Found full room state",
HtmlEscape(&json_text)
),
))
}
pub(super) async fn ping(_body: Vec<&str>, server: Box<ServerName>) -> Result<RoomMessageEventContent> {
if server == services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to send federation requests to ourselves.",
));
}
let timer = tokio::time::Instant::now();
match services()
.sending
.send_federation_request(&server, ruma::api::federation::discovery::get_server_version::v1::Request {})
.await
{
Ok(response) => {
let ping_time = timer.elapsed();
let json_text_res = serde_json::to_string_pretty(&response.server);
if let Ok(json) = json_text_res {
return Ok(RoomMessageEventContent::text_html(
format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```"),
format!(
"<p>Got response which took {ping_time:?} time:</p>\n<pre><code \
class=\"language-json\">{}\n</code></pre>\n",
HtmlEscape(&json)
),
));
}
Ok(RoomMessageEventContent::text_plain(format!(
"Got non-JSON response which took {ping_time:?} time:\n{0:?}",
response
)))
},
Err(e) => {
error!("Failed sending federation request to specified server from ping debug command: {e}");
Ok(RoomMessageEventContent::text_plain(format!(
"Failed sending federation request to specified server:\n\n{e}",
)))
},
}
}
pub(super) async fn force_device_list_updates(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
// Force E2EE device list updates for all users
for user_id in services().users.iter().filter_map(Result::ok) {
services().users.mark_device_key_update(&user_id)?;
}
Ok(RoomMessageEventContent::text_plain(
"Marked all devices for all users as having new keys to update",
))
}
pub(super) async fn change_log_level(
_body: Vec<&str>, filter: Option<String>, reset: bool,
) -> Result<RoomMessageEventContent> {
if reset {
let old_filter_layer = match EnvFilter::try_new(&services().globals.config.log) {
Ok(s) => s,
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Log level from config appears to be invalid now: {e}"
)));
},
};
match services()
.globals
.tracing_reload_handle
.modify(|filter| *filter = old_filter_layer)
{
Ok(()) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Successfully changed log level back to config value {}",
services().globals.config.log
)));
},
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to modify and reload the global tracing log level: {e}"
)));
},
}
}
if let Some(filter) = filter {
let new_filter_layer = match EnvFilter::try_new(filter) {
Ok(s) => s,
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Invalid log level filter specified: {e}"
)));
},
};
match services()
.globals
.tracing_reload_handle
.modify(|filter| *filter = new_filter_layer)
{
Ok(()) => {
return Ok(RoomMessageEventContent::text_plain("Successfully changed log level"));
},
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to modify and reload the global tracing log level: {e}"
)));
},
}
}
Ok(RoomMessageEventContent::text_plain("No log level was specified."))
}
pub(super) async fn sign_json(body: Vec<&str>) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) {
Ok(mut value) => {
ruma::signatures::sign_json(
services().globals.server_name().as_str(),
services().globals.keypair(),
&mut value,
)
.expect("our request json is what ruma expects");
let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json");
Ok(RoomMessageEventContent::text_plain(json_text))
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
}
} else {
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
}
pub(super) async fn verify_json(body: Vec<&str>) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) {
Ok(value) => {
let pub_key_map = RwLock::new(BTreeMap::new());
services()
.rooms
.event_handler
.fetch_required_signing_keys([&value], &pub_key_map)
.await?;
let pub_key_map = pub_key_map.read().await;
match ruma::signatures::verify_json(&pub_key_map, &value) {
Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Signature verification failed: {e}"
))),
}
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
}
} else {
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
}
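
`change_log_level` above feeds its argument straight into `tracing_subscriber::EnvFilter`, so it accepts the same directive syntax as the `log` config option: a global level optionally followed by comma-separated per-target overrides. A small sketch (the target names are only illustrative, and it assumes the `env-filter` feature of `tracing-subscriber` that this crate already uses):

```rust
use tracing_subscriber::EnvFilter;

fn main() {
    // Same directive syntax as the `log` config option: a global level
    // plus optional per-target overrides. Target names are made up here.
    let filter = EnvFilter::try_new("warn,conduit=info,ruma_state_res=debug")
        .expect("directive string should parse");
    println!("{filter}");
}
```

An invalid directive string surfaces as the "Invalid log level filter specified" reply rather than a panic, since `change_log_level` matches on the `EnvFilter::try_new` result.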

View file

@ -0,0 +1,142 @@
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, EventId, RoomId, ServerName};
use self::debug_commands::{
change_log_level, force_device_list_updates, get_auth_chain, get_pdu, get_remote_pdu, get_remote_pdu_list,
get_room_state, parse_pdu, ping, sign_json, verify_json,
};
use crate::Result;
pub(crate) mod debug_commands;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum DebugCommand {
/// - Get the auth_chain of a PDU
GetAuthChain {
/// An event ID (the $ character followed by the base64 reference hash)
event_id: Box<EventId>,
},
/// - Parse and print a PDU from a JSON
///
/// The PDU event is only checked for validity and is not added to the
/// database.
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.
ParsePdu,
/// - Retrieve and print a PDU by ID from the conduwuit database
GetPdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: Box<EventId>,
},
/// - Attempts to retrieve a PDU from a remote server. Inserts it into our
/// database/timeline if found and we do not have this PDU already
/// (following normal event auth rules, handles it as an incoming PDU).
GetRemotePdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: Box<EventId>,
/// Argument for us to attempt to fetch the event from the
/// specified remote server.
server: Box<ServerName>,
},
/// - Same as `get-remote-pdu` but accepts a code block containing a
/// newline-delimited list of PDU event IDs and a single server to fetch them from
GetRemotePduList {
/// Argument for us to attempt to fetch all the events from the
/// specified remote server.
server: Box<ServerName>,
/// If set, ignores errors, else stops at the first error/failure.
#[arg(short, long)]
force: bool,
},
/// - Gets all the room state events for the specified room.
///
/// This is functionally equivalent to `GET
/// /_matrix/client/v3/rooms/{roomid}/state`, except the admin command does
/// *not* check if the sender user is allowed to see state events. This is
/// done because it's implied that server admins here have database access
/// and can see/get room info themselves anyways if they were malicious
/// admins.
///
/// Of course the check is still done on the actual client API.
GetRoomState {
/// Room ID
room_id: Box<RoomId>,
},
/// - Sends a federation request to the remote server's
/// `/_matrix/federation/v1/version` endpoint and measures the latency it
/// took for the server to respond
Ping {
server: Box<ServerName>,
},
/// - Forces device lists for all local and remote users to be updated (as
/// having new keys available)
ForceDeviceListUpdates,
/// - Change tracing log level/filter on the fly
///
/// This accepts the same format as the `log` config option.
ChangeLogLevel {
/// Log level/filter
filter: Option<String>,
/// Resets the log level/filter to the one in your config
#[arg(short, long)]
reset: bool,
},
/// - Sign a JSON blob with this server's signing keys
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.
SignJson,
/// - Verify json signatures
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.
VerifyJson,
}
pub(crate) async fn process(command: DebugCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
DebugCommand::GetAuthChain {
event_id,
} => get_auth_chain(body, event_id).await?,
DebugCommand::ParsePdu => parse_pdu(body).await?,
DebugCommand::GetPdu {
event_id,
} => get_pdu(body, event_id).await?,
DebugCommand::GetRemotePdu {
event_id,
server,
} => get_remote_pdu(body, event_id, server).await?,
DebugCommand::GetRoomState {
room_id,
} => get_room_state(body, room_id).await?,
DebugCommand::Ping {
server,
} => ping(body, server).await?,
DebugCommand::ForceDeviceListUpdates => force_device_list_updates(body).await?,
DebugCommand::ChangeLogLevel {
filter,
reset,
} => change_log_level(body, filter, reset).await?,
DebugCommand::SignJson => sign_json(body).await?,
DebugCommand::VerifyJson => verify_json(body).await?,
DebugCommand::GetRemotePduList {
server,
force,
} => get_remote_pdu_list(body, server, force).await?,
})
}
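
For `GetRemotePduList`, the code block below the command is expected to contain one event ID per line; `get_remote_pdu_list` parses each line with `EventId::parse` and silently drops anything that fails to parse. A sketch of that filtering (the event ID is made up for illustration):

```rust
use ruma::{EventId, OwnedEventId};

fn main() {
    // One candidate event ID per line, as found between the code fences;
    // lines that fail to parse are dropped silently.
    let lines = ["$Wi-GlUJUzGi5LyFOtgv9r_mdQzpmn_cDjI5EKBHr2Tw", "not an event id"];
    let ids: Vec<OwnedEventId> = lines
        .iter()
        .filter_map(|line| EventId::parse(*line).ok())
        .collect();
    assert_eq!(ids.len(), 1);
}
```

With `--force`, failures while fetching individual PDUs are ignored as well; without it, the first failed `get_remote_pdu` call aborts the run.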

View file

@ -1,172 +0,0 @@
use std::{collections::BTreeMap, fmt::Write as _};
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName};
use tokio::sync::RwLock;
use crate::{services, utils::HtmlEscape, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum FederationCommand {
/// - List all rooms we are currently handling an incoming pdu from
IncomingFederation,
/// - Disables incoming federation handling for a room.
DisableRoom {
room_id: Box<RoomId>,
},
/// - Enables incoming federation handling for a room again.
EnableRoom {
room_id: Box<RoomId>,
},
/// - Verify json signatures
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.
SignJson,
/// - Verify json signatures
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.
VerifyJson,
/// - Fetch `/.well-known/matrix/support` from the specified server
///
/// Despite the name, this is not a federation endpoint and does not go
/// through the federation / server resolution process as per-spec this is
/// supposed to be served at the server_name.
///
/// Respecting homeservers put this file here for listing administration,
/// moderation, and security inquiries. This command provides a way to
/// easily fetch that information.
FetchSupportWellKnown {
server_name: Box<ServerName>,
},
}
pub(crate) async fn process(command: FederationCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
FederationCommand::DisableRoom {
room_id,
} => {
services().rooms.metadata.disable_room(&room_id, true)?;
Ok(RoomMessageEventContent::text_plain("Room disabled."))
},
FederationCommand::EnableRoom {
room_id,
} => {
services().rooms.metadata.disable_room(&room_id, false)?;
Ok(RoomMessageEventContent::text_plain("Room enabled."))
},
FederationCommand::IncomingFederation => {
let map = services().globals.roomid_federationhandletime.read().await;
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
for (r, (e, i)) in map.iter() {
let elapsed = i.elapsed();
let _ = writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60);
}
Ok(RoomMessageEventContent::text_plain(&msg))
},
FederationCommand::SignJson => {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) {
Ok(mut value) => {
ruma::signatures::sign_json(
services().globals.server_name().as_str(),
services().globals.keypair(),
&mut value,
)
.expect("our request json is what ruma expects");
let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json");
Ok(RoomMessageEventContent::text_plain(json_text))
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
}
} else {
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
},
FederationCommand::VerifyJson => {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) {
Ok(value) => {
let pub_key_map = RwLock::new(BTreeMap::new());
services()
.rooms
.event_handler
.fetch_required_signing_keys([&value], &pub_key_map)
.await?;
let pub_key_map = pub_key_map.read().await;
match ruma::signatures::verify_json(&pub_key_map, &value) {
Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Signature verification failed: {e}"
))),
}
},
Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
}
} else {
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
},
FederationCommand::FetchSupportWellKnown {
server_name,
} => {
let response = services()
.globals
.client
.default
.get(format!("https://{server_name}/.well-known/matrix/support"))
.send()
.await?;
let text = response.text().await?;
if text.is_empty() {
return Ok(RoomMessageEventContent::text_plain("Response text/body is empty."));
}
if text.len() > 1500 {
return Ok(RoomMessageEventContent::text_plain(
"Response text/body is over 1500 characters, assuming no support well-known.",
));
}
let json: serde_json::Value = match serde_json::from_str(&text) {
Ok(json) => json,
Err(_) => {
return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON."));
},
};
let pretty_json: String = match serde_json::to_string_pretty(&json) {
Ok(json) => json,
Err(_) => {
return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON."));
},
};
Ok(RoomMessageEventContent::text_html(
format!("Got JSON response:\n\n```json\n{pretty_json}\n```"),
format!(
"<p>Got JSON response:</p>\n<pre><code class=\"language-json\">{}\n</code></pre>\n",
HtmlEscape(&pretty_json)
),
))
},
}
}

View file

@ -0,0 +1,72 @@
use std::fmt::Write as _;
use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName};
use crate::{services, utils::HtmlEscape, Result};
pub(super) async fn disable_room(_body: Vec<&str>, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
services().rooms.metadata.disable_room(&room_id, true)?;
Ok(RoomMessageEventContent::text_plain("Room disabled."))
}
pub(super) async fn enable_room(_body: Vec<&str>, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
services().rooms.metadata.disable_room(&room_id, false)?;
Ok(RoomMessageEventContent::text_plain("Room enabled."))
}
pub(super) async fn incoming_federation(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let map = services().globals.roomid_federationhandletime.read().await;
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
for (r, (e, i)) in map.iter() {
let elapsed = i.elapsed();
let _ = writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60);
}
Ok(RoomMessageEventContent::text_plain(&msg))
}
pub(super) async fn fetch_support_well_known(
_body: Vec<&str>, server_name: Box<ServerName>,
) -> Result<RoomMessageEventContent> {
let response = services()
.globals
.client
.default
.get(format!("https://{server_name}/.well-known/matrix/support"))
.send()
.await?;
let text = response.text().await?;
if text.is_empty() {
return Ok(RoomMessageEventContent::text_plain("Response text/body is empty."));
}
if text.len() > 1500 {
return Ok(RoomMessageEventContent::text_plain(
"Response text/body is over 1500 characters, assuming no support well-known.",
));
}
let json: serde_json::Value = match serde_json::from_str(&text) {
Ok(json) => json,
Err(_) => {
return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON."));
},
};
let pretty_json: String = match serde_json::to_string_pretty(&json) {
Ok(json) => json,
Err(_) => {
return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON."));
},
};
Ok(RoomMessageEventContent::text_html(
format!("Got JSON response:\n\n```json\n{pretty_json}\n```"),
format!(
"<p>Got JSON response:</p>\n<pre><code class=\"language-json\">{}\n</code></pre>\n",
HtmlEscape(&pretty_json)
),
))
}
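
`fetch_support_well_known` accepts any JSON body under 1500 characters and simply pretty-prints it back; for a well-behaved server the document will roughly follow the spec's support well-known shape, sketched here with made-up values:

```rust
use serde_json::json;

fn main() {
    // Roughly the shape a server might publish at
    // https://<server_name>/.well-known/matrix/support; values are made up.
    let support = json!({
        "contacts": [
            {
                "matrix_id": "@admin:example.com",
                "email_address": "admin@example.com",
                "role": "m.role.admin"
            }
        ],
        "support_page": "https://example.com/support"
    });
    println!("{}", serde_json::to_string_pretty(&support).unwrap());
}
```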

View file

@ -0,0 +1,52 @@
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName};
use self::federation_commands::{disable_room, enable_room, fetch_support_well_known, incoming_federation};
use crate::Result;
pub(crate) mod federation_commands;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum FederationCommand {
/// - List all rooms we are currently handling an incoming pdu from
IncomingFederation,
/// - Disables incoming federation handling for a room.
DisableRoom {
room_id: Box<RoomId>,
},
/// - Enables incoming federation handling for a room again.
EnableRoom {
room_id: Box<RoomId>,
},
/// - Fetch `/.well-known/matrix/support` from the specified server
///
/// Despite the name, this is not a federation endpoint and does not go
/// through the federation / server resolution process as per-spec this is
/// supposed to be served at the server_name.
///
/// Homeservers that respect this convention publish the file to list contacts
/// for administration, moderation, and security inquiries. This command
/// provides a way to easily fetch that information.
FetchSupportWellKnown {
server_name: Box<ServerName>,
},
}
pub(crate) async fn process(command: FederationCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
FederationCommand::DisableRoom {
room_id,
} => disable_room(body, room_id).await?,
FederationCommand::EnableRoom {
room_id,
} => enable_room(body, room_id).await?,
FederationCommand::IncomingFederation => incoming_federation(body).await?,
FederationCommand::FetchSupportWellKnown {
server_name,
} => fetch_support_well_known(body, server_name).await?,
})
}

View file

@ -0,0 +1,26 @@
use ruma::events::room::message::RoomMessageEventContent;
use crate::{services, Result};
/// Uses the iterator in `src/database/key_value/users.rs` to iterate over
/// every user in our database (remote and local). Reports the total count and
/// how many entries were errors, if any.
pub(super) async fn check_all_users(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let timer = tokio::time::Instant::now();
let results = services().users.db.iter();
let query_time = timer.elapsed();
let users = results.collect::<Vec<_>>();
let total = users.len();
let err_count = users.iter().filter(|user| user.is_err()).count();
let ok_count = users.iter().filter(|user| user.is_ok()).count();
let message = format!(
"Database query completed in {query_time:?}:\n\n```\nTotal entries: {:?}\nFailure/Invalid user count: \
{:?}\nSuccess/Valid user count: {:?}\n```",
total, err_count, ok_count
);
Ok(RoomMessageEventContent::notice_html(message, String::new()))
}
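
As a design note only (not part of this commit), the ok/err tally in `check_all_users` could also be done in a single pass over the iterator instead of collecting and filtering twice; a sketch with stand-in results:

```rust
fn main() {
    // Stand-ins for the Result items yielded by the user iterator.
    let results: Vec<Result<&str, &str>> =
        vec![Ok("@a:example.com"), Err("invalid"), Ok("@b:example.com")];
    let (ok_count, err_count) = results.iter().fold((0usize, 0usize), |(ok, err), r| match r {
        Ok(_) => (ok + 1, err),
        Err(_) => (ok, err + 1),
    });
    assert_eq!((ok_count, err_count), (2, 1));
}
```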

View file

@ -0,0 +1,19 @@
use clap::Subcommand;
use ruma::events::room::message::RoomMessageEventContent;
use self::fsck_commands::check_all_users;
use crate::Result;
pub(crate) mod fsck_commands;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum FsckCommand {
CheckAllUsers,
}
pub(crate) async fn process(command: FsckCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
FsckCommand::CheckAllUsers => check_all_users(body).await?,
})
}

View file

@ -1,216 +0,0 @@
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, EventId};
use tracing::{debug, info};
use crate::{service::admin::MxcUri, services, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum MediaCommand {
/// - Deletes a single media file from our database and on the filesystem
/// via a single MXC URL
Delete {
/// The MXC URL to delete
#[arg(long)]
mxc: Option<Box<MxcUri>>,
/// - The message event ID which contains the media and thumbnail MXC
/// URLs
#[arg(long)]
event_id: Option<Box<EventId>>,
},
/// - Deletes a codeblock list of MXC URLs from our database and on the
/// filesystem
DeleteList,
/// - Deletes all remote media in the last X amount of time using filesystem
/// metadata first created at date.
DeletePastRemoteMedia {
/// - The duration (at or after), e.g. "5m" to delete all media in the
/// past 5 minutes
duration: String,
},
}
pub(crate) async fn process(command: MediaCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
MediaCommand::Delete {
mxc,
event_id,
} => {
if event_id.is_some() && mxc.is_some() {
return Ok(RoomMessageEventContent::text_plain(
"Please specify either an MXC or an event ID, not both.",
));
}
if let Some(mxc) = mxc {
if !mxc.to_string().starts_with("mxc://") {
return Ok(RoomMessageEventContent::text_plain("MXC provided is not valid."));
}
debug!("Got MXC URL: {}", mxc);
services().media.delete(mxc.to_string()).await?;
return Ok(RoomMessageEventContent::text_plain(
"Deleted the MXC from our database and on our filesystem.",
));
} else if let Some(event_id) = event_id {
debug!("Got event ID to delete media from: {}", event_id);
let mut mxc_urls = vec![];
let mut mxc_deletion_count = 0;
// parsing the PDU for any MXC URLs begins here
if let Some(event_json) = services().rooms.timeline.get_pdu_json(&event_id)? {
if let Some(content_key) = event_json.get("content") {
debug!("Event ID has \"content\".");
let content_obj = content_key.as_object();
if let Some(content) = content_obj {
// 1. attempts to parse the "url" key
debug!("Attempting to go into \"url\" key for main media file");
if let Some(url) = content.get("url") {
debug!("Got a URL in the event ID {event_id}: {url}");
if url.to_string().starts_with("\"mxc://") {
debug!("Pushing URL {} to list of MXCs to delete", url);
let final_url = url.to_string().replace('"', "");
mxc_urls.push(final_url);
} else {
info!(
"Found a URL in the event ID {event_id} but did not start with mxc://, \
ignoring"
);
}
}
// 2. attempts to parse the "info" key
debug!("Attempting to go into \"info\" key for thumbnails");
if let Some(info_key) = content.get("info") {
debug!("Event ID has \"info\".");
let info_obj = info_key.as_object();
if let Some(info) = info_obj {
if let Some(thumbnail_url) = info.get("thumbnail_url") {
debug!("Found a thumbnail_url in info key: {thumbnail_url}");
if thumbnail_url.to_string().starts_with("\"mxc://") {
debug!("Pushing thumbnail URL {} to list of MXCs to delete", thumbnail_url);
let final_thumbnail_url = thumbnail_url.to_string().replace('"', "");
mxc_urls.push(final_thumbnail_url);
} else {
info!(
"Found a thumbnail URL in the event ID {event_id} but did not start \
with mxc://, ignoring"
);
}
} else {
info!("No \"thumbnail_url\" key in \"info\" key, assuming no thumbnails.");
}
}
}
// 3. attempts to parse the "file" key
debug!("Attempting to go into \"file\" key");
if let Some(file_key) = content.get("file") {
debug!("Event ID has \"file\".");
let file_obj = file_key.as_object();
if let Some(file) = file_obj {
if let Some(url) = file.get("url") {
debug!("Found url in file key: {url}");
if url.to_string().starts_with("\"mxc://") {
debug!("Pushing URL {} to list of MXCs to delete", url);
let final_url = url.to_string().replace('"', "");
mxc_urls.push(final_url);
} else {
info!(
"Found a URL in the event ID {event_id} but did not start with \
mxc://, ignoring"
);
}
} else {
info!("No \"url\" key in \"file\" key.");
}
}
}
} else {
return Ok(RoomMessageEventContent::text_plain(
"Event ID does not have a \"content\" key or failed parsing the event ID JSON.",
));
}
} else {
return Ok(RoomMessageEventContent::text_plain(
"Event ID does not have a \"content\" key, this is not a message or an event type that \
contains media.",
));
}
} else {
return Ok(RoomMessageEventContent::text_plain(
"Event ID does not exist or is not known to us.",
));
}
if mxc_urls.is_empty() {
// we shouldn't get here (should have errored earlier) but just in case for
// whatever reason we do...
info!("Parsed event ID {event_id} but did not contain any MXC URLs.");
return Ok(RoomMessageEventContent::text_plain("Parsed event ID but found no MXC URLs."));
}
for mxc_url in mxc_urls {
services().media.delete(mxc_url).await?;
mxc_deletion_count += 1;
}
return Ok(RoomMessageEventContent::text_plain(format!(
"Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from event ID \
{event_id}."
)));
}
Ok(RoomMessageEventContent::text_plain(
"Please specify either an MXC using --mxc or an event ID using --event-id of the message containing \
an image. See --help for details.",
))
},
MediaCommand::DeleteList => {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let mxc_list = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
let mut mxc_deletion_count = 0;
for mxc in mxc_list {
debug!("Deleting MXC {} in bulk", mxc);
services().media.delete(mxc.to_owned()).await?;
mxc_deletion_count += 1;
}
return Ok(RoomMessageEventContent::text_plain(format!(
"Finished bulk MXC deletion, deleted {} total MXCs from our database and the filesystem.",
mxc_deletion_count
)));
}
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
},
MediaCommand::DeletePastRemoteMedia {
duration,
} => {
let deleted_count = services()
.media
.delete_all_remote_media_at_after_time(duration)
.await?;
Ok(RoomMessageEventContent::text_plain(format!(
"Deleted {} total files.",
deleted_count
)))
},
}
}

View file

@ -0,0 +1,171 @@
use ruma::{events::room::message::RoomMessageEventContent, EventId};
use tracing::{debug, info};
use crate::{service::admin::MxcUri, services, Result};
pub(super) async fn delete(
_body: Vec<&str>, mxc: Option<Box<MxcUri>>, event_id: Option<Box<EventId>>,
) -> Result<RoomMessageEventContent> {
if event_id.is_some() && mxc.is_some() {
return Ok(RoomMessageEventContent::text_plain(
"Please specify either an MXC or an event ID, not both.",
));
}
if let Some(mxc) = mxc {
debug!("Got MXC URL: {mxc}");
services().media.delete(mxc.to_string()).await?;
return Ok(RoomMessageEventContent::text_plain(
"Deleted the MXC from our database and on our filesystem.",
));
} else if let Some(event_id) = event_id {
debug!("Got event ID to delete media from: {event_id}");
let mut mxc_urls = vec![];
let mut mxc_deletion_count = 0;
// parsing the PDU for any MXC URLs begins here
if let Some(event_json) = services().rooms.timeline.get_pdu_json(&event_id)? {
if let Some(content_key) = event_json.get("content") {
debug!("Event ID has \"content\".");
let content_obj = content_key.as_object();
if let Some(content) = content_obj {
// 1. attempts to parse the "url" key
debug!("Attempting to go into \"url\" key for main media file");
if let Some(url) = content.get("url") {
debug!("Got a URL in the event ID {event_id}: {url}");
if url.to_string().starts_with("\"mxc://") {
debug!("Pushing URL {url} to list of MXCs to delete");
let final_url = url.to_string().replace('"', "");
mxc_urls.push(final_url);
} else {
info!("Found a URL in the event ID {event_id} but did not start with mxc://, ignoring");
}
}
// 2. attempts to parse the "info" key
debug!("Attempting to go into \"info\" key for thumbnails");
if let Some(info_key) = content.get("info") {
debug!("Event ID has \"info\".");
let info_obj = info_key.as_object();
if let Some(info) = info_obj {
if let Some(thumbnail_url) = info.get("thumbnail_url") {
debug!("Found a thumbnail_url in info key: {thumbnail_url}");
if thumbnail_url.to_string().starts_with("\"mxc://") {
debug!("Pushing thumbnail URL {thumbnail_url} to list of MXCs to delete");
let final_thumbnail_url = thumbnail_url.to_string().replace('"', "");
mxc_urls.push(final_thumbnail_url);
} else {
info!(
"Found a thumbnail URL in the event ID {event_id} but did not start with \
mxc://, ignoring"
);
}
} else {
info!("No \"thumbnail_url\" key in \"info\" key, assuming no thumbnails.");
}
}
}
// 3. attempts to parse the "file" key
debug!("Attempting to go into \"file\" key");
if let Some(file_key) = content.get("file") {
debug!("Event ID has \"file\".");
let file_obj = file_key.as_object();
if let Some(file) = file_obj {
if let Some(url) = file.get("url") {
debug!("Found url in file key: {url}");
if url.to_string().starts_with("\"mxc://") {
debug!("Pushing URL {url} to list of MXCs to delete");
let final_url = url.to_string().replace('"', "");
mxc_urls.push(final_url);
} else {
info!(
"Found a URL in the event ID {event_id} but did not start with mxc://, \
ignoring"
);
}
} else {
info!("No \"url\" key in \"file\" key.");
}
}
}
} else {
return Ok(RoomMessageEventContent::text_plain(
"Event ID does not have a \"content\" key or failed parsing the event ID JSON.",
));
}
} else {
return Ok(RoomMessageEventContent::text_plain(
"Event ID does not have a \"content\" key, this is not a message or an event type that contains \
media.",
));
}
} else {
return Ok(RoomMessageEventContent::text_plain(
"Event ID does not exist or is not known to us.",
));
}
if mxc_urls.is_empty() {
// we shouldn't get here (should have errored earlier) but just in case for
// whatever reason we do...
info!("Parsed event ID {event_id} but did not contain any MXC URLs.");
return Ok(RoomMessageEventContent::text_plain("Parsed event ID but found no MXC URLs."));
}
for mxc_url in mxc_urls {
services().media.delete(mxc_url).await?;
mxc_deletion_count += 1;
}
return Ok(RoomMessageEventContent::text_plain(format!(
"Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from event ID {event_id}."
)));
}
Ok(RoomMessageEventContent::text_plain(
"Please specify either an MXC using --mxc or an event ID using --event-id of the message containing an image. \
See --help for details.",
))
}
pub(super) async fn delete_list(body: Vec<&str>) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let mxc_list = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
let mut mxc_deletion_count = 0;
for mxc in mxc_list {
debug!("Deleting MXC {mxc} in bulk");
services().media.delete(mxc.to_owned()).await?;
mxc_deletion_count += 1;
}
return Ok(RoomMessageEventContent::text_plain(format!(
"Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database and the filesystem.",
)));
}
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
pub(super) async fn delete_past_remote_media(_body: Vec<&str>, duration: String) -> Result<RoomMessageEventContent> {
let deleted_count = services()
.media
.delete_all_remote_media_at_after_time(duration)
.await?;
Ok(RoomMessageEventContent::text_plain(format!(
"Deleted {deleted_count} total files.",
)))
}
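
For reference, a contrived content object showing the three places `delete` above looks for MXC URLs: the plain `url`, `info.thumbnail_url` for thumbnails, and `file.url` for encrypted media (a real event would not normally carry all three at once). The sketch uses `as_str()` instead of the `to_string()` plus quote-stripping in the current code, purely for illustration:

```rust
use serde_json::json;

fn main() {
    // Illustrative event content (not from this commit) with all three
    // MXC URL locations that `delete` inspects.
    let content = json!({
        "msgtype": "m.image",
        "body": "cat.png",
        "url": "mxc://example.com/abc123",
        "info": { "thumbnail_url": "mxc://example.com/thumb456" },
        "file": { "url": "mxc://example.com/enc789" }
    });

    let mut mxc_urls = Vec::new();
    for candidate in [
        content.get("url"),
        content.get("info").and_then(|info| info.get("thumbnail_url")),
        content.get("file").and_then(|file| file.get("url")),
    ] {
        if let Some(url) = candidate.and_then(|value| value.as_str()) {
            if url.starts_with("mxc://") {
                mxc_urls.push(url.to_owned());
            }
        }
    }
    assert_eq!(mxc_urls.len(), 3);
}
```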

View file

@ -0,0 +1,49 @@
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, EventId};
use self::media_commands::{delete, delete_list, delete_past_remote_media};
use crate::{service::admin::MxcUri, Result};
pub(crate) mod media_commands;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum MediaCommand {
/// - Deletes a single media file from our database and on the filesystem
/// via a single MXC URL
Delete {
/// The MXC URL to delete
#[arg(long)]
mxc: Option<Box<MxcUri>>,
/// - The message event ID which contains the media and thumbnail MXC
/// URLs
#[arg(long)]
event_id: Option<Box<EventId>>,
},
/// - Deletes a codeblock list of MXC URLs from our database and on the
/// filesystem
DeleteList,
/// - Deletes all remote media created in the last X amount of time, based on
/// the file's "created at" metadata on the filesystem.
DeletePastRemoteMedia {
/// - The duration (at or after), e.g. "5m" to delete all media in the
/// past 5 minutes
duration: String,
},
}
pub(crate) async fn process(command: MediaCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
MediaCommand::Delete {
mxc,
event_id,
} => delete(body, mxc, event_id).await?,
MediaCommand::DeleteList => delete_list(body).await?,
MediaCommand::DeletePastRemoteMedia {
duration,
} => delete_past_remote_media(body, duration).await?,
})
}

View file

@ -26,11 +26,12 @@ use serde_json::value::to_raw_value;
use tokio::sync::Mutex;
use tracing::{error, warn};
use self::fsck::FsckCommand;
use super::pdu::PduBuilder;
use crate::{
service::admin::{
appservice::AppserviceCommand, debug::DebugCommand, federation::FederationCommand, media::MediaCommand,
query::query::QueryCommand, room::RoomCommand, server::ServerCommand, user::UserCommand,
query::QueryCommand, room::RoomCommand, server::ServerCommand, user::UserCommand,
},
services, Error, Result,
};
@ -38,12 +39,10 @@ use crate::{
pub(crate) mod appservice;
pub(crate) mod debug;
pub(crate) mod federation;
pub(crate) mod fsck;
pub(crate) mod media;
pub(crate) mod query;
pub(crate) mod room;
pub(crate) mod room_alias;
pub(crate) mod room_directory;
pub(crate) mod room_moderation;
pub(crate) mod server;
pub(crate) mod user;
@ -84,6 +83,10 @@ enum AdminCommand {
#[command(subcommand)]
/// - Query all the database getters and iterators
Query(QueryCommand),
#[command(subcommand)]
/// - Low-level database consistency checks
Fsck(FsckCommand),
}
#[derive(Debug)]
@ -284,7 +287,8 @@ impl Service {
AdminCommand::Federation(command) => federation::process(command, body).await?,
AdminCommand::Server(command) => server::process(command, body).await?,
AdminCommand::Debug(command) => debug::process(command, body).await?,
AdminCommand::Query(command) => query::query::process(command, body).await?,
AdminCommand::Query(command) => query::process(command, body).await?,
AdminCommand::Fsck(command) => fsck::process(command, body).await?,
};
Ok(reply_message_content)

View file

@ -1,38 +1,10 @@
use clap::Subcommand;
use ruma::{
events::{room::message::RoomMessageEventContent, RoomAccountDataEventType},
RoomId, UserId,
};
use ruma::events::room::message::RoomMessageEventContent;
use super::AccountData;
use crate::{services, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/account_data.rs
pub(crate) enum AccountData {
/// - Returns all changes to the account data that happened after `since`.
ChangesSince {
/// Full user ID
user_id: Box<UserId>,
/// UNIX timestamp since (u64)
since: u64,
/// Optional room ID of the account data
room_id: Option<Box<RoomId>>,
},
/// - Searches the account data for a specific kind.
Get {
/// Full user ID
user_id: Box<UserId>,
/// Account data event type
kind: RoomAccountDataEventType,
/// Optional room ID of the account data
room_id: Option<Box<RoomId>>,
},
}
/// All the getters and iterators from src/database/key_value/account_data.rs
pub(crate) async fn account_data(subcommand: AccountData) -> Result<RoomMessageEventContent> {
pub(super) async fn account_data(subcommand: AccountData) -> Result<RoomMessageEventContent> {
match subcommand {
AccountData::ChangesSince {
user_id,

View file

@ -1,21 +1,10 @@
use clap::Subcommand;
use ruma::events::room::message::RoomMessageEventContent;
use super::Appservice;
use crate::{services, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/appservice.rs
pub(crate) enum Appservice {
/// - Gets the appservice registration info/details from the ID as a string
GetRegistration {
/// Appservice registration ID
appservice_id: Box<str>,
},
}
/// All the getters and iterators from src/database/key_value/appservice.rs
pub(crate) async fn appservice(subcommand: Appservice) -> Result<RoomMessageEventContent> {
pub(super) async fn appservice(subcommand: Appservice) -> Result<RoomMessageEventContent> {
match subcommand {
Appservice::GetRegistration {
appservice_id,

View file

@ -1,29 +1,10 @@
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, ServerName};
use ruma::events::room::message::RoomMessageEventContent;
use super::Globals;
use crate::{services, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/globals.rs
pub(crate) enum Globals {
DatabaseVersion,
CurrentCount,
LastCheckForUpdatesId,
LoadKeypair,
/// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
/// for the server.
SigningKeysFor {
origin: Box<ServerName>,
},
}
/// All the getters and iterators from src/database/key_value/globals.rs
pub(crate) async fn globals(subcommand: Globals) -> Result<RoomMessageEventContent> {
pub(super) async fn globals(subcommand: Globals) -> Result<RoomMessageEventContent> {
match subcommand {
Globals::DatabaseVersion => {
let timer = tokio::time::Instant::now();

View file

@ -1,8 +1,175 @@
#[allow(clippy::module_inception)]
pub(crate) mod query;
pub(crate) mod account_data;
pub(crate) mod appservice;
pub(crate) mod globals;
pub(crate) mod presence;
pub(crate) mod room_alias;
pub(crate) mod sending;
pub(crate) mod users;
use clap::Subcommand;
use ruma::{
events::{room::message::RoomMessageEventContent, RoomAccountDataEventType},
RoomAliasId, RoomId, ServerName, UserId,
};
use self::{
account_data::account_data, appservice::appservice, globals::globals, presence::presence, room_alias::room_alias,
sending::sending, users::users,
};
use crate::Result;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// Query tables from database
pub(crate) enum QueryCommand {
/// - account_data.rs iterators and getters
#[command(subcommand)]
AccountData(AccountData),
/// - appservice.rs iterators and getters
#[command(subcommand)]
Appservice(Appservice),
/// - presence.rs iterators and getters
#[command(subcommand)]
Presence(Presence),
/// - rooms/alias.rs iterators and getters
#[command(subcommand)]
RoomAlias(RoomAlias),
/// - globals.rs iterators and getters
#[command(subcommand)]
Globals(Globals),
/// - sending.rs iterators and getters
#[command(subcommand)]
Sending(Sending),
/// - users.rs iterators and getters
#[command(subcommand)]
Users(Users),
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/account_data.rs
pub(crate) enum AccountData {
/// - Returns all changes to the account data that happened after `since`.
ChangesSince {
/// Full user ID
user_id: Box<UserId>,
/// UNIX timestamp since (u64)
since: u64,
/// Optional room ID of the account data
room_id: Option<Box<RoomId>>,
},
/// - Searches the account data for a specific kind.
Get {
/// Full user ID
user_id: Box<UserId>,
/// Account data event type
kind: RoomAccountDataEventType,
/// Optional room ID of the account data
room_id: Option<Box<RoomId>>,
},
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/appservice.rs
pub(crate) enum Appservice {
/// - Gets the appservice registration info/details from the ID as a string
GetRegistration {
/// Appservice registration ID
appservice_id: Box<str>,
},
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/presence.rs
pub(crate) enum Presence {
/// - Returns the latest presence event for the given user.
GetPresence {
/// Full user ID
user_id: Box<UserId>,
},
/// - Iterator of the most recent presence updates that happened after the
/// event with id `since`.
PresenceSince {
/// UNIX timestamp since (u64)
since: u64,
},
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/rooms/alias.rs
pub(crate) enum RoomAlias {
ResolveLocalAlias {
/// Full room alias
alias: Box<RoomAliasId>,
},
/// - Iterator of all our local room aliases for the room ID
LocalAliasesForRoom {
/// Full room ID
room_id: Box<RoomId>,
},
/// - Iterator of all our local aliases in our database with their room IDs
AllLocalAliases,
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/globals.rs
pub(crate) enum Globals {
DatabaseVersion,
CurrentCount,
LastCheckForUpdatesId,
LoadKeypair,
/// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
/// for the server.
SigningKeysFor {
origin: Box<ServerName>,
},
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/sending.rs
pub(crate) enum Sending {
ActiveRequests,
GetLatestEduCount {
server_name: Box<ServerName>,
},
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/users.rs
pub(crate) enum Users {
Iter,
}
/// Processes admin query commands
pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
QueryCommand::AccountData(command) => account_data(command).await?,
QueryCommand::Appservice(command) => appservice(command).await?,
QueryCommand::Presence(command) => presence(command).await?,
QueryCommand::RoomAlias(command) => room_alias(command).await?,
QueryCommand::Globals(command) => globals(command).await?,
QueryCommand::Sending(command) => sending(command).await?,
QueryCommand::Users(command) => users(command).await?,
})
}
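
The per-table handlers dispatched here all follow the same shape: start a timer, run the database call, then report the elapsed time and the results as both plain text and HTML. A minimal standalone sketch of that pattern, using std::time::Instant and plain Strings in place of the tokio timer and RoomMessageEventContent (both substitutions are assumptions for illustration):

use std::time::Instant;

// Hypothetical stand-in for a database getter; the real handlers call into
// services() instead.
fn fetch_rows() -> Vec<(String, u64)> {
	vec![("key".to_owned(), 42)]
}

fn main() {
	let timer = Instant::now();
	let results = fetch_rows();
	let query_time = timer.elapsed();

	// Plain-text and HTML variants of the same report, mirroring
	// RoomMessageEventContent::text_html in the real handlers.
	let plain = format!("Query completed in {query_time:?}:\n\n```\n{results:?}\n```");
	let html = format!("<p>Query completed in {query_time:?}:</p>\n<pre><code>{results:?}\n</code></pre>");
	println!("{plain}\n{html}");
}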

View file

@ -1,28 +1,10 @@
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, UserId};
use ruma::events::room::message::RoomMessageEventContent;
use super::Presence;
use crate::{services, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/presence.rs
pub(crate) enum Presence {
/// - Returns the latest presence event for the given user.
GetPresence {
/// Full user ID
user_id: Box<UserId>,
},
/// - Iterator of the most recent presence updates that happened after the
/// event with id `since`.
PresenceSince {
/// UNIX timestamp since (u64)
since: u64,
},
}
/// All the getters and iterators in key_value/presence.rs
pub(crate) async fn presence(subcommand: Presence) -> Result<RoomMessageEventContent> {
pub(super) async fn presence(subcommand: Presence) -> Result<RoomMessageEventContent> {
match subcommand {
Presence::GetPresence {
user_id,

View file

@ -1,48 +0,0 @@
use clap::Subcommand;
use ruma::events::room::message::RoomMessageEventContent;
use super::{
account_data::{account_data, AccountData},
appservice::{appservice, Appservice},
globals::{globals, Globals},
presence::{presence, Presence},
room_alias::{room_alias, RoomAlias},
};
use crate::Result;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// Query tables from database
pub(crate) enum QueryCommand {
/// - account_data.rs iterators and getters
#[command(subcommand)]
AccountData(AccountData),
/// - appservice.rs iterators and getters
#[command(subcommand)]
Appservice(Appservice),
/// - presence.rs iterators and getters
#[command(subcommand)]
Presence(Presence),
/// - rooms/alias.rs iterators and getters
#[command(subcommand)]
RoomAlias(RoomAlias),
/// - globals.rs iterators and getters
#[command(subcommand)]
Globals(Globals),
}
/// Processes admin query commands
#[allow(non_snake_case)]
pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
QueryCommand::AccountData(AccountData) => account_data(AccountData).await,
QueryCommand::Appservice(Appservice) => appservice(Appservice).await,
QueryCommand::Presence(Presence) => presence(Presence).await,
QueryCommand::RoomAlias(RoomAlias) => room_alias(RoomAlias).await,
QueryCommand::Globals(Globals) => globals(Globals).await,
}
}

View file

@ -1,29 +1,10 @@
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId};
use ruma::events::room::message::RoomMessageEventContent;
use super::RoomAlias;
use crate::{services, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// All the getters and iterators from src/database/key_value/rooms/alias.rs
pub(crate) enum RoomAlias {
ResolveLocalAlias {
/// Full room alias
alias: Box<RoomAliasId>,
},
/// - Iterator of all our local room aliases for the room ID
LocalAliasesForRoom {
/// Full room ID
room_id: Box<RoomId>,
},
/// - Iterator of all our local aliases in our database with their room IDs
AllLocalAliases,
}
/// All the getters and iterators in src/database/key_value/rooms/alias.rs
pub(crate) async fn room_alias(subcommand: RoomAlias) -> Result<RoomMessageEventContent> {
pub(super) async fn room_alias(subcommand: RoomAlias) -> Result<RoomMessageEventContent> {
match subcommand {
RoomAlias::ResolveLocalAlias {
alias,

View file

@ -0,0 +1,40 @@
use ruma::events::room::message::RoomMessageEventContent;
use super::Sending;
use crate::{services, Result};
/// All the getters and iterators in key_value/sending.rs
pub(super) async fn sending(subcommand: Sending) -> Result<RoomMessageEventContent> {
match subcommand {
Sending::ActiveRequests => {
let timer = tokio::time::Instant::now();
let results = services().sending.db.active_requests();
let query_time = timer.elapsed();
let active_requests: Result<Vec<(_, _, _)>> = results.collect();
Ok(RoomMessageEventContent::text_html(
format!("Query completed in {query_time:?}:\n\n```\n{:?}```", active_requests),
format!(
"<p>Query completed in {query_time:?}:</p>\n<pre><code>{:?}\n</code></pre>",
active_requests
),
))
},
Sending::GetLatestEduCount {
server_name,
} => {
let timer = tokio::time::Instant::now();
let results = services().sending.db.get_latest_educount(&server_name);
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::text_html(
format!("Query completed in {query_time:?}:\n\n```\n{:?}```", results),
format!(
"<p>Query completed in {query_time:?}:</p>\n<pre><code>{:?}\n</code></pre>",
results
),
))
},
}
}
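
A detail worth noting in ActiveRequests above: the iterator of per-row Results is collected into a single Result<Vec<_>>, so one failed row turns the whole query into an error. A small self-contained sketch of that collect pattern (the tuple and error types are placeholders):

fn main() {
	let rows: Vec<Result<(u64, String), String>> = vec![Ok((1, "a".to_owned())), Ok((2, "b".to_owned()))];
	let collected: Result<Vec<(u64, String)>, String> = rows.into_iter().collect();
	assert!(collected.is_ok());

	// A single failed row fails the whole collection, which the handler then
	// prints via its Debug formatting.
	let with_error: Vec<Result<(u64, String), String>> = vec![Ok((1, "a".to_owned())), Err("db error".to_owned())];
	let collected: Result<Vec<(u64, String)>, String> = with_error.into_iter().collect();
	assert!(collected.is_err());
}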

View file

@ -0,0 +1,25 @@
use ruma::events::room::message::RoomMessageEventContent;
use super::Users;
use crate::{services, Result};
/// All the getters and iterators in key_value/users.rs
pub(super) async fn users(subcommand: Users) -> Result<RoomMessageEventContent> {
match subcommand {
Users::Iter => {
let timer = tokio::time::Instant::now();
let results = services().users.db.iter();
let query_time = timer.elapsed();
let users = results.collect::<Vec<_>>();
Ok(RoomMessageEventContent::text_html(
format!("Query completed in {query_time:?}:\n\n```\n{:?}```", users),
format!(
"<p>Query completed in {query_time:?}:</p>\n<pre><code>{:?}\n</code></pre>",
users
),
))
},
}
}

View file

@ -1,96 +0,0 @@
use std::fmt::Write as _;
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId};
use crate::{
service::admin::{
escape_html, get_room_info, room_alias, room_alias::RoomAliasCommand, room_directory,
room_directory::RoomDirectoryCommand, room_moderation, room_moderation::RoomModerationCommand, PAGE_SIZE,
},
services, Result,
};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum RoomCommand {
/// - List all rooms the server knows about
List {
page: Option<usize>,
},
#[command(subcommand)]
/// - Manage moderation of remote or local rooms
Moderation(RoomModerationCommand),
#[command(subcommand)]
/// - Manage rooms' aliases
Alias(RoomAliasCommand),
#[command(subcommand)]
/// - Manage the room directory
Directory(RoomDirectoryCommand),
}
pub(crate) async fn process(command: RoomCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
RoomCommand::Alias(command) => room_alias::process(command, body).await,
RoomCommand::Directory(command) => room_directory::process(command, body).await,
RoomCommand::Moderation(command) => room_moderation::process(command, body).await,
RoomCommand::List {
page,
} => {
// TODO: i know there's a way to do this with clap, but i can't seem to find it
let page = page.unwrap_or(1);
let mut rooms = services()
.rooms
.metadata
.iter_ids()
.filter_map(Result::ok)
.map(|id: OwnedRoomId| get_room_info(&id))
.collect::<Vec<_>>();
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let rooms = rooms
.into_iter()
.skip(page.saturating_sub(1) * PAGE_SIZE)
.take(PAGE_SIZE)
.collect::<Vec<_>>();
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("No more rooms."));
};
let output_plain = format!(
"Rooms:\n{}",
rooms
.iter()
.map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
.collect::<Vec<_>>()
.join("\n")
);
let output_html = format!(
"<table><caption>Room list - page \
{page}</caption>\n<tr><th>id</th>\t<th>members</th>\t<th>name</th></tr>\n{}</table>",
rooms
.iter()
.fold(String::new(), |mut output, (id, members, name)| {
writeln!(
output,
"<tr><td>{}</td>\t<td>{}</td>\t<td>{}</td></tr>",
escape_html(id.as_ref()),
members,
escape_html(name)
)
.unwrap();
output
})
);
Ok(RoomMessageEventContent::text_html(output_plain, output_html))
},
}
}

View file

@ -0,0 +1,160 @@
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, RoomId, RoomOrAliasId};
use self::room_commands::list;
use crate::Result;
pub(crate) mod room_alias_commands;
pub(crate) mod room_commands;
pub(crate) mod room_directory_commands;
pub(crate) mod room_moderation_commands;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum RoomCommand {
/// - List all rooms the server knows about
List {
page: Option<usize>,
},
#[command(subcommand)]
/// - Manage moderation of remote or local rooms
Moderation(RoomModerationCommand),
#[command(subcommand)]
/// - Manage rooms' aliases
Alias(RoomAliasCommand),
#[command(subcommand)]
/// - Manage the room directory
Directory(RoomDirectoryCommand),
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum RoomAliasCommand {
/// - Make an alias point to a room.
Set {
#[arg(short, long)]
/// Set the alias even if a room is already using it
force: bool,
/// The room id to set the alias on
room_id: Box<RoomId>,
/// The alias localpart to use (`alias`, not `#alias:servername.tld`)
room_alias_localpart: String,
},
/// - Remove an alias
Remove {
/// The alias localpart to remove (`alias`, not `#alias:servername.tld`)
room_alias_localpart: String,
},
/// - Show which room is using an alias
Which {
/// The alias localpart to look up (`alias`, not
/// `#alias:servername.tld`)
room_alias_localpart: String,
},
/// - List aliases currently being used
List {
/// If set, only list the aliases for this room
room_id: Option<Box<RoomId>>,
},
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum RoomDirectoryCommand {
/// - Publish a room to the room directory
Publish {
/// The room id of the room to publish
room_id: Box<RoomId>,
},
/// - Unpublish a room from the room directory
Unpublish {
/// The room id of the room to unpublish
room_id: Box<RoomId>,
},
/// - List rooms that are published
List {
page: Option<usize>,
},
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum RoomModerationCommand {
/// - Bans a room from local users joining and evicts all our local users
/// from the room. Also blocks any invites (local and remote) for the
/// banned room.
///
/// Server admins (users in the conduwuit admin room) will not be evicted
/// and server admins can still join the room. To evict admins too, use
/// --force (also ignores errors). To disable incoming federation of the
/// room, use --disable-federation
/// --force (also ignores errors). To disable incoming federation of the
/// room, use --disable-federation
BanRoom {
#[arg(short, long)]
/// Evicts admins out of the room and ignores any potential errors when
/// making our local users leave the room
force: bool,
#[arg(long)]
/// Disables incoming federation of the room after banning and evicting
/// users
disable_federation: bool,
/// The room in the format of `!roomid:example.com` or a room alias in
/// the format of `#roomalias:example.com`
room: Box<RoomOrAliasId>,
},
/// - Bans a list of rooms (room IDs and room aliases) from a newline
/// delimited codeblock similar to `user deactivate-all`
BanListOfRooms {
#[arg(short, long)]
/// Evicts admins out of the room and ignores any potential errors when
/// making our local users leave the room
force: bool,
#[arg(long)]
/// Disables incoming federation of the room after banning and evicting
/// users
disable_federation: bool,
},
/// - Unbans a room to allow local users to join again
///
/// To re-enable incoming federation of the room, use --enable-federation
UnbanRoom {
#[arg(long)]
/// Enables incoming federation of the room after unbanning
enable_federation: bool,
/// The room in the format of `!roomid:example.com` or a room alias in
/// the format of `#roomalias:example.com`
room: Box<RoomOrAliasId>,
},
/// - List of all rooms we have banned
ListBannedRooms,
}
pub(crate) async fn process(command: RoomCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
RoomCommand::Alias(command) => room_alias_commands::process(command, body).await?,
RoomCommand::Directory(command) => room_directory_commands::process(command, body).await?,
RoomCommand::Moderation(command) => room_moderation_commands::process(command, body).await?,
RoomCommand::List {
page,
} => list(body, page).await?,
})
}
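
RoomCommand nests further Subcommand enums through #[command(subcommand)], so an admin message such as `room alias set ...` is routed through RoomCommand::Alias into the alias handler. A reduced sketch of that nesting, assuming clap's derive feature and made-up argument values (the admin service is assumed here to split the message body into argv-style tokens before parsing):

use clap::{Parser, Subcommand};

#[derive(Parser)]
struct Cli {
	#[command(subcommand)]
	command: RoomCommand,
}

#[derive(Subcommand)]
enum RoomCommand {
	/// List all rooms
	List { page: Option<usize> },
	/// Manage rooms' aliases
	#[command(subcommand)]
	Alias(RoomAliasCommand),
}

#[derive(Subcommand)]
enum RoomAliasCommand {
	/// Make an alias point to a room
	Set { room_id: String, room_alias_localpart: String },
}

fn main() {
	// "room alias set ..." style invocation; the first token plays the role
	// of the binary name for clap.
	let cli = Cli::parse_from(["room", "alias", "set", "!abc:example.com", "myalias"]);
	match cli.command {
		RoomCommand::List { page } => println!("list page {page:?}"),
		RoomCommand::Alias(RoomAliasCommand::Set { room_id, room_alias_localpart }) => {
			println!("set #{room_alias_localpart} -> {room_id}")
		},
	}
}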

View file

@ -1,46 +1,10 @@
use std::fmt::Write as _;
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId};
use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId};
use super::RoomAliasCommand;
use crate::{service::admin::escape_html, services, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum RoomAliasCommand {
/// - Make an alias point to a room.
Set {
#[arg(short, long)]
/// Set the alias even if a room is already using it
force: bool,
/// The room id to set the alias on
room_id: Box<RoomId>,
/// The alias localpart to use (`alias`, not `#alias:servername.tld`)
room_alias_localpart: String,
},
/// - Remove an alias
Remove {
/// The alias localpart to remove (`alias`, not `#alias:servername.tld`)
room_alias_localpart: String,
},
/// - Show which room is using an alias
Which {
/// The alias localpart to look up (`alias`, not
/// `#alias:servername.tld`)
room_alias_localpart: String,
},
/// - List aliases currently being used
List {
/// If set, only list the aliases for this room
room_id: Option<Box<RoomId>>,
},
}
pub(crate) async fn process(command: RoomAliasCommand, _body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
RoomAliasCommand::Set {

View file

@ -0,0 +1,59 @@
use std::fmt::Write as _;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId};
use crate::{
service::admin::{escape_html, get_room_info, PAGE_SIZE},
services, Result,
};
pub(super) async fn list(_body: Vec<&str>, page: Option<usize>) -> Result<RoomMessageEventContent> {
// TODO: i know there's a way to do this with clap, but i can't seem to find it
let page = page.unwrap_or(1);
let mut rooms = services()
.rooms
.metadata
.iter_ids()
.filter_map(Result::ok)
.map(|id: OwnedRoomId| get_room_info(&id))
.collect::<Vec<_>>();
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let rooms = rooms
.into_iter()
.skip(page.saturating_sub(1) * PAGE_SIZE)
.take(PAGE_SIZE)
.collect::<Vec<_>>();
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("No more rooms."));
};
let output_plain = format!(
"Rooms:\n{}",
rooms
.iter()
.map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
.collect::<Vec<_>>()
.join("\n")
);
let output_html = format!(
"<table><caption>Room list - page \
{page}</caption>\n<tr><th>id</th>\t<th>members</th>\t<th>name</th></tr>\n{}</table>",
rooms
.iter()
.fold(String::new(), |mut output, (id, members, name)| {
writeln!(
output,
"<tr><td>{}</td>\t<td>{}</td>\t<td>{}</td></tr>",
escape_html(id.as_ref()),
members,
escape_html(name)
)
.unwrap();
output
})
);
Ok(RoomMessageEventContent::text_html(output_plain, output_html))
}
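
The paging above is plain skip/take arithmetic: page is 1-based, and saturating_sub keeps a page value of 0 from underflowing. A worked standalone sketch, assuming PAGE_SIZE = 100 (the real constant lives in service::admin and may differ):

// Page bounds for a 1-based page number over `total` items.
const PAGE_SIZE: usize = 100;

fn page_bounds(page: usize, total: usize) -> (usize, usize) {
	let start = (page.saturating_sub(1) * PAGE_SIZE).min(total);
	let end = (start + PAGE_SIZE).min(total);
	(start, end)
}

fn main() {
	assert_eq!(page_bounds(1, 250), (0, 100));
	assert_eq!(page_bounds(3, 250), (200, 250));
	assert_eq!(page_bounds(4, 250), (250, 250)); // empty page: "No more rooms."
	println!("page bounds ok");
}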

View file

@ -1,47 +1,26 @@
use std::fmt::Write as _;
use clap::Subcommand;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId};
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId};
use super::RoomDirectoryCommand;
use crate::{
service::admin::{escape_html, get_room_info, PAGE_SIZE},
services, Result,
};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum RoomDirectoryCommand {
/// - Publish a room to the room directory
Publish {
/// The room id of the room to publish
room_id: Box<RoomId>,
},
/// - Unpublish a room from the room directory
Unpublish {
/// The room id of the room to unpublish
room_id: Box<RoomId>,
},
/// - List rooms that are published
List {
page: Option<usize>,
},
}
pub(crate) async fn process(command: RoomDirectoryCommand, _body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
RoomDirectoryCommand::Publish {
room_id,
} => match services().rooms.directory.set_public(&room_id) {
Ok(()) => Ok(RoomMessageEventContent::text_plain("Room published")),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {}", err))),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {err}"))),
},
RoomDirectoryCommand::Unpublish {
room_id,
} => match services().rooms.directory.set_not_public(&room_id) {
Ok(()) => Ok(RoomMessageEventContent::text_plain("Room unpublished")),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {}", err))),
Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {err}"))),
},
RoomDirectoryCommand::List {
page,

View file

@ -1,75 +1,17 @@
use std::fmt::Write as _;
use clap::Subcommand;
use ruma::{
events::room::message::RoomMessageEventContent, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId,
};
use tracing::{debug, error, info, warn};
use super::RoomModerationCommand;
use crate::{
api::client_server::{get_alias_helper, leave_room},
service::admin::{escape_html, Service},
services, Result,
};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum RoomModerationCommand {
/// - Bans a room from local users joining and evicts all our local users
/// from the room. Also blocks any invites (local and remote) for the
/// banned room.
///
/// Server admins (users in the conduwuit admin room) will not be evicted
/// and server admins can still join the room. To evict admins too, use
/// --force (also ignores errors). To disable incoming federation of the
/// room, use --disable-federation
BanRoom {
#[arg(short, long)]
/// Evicts admins out of the room and ignores any potential errors when
/// making our local users leave the room
force: bool,
#[arg(long)]
/// Disables incoming federation of the room after banning and evicting
/// users
disable_federation: bool,
/// The room in the format of `!roomid:example.com` or a room alias in
/// the format of `#roomalias:example.com`
room: Box<RoomOrAliasId>,
},
/// - Bans a list of rooms (room IDs and room aliases) from a newline
/// delimited codeblock similar to `user deactivate-all`
BanListOfRooms {
#[arg(short, long)]
/// Evicts admins out of the room and ignores any potential errors when
/// making our local users leave the room
force: bool,
#[arg(long)]
/// Disables incoming federation of the room after banning and evicting
/// users
disable_federation: bool,
},
/// - Unbans a room to allow local users to join again
///
/// To re-enable incoming federation of the room, use --enable-federation
UnbanRoom {
#[arg(long)]
/// Enables incoming federation of the room after unbanning
enable_federation: bool,
/// The room in the format of `!roomid:example.com` or a room alias in
/// the format of `#roomalias:example.com`
room: Box<RoomOrAliasId>,
},
/// - List of all rooms we have banned
ListBannedRooms,
}
pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
RoomModerationCommand::BanRoom {
@ -129,7 +71,7 @@ pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) ->
federation"
);
match get_alias_helper(room_alias).await {
match get_alias_helper(room_alias, None).await {
Ok(response) => {
debug!("Got federation response fetching room ID for room {room}: {:?}", response);
response.room_id
@ -291,7 +233,7 @@ pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) ->
fetch room ID over federation"
);
match get_alias_helper(room_alias).await {
match get_alias_helper(room_alias, None).await {
Ok(response) => {
debug!(
"Got federation response fetching room ID for room {room}: \
@ -490,7 +432,7 @@ pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) ->
federation"
);
match get_alias_helper(room_alias).await {
match get_alias_helper(room_alias, None).await {
Ok(response) => {
debug!("Got federation response fetching room ID for room {room}: {:?}", response);
response.room_id

View file

@ -1,106 +0,0 @@
use clap::Subcommand;
use ruma::events::room::message::RoomMessageEventContent;
use crate::{services, Result};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum ServerCommand {
/// - Show configuration values
ShowConfig,
/// - Print database memory usage statistics
MemoryUsage,
/// - Clears all of Conduit's database caches with index smaller than the
/// amount
ClearDatabaseCaches {
amount: u32,
},
/// - Clears all of Conduit's service caches with index smaller than the
/// amount
ClearServiceCaches {
amount: u32,
},
/// - Performs an online backup of the database (only available for RocksDB
/// at the moment)
BackupDatabase,
/// - List database backups
ListBackups,
/// - List database files
ListDatabaseFiles,
}
pub(crate) async fn process(command: ServerCommand, _body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
ServerCommand::ShowConfig => {
// Construct and send the response
Ok(RoomMessageEventContent::text_plain(format!("{}", services().globals.config)))
},
ServerCommand::MemoryUsage => {
let response1 = services().memory_usage().await;
let response2 = services().globals.db.memory_usage();
Ok(RoomMessageEventContent::text_plain(format!(
"Services:\n{response1}\n\nDatabase:\n{response2}"
)))
},
ServerCommand::ClearDatabaseCaches {
amount,
} => {
services().globals.db.clear_caches(amount);
Ok(RoomMessageEventContent::text_plain("Done."))
},
ServerCommand::ClearServiceCaches {
amount,
} => {
services().clear_caches(amount).await;
Ok(RoomMessageEventContent::text_plain("Done."))
},
ServerCommand::ListBackups => {
let result = services().globals.db.backup_list()?;
if result.is_empty() {
Ok(RoomMessageEventContent::text_plain("No backups found."))
} else {
Ok(RoomMessageEventContent::text_plain(result))
}
},
ServerCommand::BackupDatabase => {
if !cfg!(feature = "rocksdb") {
return Ok(RoomMessageEventContent::text_plain(
"Only RocksDB supports online backups in conduwuit.",
));
}
let mut result = tokio::task::spawn_blocking(move || match services().globals.db.backup() {
Ok(()) => String::new(),
Err(e) => (*e).to_string(),
})
.await
.unwrap();
if result.is_empty() {
result = services().globals.db.backup_list()?;
}
Ok(RoomMessageEventContent::text_plain(&result))
},
ServerCommand::ListDatabaseFiles => {
if !cfg!(feature = "rocksdb") {
return Ok(RoomMessageEventContent::text_plain(
"Only RocksDB supports listing files in conduwuit.",
));
}
let result = services().globals.db.file_list()?;
Ok(RoomMessageEventContent::notice_html(String::new(), result))
},
}
}

View file

@ -0,0 +1,58 @@
pub(crate) mod server_commands;
use clap::Subcommand;
use ruma::events::room::message::RoomMessageEventContent;
use self::server_commands::{
backup_database, clear_database_caches, clear_service_caches, list_backups, list_database_files, memory_usage,
show_config,
};
use crate::Result;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum ServerCommand {
/// - Show configuration values
ShowConfig,
/// - Print database memory usage statistics
MemoryUsage,
/// - Clears all of Conduit's database caches with index smaller than the
/// amount
ClearDatabaseCaches {
amount: u32,
},
/// - Clears all of Conduit's service caches with index smaller than the
/// amount
ClearServiceCaches {
amount: u32,
},
/// - Performs an online backup of the database (only available for RocksDB
/// at the moment)
BackupDatabase,
/// - List database backups
ListBackups,
/// - List database files
ListDatabaseFiles,
}
pub(crate) async fn process(command: ServerCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
ServerCommand::ShowConfig => show_config(body).await?,
ServerCommand::MemoryUsage => memory_usage(body).await?,
ServerCommand::ClearDatabaseCaches {
amount,
} => clear_database_caches(body, amount).await?,
ServerCommand::ClearServiceCaches {
amount,
} => clear_service_caches(body, amount).await?,
ServerCommand::ListBackups => list_backups(body).await?,
ServerCommand::BackupDatabase => backup_database(body).await?,
ServerCommand::ListDatabaseFiles => list_database_files(body).await?,
})
}

View file

@ -0,0 +1,71 @@
use ruma::events::room::message::RoomMessageEventContent;
use crate::{services, Result};
pub(super) async fn show_config(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
// Construct and send the response
Ok(RoomMessageEventContent::text_plain(format!("{}", services().globals.config)))
}
pub(super) async fn memory_usage(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let response1 = services().memory_usage().await;
let response2 = services().globals.db.memory_usage();
Ok(RoomMessageEventContent::text_plain(format!(
"Services:\n{response1}\n\nDatabase:\n{response2}"
)))
}
pub(super) async fn clear_database_caches(_body: Vec<&str>, amount: u32) -> Result<RoomMessageEventContent> {
services().globals.db.clear_caches(amount);
Ok(RoomMessageEventContent::text_plain("Done."))
}
pub(super) async fn clear_service_caches(_body: Vec<&str>, amount: u32) -> Result<RoomMessageEventContent> {
services().clear_caches(amount).await;
Ok(RoomMessageEventContent::text_plain("Done."))
}
pub(super) async fn list_backups(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let result = services().globals.db.backup_list()?;
if result.is_empty() {
Ok(RoomMessageEventContent::text_plain("No backups found."))
} else {
Ok(RoomMessageEventContent::text_plain(result))
}
}
pub(super) async fn backup_database(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
if !cfg!(feature = "rocksdb") {
return Ok(RoomMessageEventContent::text_plain(
"Only RocksDB supports online backups in conduwuit.",
));
}
let mut result = tokio::task::spawn_blocking(move || match services().globals.db.backup() {
Ok(()) => String::new(),
Err(e) => (*e).to_string(),
})
.await
.unwrap();
if result.is_empty() {
result = services().globals.db.backup_list()?;
}
Ok(RoomMessageEventContent::text_plain(&result))
}
pub(super) async fn list_database_files(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
if !cfg!(feature = "rocksdb") {
return Ok(RoomMessageEventContent::text_plain(
"Only RocksDB supports listing files in conduwuit.",
));
}
let result = services().globals.db.file_list()?;
Ok(RoomMessageEventContent::notice_html(String::new(), result))
}
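
backup_database runs the blocking RocksDB backup on tokio's blocking thread pool via spawn_blocking, so the async executor is not stalled while the backup runs. A minimal sketch of that hand-off, with a stand-in for the real services().globals.db.backup() call:

use std::{thread, time::Duration};

// Hypothetical stand-in for the blocking backup call.
fn blocking_backup() -> Result<(), String> {
	thread::sleep(Duration::from_millis(100));
	Ok(())
}

#[tokio::main]
async fn main() {
	// Blocking work goes to tokio's dedicated blocking pool; the async task
	// just awaits the JoinHandle.
	let result = tokio::task::spawn_blocking(|| match blocking_backup() {
		Ok(()) => String::new(),
		Err(e) => e,
	})
	.await
	.expect("blocking task panicked");

	if result.is_empty() {
		println!("backup completed");
	} else {
		println!("backup failed: {result}");
	}
}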

View file

@ -1,406 +0,0 @@
use std::{fmt::Write as _, sync::Arc};
use clap::Subcommand;
use itertools::Itertools;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, UserId};
use tracing::{error, info, warn};
use crate::{
api::client_server::{join_room_by_id_helper, leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH},
service::admin::{escape_html, get_room_info},
services, utils, Result,
};
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum UserCommand {
/// - Create a new user
Create {
/// Username of the new user
username: String,
/// Password of the new user, if unspecified one is generated
password: Option<String>,
},
/// - Reset user password
ResetPassword {
/// Username of the user for whom the password should be reset
username: String,
},
/// - Deactivate a user
///
/// User will not be removed from all rooms by default.
/// Use --leave-rooms to force the user to leave all rooms
Deactivate {
#[arg(short, long)]
leave_rooms: bool,
user_id: Box<UserId>,
},
/// - Deactivate a list of users
///
/// Recommended to use in conjunction with list-local-users.
///
/// Users will not be removed from joined rooms by default.
/// Can be overridden with --leave-rooms flag.
/// Removing a large number of users from a room may generate a significant
/// number of leave events. The time needed to leave rooms may vary
/// significantly depending on the rooms and servers involved.
///
/// This command needs a newline separated list of users provided in a
/// Markdown code block below the command.
DeactivateAll {
#[arg(short, long)]
/// Remove users from their joined rooms
leave_rooms: bool,
#[arg(short, long)]
/// Also deactivate admin accounts
force: bool,
},
/// - List local users in the database
List,
/// - Lists all the rooms (local and remote) that the specified user is
/// joined in
ListJoinedRooms {
user_id: Box<UserId>,
},
}
pub(crate) async fn process(command: UserCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
match command {
UserCommand::List => match services().users.list_local_users() {
Ok(users) => {
let mut msg = format!("Found {} local user account(s):\n", users.len());
msg += &users.join("\n");
Ok(RoomMessageEventContent::text_plain(&msg))
},
Err(e) => Ok(RoomMessageEventContent::text_plain(e.to_string())),
},
UserCommand::Create {
username,
password,
} => {
let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
// Validate user id
let user_id = match UserId::parse_with_server_name(
username.as_str().to_lowercase(),
services().globals.server_name(),
) {
Ok(id) => id,
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"The supplied username is not a valid username: {e}"
)))
},
};
if user_id.is_historical() {
return Ok(RoomMessageEventContent::text_plain(format!(
"Userid {user_id} is not allowed due to historical"
)));
}
if services().users.exists(&user_id)? {
return Ok(RoomMessageEventContent::text_plain(format!("Userid {user_id} already exists")));
}
// Create user
services().users.create(&user_id, Some(password.as_str()))?;
// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
// If `new_user_displayname_suffix` is set, registration appends its content
// to the user's display name, separated by a space
if !services().globals.new_user_displayname_suffix().is_empty() {
displayname.push_str(&(" ".to_owned() + services().globals.new_user_displayname_suffix()));
}
services()
.users
.set_displayname(&user_id, Some(displayname))
.await?;
// Initial account data
services().account_data.update(
None,
&user_id,
ruma::events::GlobalAccountDataEventType::PushRules
.to_string()
.into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
content: ruma::events::push_rules::PushRulesEventContent {
global: ruma::push::Ruleset::server_default(&user_id),
},
})
.expect("to json value always works"),
)?;
if !services().globals.config.auto_join_rooms.is_empty() {
for room in &services().globals.config.auto_join_rooms {
if !services()
.rooms
.state_cache
.server_in_room(services().globals.server_name(), room)?
{
warn!("Skipping room {room} to automatically join as we have never joined before.");
continue;
}
if let Some(room_id_server_name) = room.server_name() {
match join_room_by_id_helper(
Some(&user_id),
room,
Some("Automatically joining this room upon registration".to_owned()),
&[room_id_server_name.to_owned(), services().globals.server_name().to_owned()],
None,
)
.await
{
Ok(_) => {
info!("Automatically joined room {room} for user {user_id}");
},
Err(e) => {
// don't return this error so we don't fail registrations
error!("Failed to automatically join room {room} for user {user_id}: {e}");
},
};
}
}
}
// we don't add a device since we're not the user, just the creator
// Inhibit login does not work for guests
Ok(RoomMessageEventContent::text_plain(format!(
"Created user with user_id: {user_id} and password: `{password}`"
)))
},
UserCommand::Deactivate {
leave_rooms,
user_id,
} => {
let user_id = Arc::<UserId>::from(user_id);
// check if user belongs to our server
if user_id.server_name() != services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(format!(
"User {user_id} does not belong to our server."
)));
}
// don't deactivate the conduit service account
if user_id
== UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("conduit user exists")
{
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to deactivate the Conduit service account.",
));
}
if services().users.exists(&user_id)? {
RoomMessageEventContent::text_plain(format!("Making {user_id} leave all rooms before deactivation..."));
services().users.deactivate_account(&user_id)?;
if leave_rooms {
leave_all_rooms(&user_id).await?;
}
Ok(RoomMessageEventContent::text_plain(format!(
"User {user_id} has been deactivated"
)))
} else {
Ok(RoomMessageEventContent::text_plain(format!(
"User {user_id} doesn't exist on this server"
)))
}
},
UserCommand::ResetPassword {
username,
} => {
let user_id = match UserId::parse_with_server_name(
username.as_str().to_lowercase(),
services().globals.server_name(),
) {
Ok(id) => id,
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"The supplied username is not a valid username: {e}"
)))
},
};
// check if user belongs to our server
if user_id.server_name() != services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(format!(
"User {user_id} does not belong to our server."
)));
}
// Check if the specified user is valid
if !services().users.exists(&user_id)?
|| user_id
== UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("conduit user exists")
{
return Ok(RoomMessageEventContent::text_plain("The specified user does not exist!"));
}
let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH);
match services()
.users
.set_password(&user_id, Some(new_password.as_str()))
{
Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
"Successfully reset the password for user {user_id}: `{new_password}`"
))),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Couldn't reset the password for user {user_id}: {e}"
))),
}
},
UserCommand::DeactivateAll {
leave_rooms,
force,
} => {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let usernames = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
let mut user_ids: Vec<&UserId> = Vec::new();
for &username in &usernames {
match <&UserId>::try_from(username) {
Ok(user_id) => user_ids.push(user_id),
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"{username} is not a valid username: {e}"
)))
},
}
}
let mut deactivation_count = 0;
let mut admins = Vec::new();
if !force {
user_ids.retain(|&user_id| match services().users.is_admin(user_id) {
Ok(is_admin) => {
if is_admin {
admins.push(user_id.localpart());
false
} else {
true
}
},
Err(_) => false,
});
}
for &user_id in &user_ids {
// check if user belongs to our server and skips over non-local users
if user_id.server_name() != services().globals.server_name() {
continue;
}
// don't deactivate the conduit service account
if user_id
== UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("conduit user exists")
{
continue;
}
// user does not exist on our server
if !services().users.exists(user_id)? {
continue;
}
if services().users.deactivate_account(user_id).is_ok() {
deactivation_count += 1;
}
}
if leave_rooms {
for &user_id in &user_ids {
_ = leave_all_rooms(user_id).await;
}
}
if admins.is_empty() {
Ok(RoomMessageEventContent::text_plain(format!(
"Deactivated {deactivation_count} accounts."
)))
} else {
Ok(RoomMessageEventContent::text_plain(format!(
"Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin \
accounts",
deactivation_count,
admins.join(", ")
)))
}
} else {
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
},
UserCommand::ListJoinedRooms {
user_id,
} => {
if user_id.server_name() != services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain("User does not belong to our server."));
}
if !services().users.exists(&user_id)? {
return Ok(RoomMessageEventContent::text_plain("User does not exist on this server."));
}
let mut rooms: Vec<(OwnedRoomId, u64, String)> = services()
.rooms
.state_cache
.rooms_joined(&user_id)
.filter_map(Result::ok)
.map(|room_id| get_room_info(&room_id))
.sorted_unstable()
.dedup()
.collect();
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
}
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let output_plain = format!(
"Rooms {user_id} Joined:\n{}",
rooms
.iter()
.map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
.collect::<Vec<_>>()
.join("\n")
);
let output_html = format!(
"<table><caption>Rooms {user_id} \
Joined</caption>\n<tr><th>id</th>\t<th>members</th>\t<th>name</th></tr>\n{}</table>",
rooms
.iter()
.fold(String::new(), |mut output, (id, members, name)| {
writeln!(
output,
"<tr><td>{}</td>\t<td>{}</td>\t<td>{}</td></tr>",
escape_html(id.as_ref()),
members,
escape_html(name)
)
.unwrap();
output
})
);
Ok(RoomMessageEventContent::text_html(output_plain, output_html))
},
}
}

View file

@ -0,0 +1,89 @@
pub(crate) mod user_commands;
use clap::Subcommand;
use ruma::events::room::message::RoomMessageEventContent;
use self::user_commands::{create, deactivate, deactivate_all, list, list_joined_rooms, reset_password};
use crate::Result;
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
pub(crate) enum UserCommand {
/// - Create a new user
Create {
/// Username of the new user
username: String,
/// Password of the new user, if unspecified one is generated
password: Option<String>,
},
/// - Reset user password
ResetPassword {
/// Username of the user for whom the password should be reset
username: String,
},
/// - Deactivate a user
///
/// User will not be removed from all rooms by default.
/// Use --leave-rooms to force the user to leave all rooms
Deactivate {
#[arg(short, long)]
leave_rooms: bool,
user_id: String,
},
/// - Deactivate a list of users
///
/// Recommended to use in conjunction with list-local-users.
///
/// Users will not be removed from joined rooms by default.
/// Can be overridden with --leave-rooms flag.
/// Removing a large number of users from a room may generate a significant
/// number of leave events. The time needed to leave rooms may vary
/// significantly depending on the rooms and servers involved.
///
/// This command needs a newline separated list of users provided in a
/// Markdown code block below the command.
DeactivateAll {
#[arg(short, long)]
/// Remove users from their joined rooms
leave_rooms: bool,
#[arg(short, long)]
/// Also deactivate admin accounts
force: bool,
},
/// - List local users in the database
List,
/// - Lists all the rooms (local and remote) that the specified user is
/// joined in
ListJoinedRooms {
user_id: String,
},
}
pub(crate) async fn process(command: UserCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
UserCommand::List => list(body).await?,
UserCommand::Create {
username,
password,
} => create(body, username, password).await?,
UserCommand::Deactivate {
leave_rooms,
user_id,
} => deactivate(body, leave_rooms, user_id).await?,
UserCommand::ResetPassword {
username,
} => reset_password(body, username).await?,
UserCommand::DeactivateAll {
leave_rooms,
force,
} => deactivate_all(body, leave_rooms, force).await?,
UserCommand::ListJoinedRooms {
user_id,
} => list_joined_rooms(body, user_id).await?,
})
}

View file

@ -0,0 +1,358 @@
use std::{fmt::Write as _, sync::Arc};
use itertools::Itertools;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, UserId};
use tracing::{error, info, warn};
use crate::{
api::client_server::{join_room_by_id_helper, leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH},
service::admin::{escape_html, get_room_info},
services, utils, Result,
};
pub(super) async fn list(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
match services().users.list_local_users() {
Ok(users) => {
let mut msg = format!("Found {} local user account(s):\n", users.len());
msg += &users.join("\n");
Ok(RoomMessageEventContent::text_plain(&msg))
},
Err(e) => Ok(RoomMessageEventContent::text_plain(e.to_string())),
}
}
pub(super) async fn create(
_body: Vec<&str>, username: String, password: Option<String>,
) -> Result<RoomMessageEventContent> {
let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
// Validate user id
let user_id =
match UserId::parse_with_server_name(username.as_str().to_lowercase(), services().globals.server_name()) {
Ok(id) => id,
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"The supplied username is not a valid username: {e}"
)))
},
};
if user_id.is_historical() {
return Ok(RoomMessageEventContent::text_plain(format!(
"Userid {user_id} is not allowed due to historical"
)));
}
if services().users.exists(&user_id)? {
return Ok(RoomMessageEventContent::text_plain(format!("Userid {user_id} already exists")));
}
// Create user
services().users.create(&user_id, Some(password.as_str()))?;
// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
// If `new_user_displayname_suffix` is set, registration appends its content
// to the user's display name, separated by a space
if !services().globals.new_user_displayname_suffix().is_empty() {
displayname.push_str(&(" ".to_owned() + services().globals.new_user_displayname_suffix()));
}
services()
.users
.set_displayname(&user_id, Some(displayname))
.await?;
// Initial account data
services().account_data.update(
None,
&user_id,
ruma::events::GlobalAccountDataEventType::PushRules
.to_string()
.into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
content: ruma::events::push_rules::PushRulesEventContent {
global: ruma::push::Ruleset::server_default(&user_id),
},
})
.expect("to json value always works"),
)?;
if !services().globals.config.auto_join_rooms.is_empty() {
for room in &services().globals.config.auto_join_rooms {
if !services()
.rooms
.state_cache
.server_in_room(services().globals.server_name(), room)?
{
warn!("Skipping room {room} to automatically join as we have never joined before.");
continue;
}
if let Some(room_id_server_name) = room.server_name() {
match join_room_by_id_helper(
Some(&user_id),
room,
Some("Automatically joining this room upon registration".to_owned()),
&[room_id_server_name.to_owned(), services().globals.server_name().to_owned()],
None,
)
.await
{
Ok(_) => {
info!("Automatically joined room {room} for user {user_id}");
},
Err(e) => {
// don't return this error so we don't fail registrations
error!("Failed to automatically join room {room} for user {user_id}: {e}");
},
};
}
}
}
// we don't add a device since we're not the user, just the creator
// Inhibit login does not work for guests
Ok(RoomMessageEventContent::text_plain(format!(
"Created user with user_id: {user_id} and password: `{password}`"
)))
}
pub(super) async fn deactivate(
_body: Vec<&str>, leave_rooms: bool, user_id: String,
) -> Result<RoomMessageEventContent> {
// Validate user id
let user_id =
match UserId::parse_with_server_name(user_id.as_str().to_lowercase(), services().globals.server_name()) {
Ok(id) => Arc::<UserId>::from(id),
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"The supplied username is not a valid username: {e}"
)))
},
};
// check if user belongs to our server
if user_id.server_name() != services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(format!(
"User {user_id} does not belong to our server."
)));
}
// don't deactivate the conduit service account
if user_id
== UserId::parse_with_server_name("conduit", services().globals.server_name()).expect("conduit user exists")
{
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to deactivate the Conduit service account.",
));
}
if services().users.exists(&user_id)? {
RoomMessageEventContent::text_plain(format!("Making {user_id} leave all rooms before deactivation..."));
services().users.deactivate_account(&user_id)?;
if leave_rooms {
leave_all_rooms(&user_id).await?;
}
Ok(RoomMessageEventContent::text_plain(format!(
"User {user_id} has been deactivated"
)))
} else {
Ok(RoomMessageEventContent::text_plain(format!(
"User {user_id} doesn't exist on this server"
)))
}
}
pub(super) async fn reset_password(_body: Vec<&str>, username: String) -> Result<RoomMessageEventContent> {
// Validate user id
let user_id =
match UserId::parse_with_server_name(username.as_str().to_lowercase(), services().globals.server_name()) {
Ok(id) => Arc::<UserId>::from(id),
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"The supplied username is not a valid username: {e}"
)))
},
};
// check if user belongs to our server
if user_id.server_name() != services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(format!(
"User {user_id} does not belong to our server."
)));
}
// Check if the specified user is valid
if !services().users.exists(&user_id)?
|| user_id
== UserId::parse_with_server_name("conduit", services().globals.server_name()).expect("conduit user exists")
{
return Ok(RoomMessageEventContent::text_plain("The specified user does not exist!"));
}
let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH);
match services()
.users
.set_password(&user_id, Some(new_password.as_str()))
{
Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
"Successfully reset the password for user {user_id}: `{new_password}`"
))),
Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Couldn't reset the password for user {user_id}: {e}"
))),
}
}
pub(super) async fn deactivate_all(body: Vec<&str>, leave_rooms: bool, force: bool) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let usernames = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
let mut user_ids: Vec<&UserId> = Vec::new();
for &username in &usernames {
match <&UserId>::try_from(username) {
Ok(user_id) => user_ids.push(user_id),
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"{username} is not a valid username: {e}"
)))
},
}
}
let mut deactivation_count = 0;
let mut admins = Vec::new();
if !force {
user_ids.retain(|&user_id| match services().users.is_admin(user_id) {
Ok(is_admin) => {
if is_admin {
admins.push(user_id.localpart());
false
} else {
true
}
},
Err(_) => false,
});
}
for &user_id in &user_ids {
// check if user belongs to our server and skips over non-local users
if user_id.server_name() != services().globals.server_name() {
continue;
}
// don't deactivate the conduit service account
if user_id
== UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("conduit user exists")
{
continue;
}
// user does not exist on our server
if !services().users.exists(user_id)? {
continue;
}
if services().users.deactivate_account(user_id).is_ok() {
deactivation_count += 1;
}
}
if leave_rooms {
for &user_id in &user_ids {
_ = leave_all_rooms(user_id).await;
}
}
if admins.is_empty() {
Ok(RoomMessageEventContent::text_plain(format!(
"Deactivated {deactivation_count} accounts."
)))
} else {
Ok(RoomMessageEventContent::text_plain(format!(
"Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts",
deactivation_count,
admins.join(", ")
)))
}
} else {
Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
))
}
}
pub(super) async fn list_joined_rooms(_body: Vec<&str>, user_id: String) -> Result<RoomMessageEventContent> {
// Validate user id
let user_id =
match UserId::parse_with_server_name(user_id.as_str().to_lowercase(), services().globals.server_name()) {
Ok(id) => Arc::<UserId>::from(id),
Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"The supplied username is not a valid username: {e}"
)))
},
};
if user_id.server_name() != services().globals.server_name() {
return Ok(RoomMessageEventContent::text_plain("User does not belong to our server."));
}
if !services().users.exists(&user_id)? {
return Ok(RoomMessageEventContent::text_plain("User does not exist on this server."));
}
let mut rooms: Vec<(OwnedRoomId, u64, String)> = services()
.rooms
.state_cache
.rooms_joined(&user_id)
.filter_map(Result::ok)
.map(|room_id| get_room_info(&room_id))
.sorted_unstable()
.dedup()
.collect();
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
}
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let output_plain = format!(
"Rooms {user_id} Joined:\n{}",
rooms
.iter()
.map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
.collect::<Vec<_>>()
.join("\n")
);
let output_html = format!(
"<table><caption>Rooms {user_id} \
Joined</caption>\n<tr><th>id</th>\t<th>members</th>\t<th>name</th></tr>\n{}</table>",
rooms
.iter()
.fold(String::new(), |mut output, (id, members, name)| {
writeln!(
output,
"<tr><td>{}</td>\t<td>{}</td>\t<td>{}</td></tr>",
escape_html(id.as_ref()),
members,
escape_html(name)
)
.unwrap();
output
})
);
Ok(RoomMessageEventContent::text_html(output_plain, output_html))
}
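
deactivate_all expects the usernames in a Markdown code block underneath the command, hence the check that the first and last body lines are ``` fences before draining the lines in between. A standalone sketch of that extraction (trimming each username is a small liberty taken here; the real code passes the raw lines to UserId parsing):

fn extract_usernames(body: &[&str]) -> Option<Vec<String>> {
	// Require an opening fence on the first line and a closing fence on the
	// last, exactly like the handler above.
	if body.len() > 2 && body[0].trim().starts_with("```") && body.last()?.trim() == "```" {
		Some(body[1..body.len() - 1].iter().map(|s| s.trim().to_owned()).collect())
	} else {
		None
	}
}

fn main() {
	let body = vec!["```", "@alice:example.com", "@bob:example.com", "```"];
	let usernames = extract_usernames(&body).expect("well-formed code block");
	assert_eq!(usernames, vec!["@alice:example.com", "@bob:example.com"]);
	println!("parsed {} usernames", usernames.len());
}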

View file

@ -34,6 +34,7 @@ impl Client {
.unwrap()
.dns_resolver(resolver.hooked.clone())
.connect_timeout(Duration::from_secs(config.well_known_conn_timeout))
.read_timeout(Duration::from_secs(config.well_known_timeout))
.timeout(Duration::from_secs(config.well_known_timeout))
.pool_max_idle_per_host(0)
.redirect(redirect::Policy::limited(4))
@ -43,6 +44,7 @@ impl Client {
federation: Self::base(config)
.unwrap()
.dns_resolver(resolver.hooked.clone())
.read_timeout(Duration::from_secs(config.federation_timeout))
.timeout(Duration::from_secs(config.federation_timeout))
.pool_max_idle_per_host(config.federation_idle_per_host.into())
.pool_idle_timeout(Duration::from_secs(config.federation_idle_timeout))
@ -53,6 +55,7 @@ impl Client {
sender: Self::base(config)
.unwrap()
.dns_resolver(resolver.hooked.clone())
.read_timeout(Duration::from_secs(config.sender_timeout))
.timeout(Duration::from_secs(config.sender_timeout))
.pool_max_idle_per_host(1)
.pool_idle_timeout(Duration::from_secs(config.sender_idle_timeout))
@ -64,6 +67,7 @@ impl Client {
.unwrap()
.dns_resolver(resolver.clone())
.connect_timeout(Duration::from_secs(5))
.read_timeout(Duration::from_secs(config.appservice_timeout))
.timeout(Duration::from_secs(config.appservice_timeout))
.pool_max_idle_per_host(1)
.pool_idle_timeout(Duration::from_secs(config.appservice_idle_timeout))
@ -90,12 +94,14 @@ impl Client {
let mut builder = reqwest::Client::builder()
.hickory_dns(true)
.timeout(Duration::from_secs(config.request_timeout))
.connect_timeout(Duration::from_secs(config.request_conn_timeout))
.pool_max_idle_per_host(config.request_idle_per_host.into())
.read_timeout(Duration::from_secs(config.request_timeout))
.timeout(Duration::from_secs(config.request_total_timeout))
.pool_idle_timeout(Duration::from_secs(config.request_idle_timeout))
.pool_max_idle_per_host(config.request_idle_per_host.into())
.user_agent("Conduwuit".to_owned() + "/" + &version)
.redirect(redirect::Policy::limited(6));
.redirect(redirect::Policy::limited(6))
.connection_verbose(true);
#[cfg(feature = "gzip_compression")]
{

View file

@ -14,6 +14,7 @@ use argon2::Argon2;
use base64::{engine::general_purpose, Engine as _};
pub use data::Data;
use hickory_resolver::TokioAsyncResolver;
use ipaddress::IPAddress;
use regex::RegexSet;
use ruma::{
api::{
@ -25,7 +26,7 @@ use ruma::{
RoomVersionId, ServerName, UserId,
};
use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore};
use tracing::{error, info};
use tracing::{error, info, trace};
use tracing_subscriber::{EnvFilter, Registry};
use url::Url;
@ -46,6 +47,7 @@ pub struct Service<'a> {
pub tracing_reload_handle: tracing_subscriber::reload::Handle<EnvFilter, Registry>,
pub config: Config,
pub cidr_range_denylist: Vec<IPAddress>,
keypair: Arc<ruma::signatures::Ed25519KeyPair>,
jwt_decoding_key: Option<jsonwebtoken::DecodingKey>,
pub resolver: Arc<resolver::Resolver>,
@ -138,10 +140,18 @@ impl Service<'_> {
argon2::Params::new(19456, 2, 1, None).expect("valid parameters"),
);
let mut cidr_range_denylist = Vec::new();
for cidr in config.ip_range_denylist.clone() {
let cidr = IPAddress::parse(cidr).expect("valid cidr range");
trace!("Denied CIDR range: {:?}", cidr);
cidr_range_denylist.push(cidr);
}
let mut s = Self {
tracing_reload_handle,
db,
config: config.clone(),
cidr_range_denylist,
keypair: Arc::new(keypair),
resolver: resolver.clone(),
client: client::Client::new(config, &resolver),
@ -424,6 +434,16 @@ impl Service<'_> {
pub fn unix_socket_path(&self) -> &Option<PathBuf> { &self.config.unix_socket_path }
pub fn valid_cidr_range(&self, ip: &IPAddress) -> bool {
for cidr in &self.cidr_range_denylist {
if cidr.includes(ip) {
return false;
}
}
true
}
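
valid_cidr_range answers false as soon as any denied CIDR range includes the address. The same check, sketched standalone with the ipaddress crate (the ranges below are examples only):

use ipaddress::IPAddress;

fn valid_against(denylist: &[IPAddress], ip: &IPAddress) -> bool {
	// An address is valid unless some denied range contains it.
	!denylist.iter().any(|cidr| cidr.includes(ip))
}

fn main() {
	let denylist = vec![
		IPAddress::parse("192.168.0.0/16").expect("valid cidr range"),
		IPAddress::parse("127.0.0.0/8").expect("valid cidr range"),
	];
	let denied = IPAddress::parse("192.168.1.10").expect("valid ip");
	let allowed = IPAddress::parse("203.0.113.5").expect("valid ip");

	assert!(!valid_against(&denylist, &denied));
	assert!(valid_against(&denylist, &allowed));
	println!("cidr checks ok");
}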
pub fn shutdown(&self) {
self.shutdown.store(true, atomic::Ordering::Relaxed);
// On shutdown

View file

@ -3,9 +3,10 @@ use std::{cmp::Ordering, collections::BTreeMap, sync::Arc};
use ruma::{
canonical_json::redact_content_in_place,
events::{
room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent,
AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent,
AnyTimelineEvent, StateEvent, TimelineEventType,
room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent},
space::child::HierarchySpaceChildEvent,
AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent,
AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, TimelineEventType,
},
serde::Raw,
state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId,
@ -98,10 +99,47 @@ impl PduEvent {
Ok(())
}
/// Copies the `redacts` property of the event to the `content` dict and
/// vice-versa.
///
/// This follows the specification's
/// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property):
///
/// > For backwards-compatibility with older clients, servers should add a
/// > redacts property to the top level of m.room.redaction events when
/// > serving such events over the Client-Server API.
///
/// > For improved compatibility with newer clients, servers should add a
/// > redacts property to the content of m.room.redaction events in older
/// > room versions when serving such events over the Client-Server API.
#[must_use]
pub fn copy_redacts(&self) -> (Option<Arc<EventId>>, Box<RawJsonValue>) {
if self.kind == TimelineEventType::RoomRedaction {
if let Ok(mut content) = serde_json::from_str::<RoomRedactionEventContent>(self.content.get()) {
if let Some(redacts) = content.redacts {
return (Some(redacts.into()), self.content.clone());
} else if let Some(redacts) = self.redacts.clone() {
content.redacts = Some(redacts.into());
return (
self.redacts.clone(),
to_raw_value(&content).expect("Must be valid, we only added redacts field"),
);
}
}
}
(self.redacts.clone(), self.content.clone())
}
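// Illustrative examples (hypothetical event shapes, assumed for clarity):
// * v11-style PDU: content = {"redacts": "$abc"}, no top-level redacts
//   -> copy_redacts returns (Some("$abc"), content unchanged), so the
//      serialisers below also emit a top-level `redacts` property.
// * pre-v11 PDU: top-level redacts = "$abc", empty content
//   -> copy_redacts returns (Some("$abc"), {"redacts": "$abc"}), copying the
//      property into `content` for newer clients.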
#[tracing::instrument(skip(self))]
pub fn to_sync_room_event(&self) -> Raw<AnySyncTimelineEvent> {
let (redacts, content) = self.copy_redacts();
let mut json = json!({
"content": self.content,
"content": content,
"type": self.kind,
"event_id": self.event_id,
"sender": self.sender,
@ -114,7 +152,7 @@ impl PduEvent {
if let Some(state_key) = &self.state_key {
json["state_key"] = json!(state_key);
}
if let Some(redacts) = &self.redacts {
if let Some(redacts) = &redacts {
json["redacts"] = json!(redacts);
}
@ -124,8 +162,9 @@ impl PduEvent {
/// This only works for events that are also AnyRoomEvents.
#[tracing::instrument(skip(self))]
pub fn to_any_event(&self) -> Raw<AnyEphemeralRoomEvent> {
let (redacts, content) = self.copy_redacts();
let mut json = json!({
"content": self.content,
"content": content,
"type": self.kind,
"event_id": self.event_id,
"sender": self.sender,
@ -139,7 +178,7 @@ impl PduEvent {
if let Some(state_key) = &self.state_key {
json["state_key"] = json!(state_key);
}
if let Some(redacts) = &self.redacts {
if let Some(redacts) = &redacts {
json["redacts"] = json!(redacts);
}
@ -148,8 +187,9 @@ impl PduEvent {
#[tracing::instrument(skip(self))]
pub fn to_room_event(&self) -> Raw<AnyTimelineEvent> {
let (redacts, content) = self.copy_redacts();
let mut json = json!({
"content": self.content,
"content": content,
"type": self.kind,
"event_id": self.event_id,
"sender": self.sender,
@ -163,7 +203,7 @@ impl PduEvent {
if let Some(state_key) = &self.state_key {
json["state_key"] = json!(state_key);
}
if let Some(redacts) = &self.redacts {
if let Some(redacts) = &redacts {
json["redacts"] = json!(redacts);
}
@ -172,8 +212,9 @@ impl PduEvent {
#[tracing::instrument(skip(self))]
pub fn to_message_like_event(&self) -> Raw<AnyMessageLikeEvent> {
let (redacts, content) = self.copy_redacts();
let mut json = json!({
"content": self.content,
"content": content,
"type": self.kind,
"event_id": self.event_id,
"sender": self.sender,
@ -187,7 +228,7 @@ impl PduEvent {
if let Some(state_key) = &self.state_key {
json["state_key"] = json!(state_key);
}
if let Some(redacts) = &self.redacts {
if let Some(redacts) = &redacts {
json["redacts"] = json!(redacts);
}

View file

@ -20,7 +20,7 @@ use ruma::{
serde::Raw,
uint, RoomId, UInt, UserId,
};
use tracing::{debug, info, warn};
use tracing::{info, trace, warn};
use crate::{services, Error, PduEvent, Result};
@ -66,19 +66,10 @@ impl Service {
let url = reqwest_request.url().clone();
if let Some(url_host) = url.host_str() {
debug!("Checking request URL for IP");
trace!("Checking request URL for IP");
if let Ok(ip) = IPAddress::parse(url_host) {
let cidr_ranges_s = services().globals.ip_range_denylist().to_vec();
let mut cidr_ranges: Vec<IPAddress> = Vec::new();
for cidr in cidr_ranges_s {
cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup"));
}
for cidr in cidr_ranges {
if cidr.includes(&ip) {
return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
}
if !services().globals.valid_cidr_range(&ip) {
return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
}
}
}
@ -94,20 +85,11 @@ impl Service {
Ok(mut response) => {
// reqwest::Response -> http::Response conversion
debug!("Checking response destination's IP");
trace!("Checking response destination's IP");
if let Some(remote_addr) = response.remote_addr() {
if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) {
let cidr_ranges_s = services().globals.ip_range_denylist().to_vec();
let mut cidr_ranges: Vec<IPAddress> = Vec::new();
for cidr in cidr_ranges_s {
cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup"));
}
for cidr in cidr_ranges {
if cidr.includes(&ip) {
return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
}
if !services().globals.valid_cidr_range(&ip) {
return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
}
}
}

View file

@ -32,7 +32,7 @@ use ruma::{
use tokio::sync::Mutex;
use tracing::{debug, error, warn};
use crate::{services, Error, Result};
use crate::{debug_info, services, Error, Result};
pub struct CachedSpaceHierarchySummary {
summary: SpaceHierarchyParentSummary,
@ -425,8 +425,36 @@ impl Service {
}
async fn get_summary_and_children_federation(
&self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &Vec<OwnedServerName>,
&self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &[OwnedServerName],
) -> Result<Option<SummaryAccessibility>> {
// try to find more servers to fetch the hierarchy from if the only
// choice is the room ID's server name (usually dead)
//
// all spaces are normal rooms, so they should always have at least
// one admin, whose server has a far higher chance of still being alive
let power_levels: ruma::events::room::power_levels::RoomPowerLevelsEventContent = services()
.rooms
.state_accessor
.room_state_get(current_room, &StateEventType::RoomPowerLevels, "")?
.map(|ev| {
serde_json::from_str(ev.content.get())
.map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
})
.transpose()?
.unwrap_or_default();
// add the server names of the room's admins as extra candidates to fetch the hierarchy from
via.to_owned().extend(
power_levels
.users
.iter()
.filter(|(_, level)| **level > power_levels.users_default)
.map(|(user_id, _)| user_id.server_name())
.filter(|server| server != &services().globals.server_name())
.map(ToOwned::to_owned),
);
for server in via {
debug!("Asking {server} for /hierarchy");
if let Ok(response) = services()
@ -440,7 +468,7 @@ impl Service {
)
.await
{
debug!("Got response from {server} for /hierarchy\n{response:?}");
debug_info!("Got response from {server} for /hierarchy\n{response:?}");
let summary = response.room.clone();
self.roomid_spacehierarchy_cache.lock().await.insert(
@ -511,7 +539,7 @@ impl Service {
}
async fn get_summary_and_children_client(
&self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &Vec<OwnedServerName>,
&self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &[OwnedServerName],
) -> Result<Option<SummaryAccessibility>> {
if let Ok(Some(response)) = self
.get_summary_and_children_local(current_room, Identifier::UserId(user_id))
@ -631,7 +659,7 @@ impl Service {
suggested_only,
sender_user,
&match room_id.server_name() {
Some(server_name) => vec![server_name.into()],
Some(server_name) => vec![server_name.to_owned()],
None => vec![],
},
)

View file

@ -307,21 +307,6 @@ impl Service {
let mut pdu_id = shortroomid.to_be_bytes().to_vec();
pdu_id.extend_from_slice(&count2.to_be_bytes());
// https://spec.matrix.org/v1.9/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property
// For backwards-compatibility with older clients,
// servers should add a redacts property to the top level of m.room.redaction
// events when serving such events over the Client-Server API.
if pdu.kind == TimelineEventType::RoomRedaction
&& services().rooms.state.get_room_version(&pdu.room_id)? == RoomVersionId::V11
{
let content = serde_json::from_str::<RoomRedactionEventContent>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?;
if let Some(redact_id) = &content.redacts {
pdu_json.insert("redacts".to_owned(), CanonicalJsonValue::String(redact_id.to_string()));
}
}
// Insert pdu
self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2)?;

View file

@ -38,7 +38,7 @@ pub use send::FedDest;
const SELECT_EDU_LIMIT: usize = 16;
pub struct Service {
db: &'static dyn Data,
pub db: &'static dyn Data,
/// The state for a given state hash.
pub(super) maximum_requests: Arc<Semaphore>,
@ -272,7 +272,7 @@ impl Service {
});
}
#[tracing::instrument(skip(self), name = "sender")]
#[tracing::instrument(skip_all, name = "sender")]
async fn handler(&self) -> Result<()> {
let receiver = self.receiver.lock().await;

View file

@ -4,7 +4,6 @@ use std::{
net::{IpAddr, SocketAddr},
};
use futures_util::TryFutureExt;
use hickory_resolver::{error::ResolveError, lookup::SrvLookup};
use http::{header::AUTHORIZATION, HeaderValue};
use ipaddress::IPAddress;
@ -15,9 +14,9 @@ use ruma::{
},
OwnedServerName, ServerName,
};
use tracing::{debug, trace, warn};
use tracing::{debug, error, trace};
use crate::{services, Error, Result};
use crate::{debug_error, debug_info, debug_warn, services, Error, Result};
/// Wraps either a literal IP address plus port, or a hostname plus complement
/// (colon-plus-port if it was specified).
@ -43,9 +42,16 @@ pub enum FedDest {
Named(String, String),
}
struct ActualDestination {
destination: FedDest,
host: String,
string: String,
cached: bool,
}
#[tracing::instrument(skip_all, name = "send")]
pub(crate) async fn send_request<T>(
client: &reqwest::Client, destination: &ServerName, request: T,
client: &reqwest::Client, destination: &ServerName, req: T,
) -> Result<T::IncomingResponse>
where
T: OutgoingRequest + Debug,
@ -54,286 +60,148 @@ where
return Err(Error::bad_config("Federation is disabled."));
}
if destination == services().globals.server_name() {
return Err(Error::bad_config("Won't send federation request to ourselves"));
}
if destination.is_ip_literal() || IPAddress::is_valid(destination.host()) {
debug!(
"Destination {} is an IP literal, checking against IP range denylist.",
destination
);
let ip = IPAddress::parse(destination.host()).map_err(|e| {
warn!("Failed to parse IP literal from string: {}", e);
Error::BadServerResponse("Invalid IP address")
trace!("Preparing to send request");
validate_destination(destination)?;
let actual = get_actual_destination(destination).await?;
let mut http_request = req
.try_into_http_request::<Vec<u8>>(&actual.string, SendAccessToken::IfRequired(""), &[MatrixVersion::V1_5])
.map_err(|e| {
debug_warn!("Failed to find destination {}: {}", actual.string, e);
Error::BadServerResponse("Invalid destination")
})?;
let cidr_ranges_s = services().globals.ip_range_denylist().to_vec();
let mut cidr_ranges: Vec<IPAddress> = Vec::new();
sign_request::<T>(destination, &mut http_request);
let request = reqwest::Request::try_from(http_request)?;
let method = request.method().clone();
let url = request.url().clone();
validate_url(&url)?;
for cidr in cidr_ranges_s {
cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup"));
}
debug!(
method = ?method,
url = ?url,
"Sending request",
);
match client.execute(request).await {
Ok(response) => handle_response::<T>(destination, actual, &method, &url, response).await,
Err(e) => handle_error::<T>(destination, &actual, &method, &url, e),
}
}
debug!("List of pushed CIDR ranges: {:?}", cidr_ranges);
async fn handle_response<T>(
destination: &ServerName, actual: ActualDestination, method: &reqwest::Method, url: &reqwest::Url,
mut response: reqwest::Response,
) -> Result<T::IncomingResponse>
where
T: OutgoingRequest + Debug,
{
trace!("Received response from {} for {} with {}", actual.string, url, response.url());
let status = response.status();
let mut http_response_builder = http::Response::builder()
.status(status)
.version(response.version());
mem::swap(
response.headers_mut(),
http_response_builder
.headers_mut()
.expect("http::response::Builder is usable"),
);
for cidr in cidr_ranges {
if cidr.includes(&ip) {
return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
}
}
trace!("Waiting for response body");
let body = response.bytes().await.unwrap_or_else(|e| {
debug_error!("server error {}", e);
Vec::new().into()
}); // TODO: handle timeout
debug!("IP literal {} is allowed.", destination);
let http_response = http_response_builder
.body(body)
.expect("reqwest body is valid http body");
debug!("Got {status:?} for {method} {url}");
if !status.is_success() {
return Err(Error::FederationError(
destination.to_owned(),
RumaError::from_http_response(http_response),
));
}
trace!("Preparing to send request to {destination}");
let response = T::IncomingResponse::try_from_http_response(http_response);
if response.is_ok() && !actual.cached {
services()
.globals
.actual_destinations()
.write()
.await
.insert(OwnedServerName::from(destination), (actual.destination, actual.host));
}
let mut write_destination_to_cache = false;
match response {
Err(_e) => Err(Error::BadServerResponse("Server returned bad 200 response.")),
Ok(response) => Ok(response),
}
}
fn handle_error<T>(
_destination: &ServerName, actual: &ActualDestination, method: &reqwest::Method, url: &reqwest::Url,
e: reqwest::Error,
) -> Result<T::IncomingResponse>
where
T: OutgoingRequest + Debug,
{
// we do not need to log that servers in a room are dead; this is normal in
// public rooms and just spams the logs.
if e.is_timeout() {
debug_error!("timeout {}: {}", actual.host, e);
} else if e.is_connect() {
debug_error!("connect {}: {}", actual.host, e);
} else if e.is_redirect() {
debug_error!(
method = ?method,
url = ?url,
final_url = ?e.url(),
"Redirect loop {}: {}",
actual.host,
e,
);
} else {
debug_error!("{}: {}", actual.host, e);
}
Err(e.into())
}
#[tracing::instrument(skip_all, name = "resolve")]
async fn get_actual_destination(server_name: &ServerName) -> Result<ActualDestination> {
let cached;
let cached_result = services()
.globals
.actual_destinations()
.read()
.await
.get(destination)
.get(server_name)
.cloned();
let (actual_destination, host) = if let Some(result) = cached_result {
let (destination, host) = if let Some(result) = cached_result {
cached = true;
result
} else {
write_destination_to_cache = true;
let result = resolve_actual_destination(destination).await;
(result.0, result.1.into_uri_string())
cached = false;
resolve_actual_destination(server_name).await?
};
let actual_destination_str = actual_destination.clone().into_https_string();
let mut http_request = request
.try_into_http_request::<Vec<u8>>(
&actual_destination_str,
SendAccessToken::IfRequired(""),
&[MatrixVersion::V1_5],
)
.map_err(|e| {
warn!("Failed to find destination {}: {}", actual_destination_str, e);
Error::BadServerResponse("Invalid destination")
})?;
let mut request_map = serde_json::Map::new();
if !http_request.body().is_empty() {
request_map.insert(
"content".to_owned(),
serde_json::from_slice(http_request.body()).expect("body is valid json, we just created it"),
);
};
request_map.insert("method".to_owned(), T::METADATA.method.to_string().into());
request_map.insert(
"uri".to_owned(),
http_request
.uri()
.path_and_query()
.expect("all requests have a path")
.to_string()
.into(),
);
request_map.insert("origin".to_owned(), services().globals.server_name().as_str().into());
request_map.insert("destination".to_owned(), destination.as_str().into());
let mut request_json = serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap");
ruma::signatures::sign_json(
services().globals.server_name().as_str(),
services().globals.keypair(),
&mut request_json,
)
.expect("our request json is what ruma expects");
let request_json: serde_json::Map<String, serde_json::Value> =
serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap();
let signatures = request_json["signatures"]
.as_object()
.unwrap()
.values()
.map(|v| {
v.as_object()
.unwrap()
.iter()
.map(|(k, v)| (k, v.as_str().unwrap()))
});
for signature_server in signatures {
for s in signature_server {
http_request.headers_mut().insert(
AUTHORIZATION,
HeaderValue::from_str(&format!(
"X-Matrix origin={},key=\"{}\",sig=\"{}\"",
services().globals.server_name(),
s.0,
s.1
))
.unwrap(),
);
}
}
let reqwest_request = reqwest::Request::try_from(http_request)?;
let method = reqwest_request.method().clone();
let url = reqwest_request.url().clone();
if let Some(url_host) = url.host_str() {
trace!("Checking request URL for IP");
if let Ok(ip) = IPAddress::parse(url_host) {
let cidr_ranges_s = services().globals.ip_range_denylist().to_vec();
let mut cidr_ranges: Vec<IPAddress> = Vec::new();
for cidr in cidr_ranges_s {
cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup"));
}
for cidr in cidr_ranges {
if cidr.includes(&ip) {
return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
}
}
}
}
debug!("Sending request {} {}", method, url);
let response = client.execute(reqwest_request).await;
trace!("Received resonse {} {}", method, url);
match response {
Ok(mut response) => {
// reqwest::Response -> http::Response conversion
trace!("Checking response destination's IP");
if let Some(remote_addr) = response.remote_addr() {
if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) {
let cidr_ranges_s = services().globals.ip_range_denylist().to_vec();
let mut cidr_ranges: Vec<IPAddress> = Vec::new();
for cidr in cidr_ranges_s {
cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup"));
}
for cidr in cidr_ranges {
if cidr.includes(&ip) {
return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
}
}
}
}
let status = response.status();
let mut http_response_builder = http::Response::builder()
.status(status)
.version(response.version());
mem::swap(
response.headers_mut(),
http_response_builder
.headers_mut()
.expect("http::response::Builder is usable"),
);
trace!("Waiting for response body");
let body = response.bytes().await.unwrap_or_else(|e| {
debug!("server error {}", e);
Vec::new().into()
}); // TODO: handle timeout
if !status.is_success() {
debug!(
"Got {status:?} for {method} {url}: {}",
String::from_utf8_lossy(&body)
.lines()
.collect::<Vec<_>>()
.join(" ")
);
}
let http_response = http_response_builder
.body(body)
.expect("reqwest body is valid http body");
if status.is_success() {
debug!("Got {status:?} for {method} {url}");
let response = T::IncomingResponse::try_from_http_response(http_response);
if response.is_ok() && write_destination_to_cache {
services()
.globals
.actual_destinations()
.write()
.await
.insert(OwnedServerName::from(destination), (actual_destination, host));
}
response.map_err(|e| {
debug!("Invalid 200 response for {} {}", url, e);
Error::BadServerResponse("Server returned bad 200 response.")
})
} else {
Err(Error::FederationError(
destination.to_owned(),
RumaError::from_http_response(http_response),
))
}
},
Err(e) => {
// we do not need to log that servers in a room are dead, this is normal in
// public rooms and just spams the logs.
if e.is_timeout() {
debug!(
"Timed out sending request to {} at {}: {}",
destination, actual_destination_str, e
);
} else if e.is_connect() {
debug!("Failed to connect to {} at {}: {}", destination, actual_destination_str, e);
} else if e.is_redirect() {
debug!(
"Redirect loop sending request to {} at {}: {}\nFinal URL: {:?}",
destination,
actual_destination_str,
e,
e.url()
);
} else {
debug!("Could not send request to {} at {}: {}", destination, actual_destination_str, e);
}
Err(e.into())
},
}
}
fn get_ip_with_port(destination_str: &str) -> Option<FedDest> {
if let Ok(destination) = destination_str.parse::<SocketAddr>() {
Some(FedDest::Literal(destination))
} else if let Ok(ip_addr) = destination_str.parse::<IpAddr>() {
Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448)))
} else {
None
}
}
fn add_port_to_hostname(destination_str: &str) -> FedDest {
let (host, port) = match destination_str.find(':') {
None => (destination_str, ":8448"),
Some(pos) => destination_str.split_at(pos),
};
FedDest::Named(host.to_owned(), port.to_owned())
let string = destination.clone().into_https_string();
Ok(ActualDestination {
destination,
host,
string,
cached,
})
}
/// Returns: `actual_destination`, host header
/// Implemented according to the specification at <https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names>
/// Numbers in comments below refer to bullet points in linked section of
/// specification
#[tracing::instrument(skip_all, name = "resolve")]
async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
async fn resolve_actual_destination(destination: &'_ ServerName) -> Result<(FedDest, String)> {
trace!("Finding actual destination for {destination}");
let destination_str = destination.as_str().to_owned();
let mut hostname = destination_str.clone();
@ -347,12 +215,12 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, Fe
debug!("2: Hostname with included port");
let (host, port) = destination_str.split_at(pos);
query_and_cache_override(host, host, port.parse::<u16>().unwrap_or(8448)).await;
query_and_cache_override(host, host, port.parse::<u16>().unwrap_or(8448)).await?;
FedDest::Named(host.to_owned(), port.to_owned())
} else {
trace!("Requesting well known for {destination}");
if let Some(delegated_hostname) = request_well_known(destination.as_str()).await {
if let Some(delegated_hostname) = request_well_known(destination.as_str()).await? {
debug!("3: A .well-known file is available");
hostname = add_port_to_hostname(&delegated_hostname).into_uri_string();
match get_ip_with_port(&delegated_hostname) {
@ -362,12 +230,12 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, Fe
debug!("3.2: Hostname with port in .well-known file");
let (host, port) = delegated_hostname.split_at(pos);
query_and_cache_override(host, host, port.parse::<u16>().unwrap_or(8448)).await;
query_and_cache_override(host, host, port.parse::<u16>().unwrap_or(8448)).await?;
FedDest::Named(host.to_owned(), port.to_owned())
} else {
trace!("Delegated hostname has no port in this branch");
if let Some(hostname_override) = query_srv_record(&delegated_hostname).await {
if let Some(hostname_override) = query_srv_record(&delegated_hostname).await? {
debug!("3.3: SRV lookup successful");
let force_port = hostname_override.port();
@ -376,7 +244,7 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, Fe
&hostname_override.hostname(),
force_port.unwrap_or(8448),
)
.await;
.await?;
if let Some(port) = force_port {
FedDest::Named(delegated_hostname, format!(":{port}"))
@ -385,7 +253,7 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, Fe
}
} else {
debug!("3.4: No SRV records, just use the hostname from .well-known");
query_and_cache_override(&delegated_hostname, &delegated_hostname, 8448).await;
query_and_cache_override(&delegated_hostname, &delegated_hostname, 8448).await?;
add_port_to_hostname(&delegated_hostname)
}
}
@ -393,12 +261,12 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, Fe
}
} else {
trace!("4: No .well-known or an error occured");
if let Some(hostname_override) = query_srv_record(&destination_str).await {
if let Some(hostname_override) = query_srv_record(&destination_str).await? {
debug!("4: No .well-known; SRV record found");
let force_port = hostname_override.port();
query_and_cache_override(&hostname, &hostname_override.hostname(), force_port.unwrap_or(8448))
.await;
.await?;
if let Some(port) = force_port {
FedDest::Named(hostname.clone(), format!(":{port}"))
@ -407,7 +275,7 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, Fe
}
} else {
debug!("4: No .well-known; 5: No SRV record found");
query_and_cache_override(&destination_str, &destination_str, 8448).await;
query_and_cache_override(&destination_str, &destination_str, 8448).await?;
add_port_to_hostname(&destination_str)
}
}
@ -429,19 +297,79 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, Fe
};
debug!("Actual destination: {actual_destination:?} hostname: {hostname:?}");
(actual_destination, hostname)
Ok((actual_destination, hostname.into_uri_string()))
}
async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) {
#[tracing::instrument(skip_all, name = "well-known")]
async fn request_well_known(destination: &str) -> Result<Option<String>> {
if !services()
.globals
.resolver
.overrides
.read()
.unwrap()
.contains_key(destination)
{
query_and_cache_override(destination, destination, 8448).await?;
}
let response = services()
.globals
.client
.well_known
.get(&format!("https://{destination}/.well-known/matrix/server"))
.send()
.await;
trace!("response: {:?}", response);
if let Err(e) = &response {
debug!("error: {e:?}");
return Ok(None);
}
let response = response?;
if !response.status().is_success() {
debug!("response not 2XX");
return Ok(None);
}
let text = response.text().await?;
trace!("response text: {:?}", text);
if text.len() >= 12288 {
debug_warn!("response contains junk");
return Ok(None);
}
let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default();
let m_server = body
.get("m.server")
.unwrap_or(&serde_json::Value::Null)
.as_str()
.unwrap_or_default();
if ruma_identifiers_validation::server_name::validate(m_server).is_err() {
debug_error!("response content missing or invalid");
return Ok(None);
}
debug_info!("{:?} found at {:?}", destination, m_server);
Ok(Some(m_server.to_owned()))
}
#[tracing::instrument(skip_all, name = "ip")]
async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) -> Result<()> {
match services()
.globals
.dns_resolver()
.lookup_ip(hostname.to_owned())
.await
{
Err(e) => handle_resolve_error(&e),
Ok(override_ip) => {
trace!("Caching result of {:?} overriding {:?}", hostname, overname);
if hostname != overname {
debug_info!("{:?} overriden by {:?}", overname, hostname);
}
services()
.globals
.resolver
@ -449,14 +377,14 @@ async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u1
.write()
.unwrap()
.insert(overname.to_owned(), (override_ip.iter().collect(), port));
},
Err(e) => {
debug!("Got {:?} for {:?} to override {:?}", e.kind(), hostname, overname);
Ok(())
},
}
}
async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
#[tracing::instrument(skip_all, name = "srv")]
async fn query_srv_record(hostname: &'_ str) -> Result<Option<FedDest>> {
fn handle_successful_srv(srv: &SrvLookup) -> Option<FedDest> {
srv.iter().next().map(|result| {
FedDest::Named(
@ -476,61 +404,163 @@ async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
.await
}
let first_hostname = format!("_matrix-fed._tcp.{hostname}.");
let second_hostname = format!("_matrix._tcp.{hostname}.");
let hostnames = [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")];
lookup_srv(&first_hostname)
.or_else(|_| {
trace!("Querying deprecated _matrix SRV record for host {:?}", hostname);
lookup_srv(&second_hostname)
})
.and_then(|srv_lookup| async move { Ok(handle_successful_srv(&srv_lookup)) })
.await
.ok()
.flatten()
for hostname in hostnames {
match lookup_srv(&hostname).await {
Ok(result) => return Ok(handle_successful_srv(&result)),
Err(e) => handle_resolve_error(&e)?,
}
}
Ok(None)
}
async fn request_well_known(destination: &str) -> Option<String> {
if !services()
.globals
.resolver
.overrides
.read()
.unwrap()
.contains_key(destination)
{
query_and_cache_override(destination, destination, 8448).await;
#[allow(clippy::single_match_else)]
fn handle_resolve_error(e: &ResolveError) -> Result<()> {
use hickory_resolver::error::ResolveErrorKind;
match *e.kind() {
ResolveErrorKind::Io {
..
} => {
error!("{e}");
Err(Error::Error(e.to_string()))
},
_ => {
debug_error!("{e}");
Ok(())
},
}
}
let response = services()
.globals
.client
.well_known
.get(&format!("https://{destination}/.well-known/matrix/server"))
.send()
.await;
trace!("Well known response: {:?}", response);
if let Err(e) = &response {
debug!("Well known error: {e:?}");
return None;
}
let text = response.ok()?.text().await;
trace!("Well known response text: {:?}", text);
if text.as_ref().ok()?.len() > 10000 {
debug!(
"Well known response for destination '{destination}' exceeded past 10000 characters, assuming no \
well-known."
fn sign_request<T>(destination: &ServerName, http_request: &mut http::Request<Vec<u8>>)
where
T: OutgoingRequest + Debug,
{
let mut req_map = serde_json::Map::new();
if !http_request.body().is_empty() {
req_map.insert(
"content".to_owned(),
serde_json::from_slice(http_request.body()).expect("body is valid json, we just created it"),
);
return None;
};
req_map.insert("method".to_owned(), T::METADATA.method.to_string().into());
req_map.insert(
"uri".to_owned(),
http_request
.uri()
.path_and_query()
.expect("all requests have a path")
.to_string()
.into(),
);
req_map.insert("origin".to_owned(), services().globals.server_name().as_str().into());
req_map.insert("destination".to_owned(), destination.as_str().into());
let mut req_json = serde_json::from_value(req_map.into()).expect("valid JSON is valid BTreeMap");
ruma::signatures::sign_json(
services().globals.server_name().as_str(),
services().globals.keypair(),
&mut req_json,
)
.expect("our request json is what ruma expects");
let req_json: serde_json::Map<String, serde_json::Value> =
serde_json::from_slice(&serde_json::to_vec(&req_json).unwrap()).unwrap();
let signatures = req_json["signatures"]
.as_object()
.expect("signatures object")
.values()
.map(|v| {
v.as_object()
.expect("server signatures object")
.iter()
.map(|(k, v)| (k, v.as_str().expect("server signature string")))
});
for signature_server in signatures {
for s in signature_server {
http_request.headers_mut().insert(
AUTHORIZATION,
HeaderValue::from_str(&format!(
"X-Matrix origin={},key=\"{}\",sig=\"{}\"",
services().globals.server_name(),
s.0,
s.1
))
.expect("formatted X-Matrix header"),
);
}
}
}
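// Illustrative example of the header assembled above (hypothetical origin,
// key id and signature): each signing key yields one entry of the form
//   Authorization: X-Matrix origin=example.com,key="ed25519:a1b2",sig="<base64 signature>"
// covering the canonical request JSON built in `req_map`.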
fn validate_url(url: &reqwest::Url) -> Result<()> {
if let Some(url_host) = url.host_str() {
if let Ok(ip) = IPAddress::parse(url_host) {
trace!("Checking request URL IP {ip:?}");
validate_ip(&ip)?;
}
}
let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?;
trace!("serde_json body of well known text: {}", body);
Ok(())
}
Some(body.get("m.server")?.as_str()?.to_owned())
fn validate_destination(destination: &ServerName) -> Result<()> {
if destination == services().globals.server_name() {
return Err(Error::bad_config("Won't send federation request to ourselves"));
}
if destination.is_ip_literal() || IPAddress::is_valid(destination.host()) {
validate_destination_ip_literal(destination)?;
}
Ok(())
}
fn validate_destination_ip_literal(destination: &ServerName) -> Result<()> {
trace!("Destination is an IP literal, checking against IP range denylist.",);
debug_assert!(
destination.is_ip_literal() || !IPAddress::is_valid(destination.host()),
"Destination is not an IP literal."
);
let ip = IPAddress::parse(destination.host()).map_err(|e| {
debug_error!("Failed to parse IP literal from string: {}", e);
Error::BadServerResponse("Invalid IP address")
})?;
validate_ip(&ip)?;
Ok(())
}
fn validate_ip(ip: &IPAddress) -> Result<()> {
if !services().globals.valid_cidr_range(ip) {
return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
}
Ok(())
}
fn get_ip_with_port(destination_str: &str) -> Option<FedDest> {
if let Ok(destination) = destination_str.parse::<SocketAddr>() {
Some(FedDest::Literal(destination))
} else if let Ok(ip_addr) = destination_str.parse::<IpAddr>() {
Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448)))
} else {
None
}
}
fn add_port_to_hostname(destination_str: &str) -> FedDest {
let (host, port) = match destination_str.find(':') {
None => (destination_str, ":8448"),
Some(pos) => destination_str.split_at(pos),
};
FedDest::Named(host.to_owned(), port.to_owned())
}
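// Illustrative behaviour of the two helpers above (hypothetical inputs):
//   get_ip_with_port("1.1.1.1")            -> Some(FedDest::Literal(1.1.1.1:8448))
//   get_ip_with_port("example.com")        -> None
//   add_port_to_hostname("example.com")    -> FedDest::Named("example.com", ":8448")
//   add_port_to_hostname("example.com:80") -> FedDest::Named("example.com", ":80")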
impl FedDest {

42
src/utils/debug.rs Normal file
View file

@ -0,0 +1,42 @@
/// Log event at given level in debug-mode (when debug-assertions are enabled).
/// In release mode it becomes DEBUG level, and possibly subject to elision.
#[macro_export]
macro_rules! debug_event {
( $level:expr, $($x:tt)+ ) => {
if cfg!(debug_assertions) {
tracing::event!( $level, $($x)+ );
} else {
tracing::debug!( $($x)+ );
}
}
}
/// Log message at the ERROR level in debug-mode (when debug-assertions are
/// enabled). In release mode it becomes DEBUG level, and possibly subject to
/// elision.
#[macro_export]
macro_rules! debug_error {
( $($x:tt)+ ) => {
$crate::debug_event!(tracing::Level::ERROR, $($x)+ );
}
}
/// Log message at the WARN level in debug-mode (when debug-assertions are
/// enabled). In release mode it becomes DEBUG level, and possibly subject to
/// elision.
#[macro_export]
macro_rules! debug_warn {
( $($x:tt)+ ) => {
$crate::debug_event!(tracing::Level::WARN, $($x)+ );
}
}
/// Log message at the INFO level in debug-mode (when debug-assertions are
/// enabled). In release mode it becomes DEBUG level, and possibly subject to
/// elision.
#[macro_export]
macro_rules! debug_info {
( $($x:tt)+ ) => {
$crate::debug_event!(tracing::Level::INFO, $($x)+ );
}
}
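// Usage sketch (call sites assumed, mirroring uses elsewhere in this change):
// these macros take the same arguments as the corresponding `tracing` macros, e.g.
//   debug_warn!("Failed to find destination {}: {}", actual.string, e);
//   debug_info!("{:?} found at {:?}", destination, m_server);
// In a debug build they log at WARN/INFO respectively; in a release build both
// fall back to DEBUG and may be filtered out by the active EnvFilter.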

View file

@ -1,3 +1,4 @@
pub(crate) mod debug;
pub(crate) mod error;
use std::{

View file

@ -1,15 +0,0 @@
# Complement
## What's that?
Have a look at [its repository](https://github.com/matrix-org/complement).
## How do I use it with conduwuit?
For reproducible results, Complement support in conduwuit uses Nix to run and generate an image.
After installing Nix, you can run either:
- `nix run .#complement-runtime -- ./path/to/logs.jsonl ./path/to/results.jsonl` to build a Complement image, run the tests, and output the logs and results to the specified paths.
- `nix run .#complement-image` to just build a Complement image

View file

@ -1,592 +0,0 @@
# =============================================================================
# This is the official complement config for conduwuit.
# DO NOT USE IT IN ACTUAL SERVERS
# =============================================================================
[global]
# The server_name is the pretty name of this server. It is used as a suffix for user
# and room ids. Examples: matrix.org, conduit.rs
# The Conduit server needs all /_matrix/ requests to be reachable at
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
# If that's not possible for you, you can create /.well-known files to redirect
# requests (delegation). See
# https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient
# and
# https://spec.matrix.org/v1.9/server-server-api/#getwell-knownmatrixserver
# for more information
# We set this via env var
# server_name = "your.server.name"
# Servers listed here will be used to gather public keys of other servers (notary trusted key servers).
#
# The default behaviour for conduwuit is to attempt to query trusted key servers before querying the individual servers.
# This is done for performance reasons, but if you would like to query individual servers before the notary servers
# configured below, set `query_trusted_key_servers_first` to `false`.
#
# (Currently, conduwuit doesn't support batched key requests, so this list should only contain Synapse servers)
# Defaults to `matrix.org`
trusted_servers = []
# Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
# Conduwuit's Sentry reporting endpoint is o4506996327251968.ingest.us.sentry.io
#
# Defaults to false
#sentry = false
# Report your Conduwuit server_name in Sentry.io crash reports and metrics
#
# Defaults to false
#sentry_send_server_name = false
# Performance monitoring/tracing sample rate for Sentry.io
#
# Note that too high values may impact performance, and can be disabled by setting it to 0.0
#
# Defaults to 0.15
#sentry_traces_sample_rate = 0.15
### Database configuration
# This is the only directory where conduwuit will save its data, including media
database_path = "/conduwuit/data"
# Database backend: Only rocksdb and sqlite are supported. Please note that sqlite
# will perform significantly worse than rocksdb as it is not intended to be used the
# way it is by conduwuit. sqlite only exists for historical reasons.
database_backend = "rocksdb"
### Network
# The port(s) conduwuit will be running on. You need to set up a reverse proxy such as
# Caddy or Nginx so all requests to /_matrix on port 443 and 8448 will be
# forwarded to the conduwuit instance running on this port
# Docker users: Don't change this, you'll need to map an external port to this.
# To listen on multiple ports, specify a vector e.g. [8080, 8448]
port = [8008, 8448]
# default address (IPv4 or IPv6) conduwuit will listen on. Generally you want this to be
# localhost (127.0.0.1 / ::1). If you are using Docker or a container NAT networking setup, you
# likely need this to be 0.0.0.0.
address = "0.0.0.0"
# How many requests conduwuit sends to other servers concurrently. Default is 500
# Note that because conduwuit is very fast unlike other homeserver implementations, setting this too
# high could inadvertently result in ratelimits kicking in, or overloading lower-end homeservers out there.
#
# A valid use-case for raising this is if you have a significant amount of overall federation activity
# such as many rooms joined/tracked, and many servers in the true destination cache caused by that. Upon
# rebooting conduwuit, depending on how fast your resources are, client and incoming federation requests
# may timeout or be "stalled" for a period of time due to hitting the max concurrent requests limit from
# refreshing federation/destination caches and such.
#
# If you have a lot of active users on your homeserver, you will definitely need to raise this.
#
# No this will not speed up room joins.
max_concurrent_requests = 2000
# Max request size for file uploads
max_request_size = 100_000_000 # in bytes
# Uncomment unix_socket_path to listen on a UNIX socket at the specified path.
# If listening on a UNIX socket, you must remove/comment the 'address' key if defined and add your
# reverse proxy to the 'conduwuit' group, unless world RW permissions are specified with unix_socket_perms (666 minimum).
#unix_socket_path = "/run/conduwuit/conduwuit.sock"
#unix_socket_perms = 660
# Set this to true for conduwuit to compress HTTP response bodies using zstd.
# This option does nothing if conduwuit was not built with `zstd_compression` feature.
# Please be aware that enabling HTTP compression may weaken TLS.
# Most users should not need to enable this.
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this.
zstd_compression = false
# Set this to true for conduwuit to compress HTTP response bodies using gzip.
# This option does nothing if conduwuit was not built with `gzip_compression` feature.
# Please be aware that enabling HTTP compression may weaken TLS.
# Most users should not need to enable this.
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this.
gzip_compression = false
# Set this to true for conduwuit to compress HTTP response bodies using brotli.
# This option does nothing if conduwuit was not built with `brotli_compression` feature.
# Please be aware that enabling HTTP compression may weaken TLS.
# Most users should not need to enable this.
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this.
brotli_compression = false
# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you do not want conduwuit to send outbound requests to.
# Defaults to RFC1918, unroutable, loopback, multicast, and testnet addresses for security.
#
# To disable, set this to be an empty vector (`[]`).
# Please be aware that this is *not* a guarantee. You should be using a firewall with zones as doing this on the application layer may have bypasses.
#
# Currently this does not account for proxies in use like Synapse does.
ip_range_denylist = []
### Moderation / Privacy / Security
# Set to true to allow user type "guest" registrations. Element attempts to register guest users automatically.
# For private homeservers, this is best at false.
allow_guest_registration = true
# Vector list of servers that conduwuit will refuse to download remote media from.
# No default.
# prevent_media_downloads_from = ["example.com", "example.local"]
# Enable complement tests being able to register
allow_registration = true
# DO NOT USE THIS ON REAL SERVERS
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true
# controls whether federation is allowed or not
# defaults to true
allow_federation = true
# controls whether users are allowed to create rooms.
# appservices and admins are always allowed to create rooms
# defaults to true
allow_room_creation = true
# controls whether non-admin local users are forbidden from sending room invites (local and remote),
# and if non-admin users can receive remote room invites. admins are always allowed to send and receive all room invites.
# defaults to false
block_non_admin_invites = false
# List of forbidden username patterns/strings. Values in this list are matched as *contains*.
# This is checked upon username availability check, registration, and startup as warnings if any local users in your database
# have a forbidden username.
# No default.
# forbidden_usernames = []
# List of forbidden room aliases and room IDs as patterns/strings. Values in this list are matched as *contains*.
# This is checked upon room alias creation, custom room ID creation if used, and startup as warnings if any room aliases
# in your database have a forbidden room alias/ID.
# No default.
# forbidden_alias_names = []
# Set this to true to allow your server's public room directory to be federated.
# Set this to false to protect against /publicRooms spiders, but this will forbid external users
# from viewing your server's public room directory. If federation is disabled entirely
# (`allow_federation`), this is inherently false.
allow_public_room_directory_over_federation = true
# Set this to true to allow your server's public room directory to be queried without client
# authentication (access token) through the Client APIs. Set this to false to protect against /publicRooms spiders.
allow_public_room_directory_without_auth = true
# Set this to true to lock down your server's public room directory and only allow admins to publish rooms to the room directory.
# Unpublishing is still allowed by all users with this enabled.
#
# Defaults to false
lockdown_public_room_directory = false
# Set this to true to allow federating device display names / allow external users to see your device display name.
# If federation is disabled entirely (`allow_federation`), this is inherently false. For privacy, this is best disabled.
allow_device_name_federation = true
# Vector list of domains allowed to send requests to for URL previews. Defaults to none.
# Note: this is a *contains* match, not an explicit match. Putting "google.com" will match "https://google.com" and "http://mymaliciousdomainexamplegoogle.com"
# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so.
url_preview_domain_contains_allowlist = ["*"]
# Vector list of explicit domains allowed to send requests to for URL previews. Defaults to none.
# Note: This is an *explicit* match, not a contains match. Putting "google.com" will match "https://google.com", "http://google.com", but not "https://mymaliciousdomainexamplegoogle.com"
# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so.
url_preview_domain_explicit_allowlist = []
# Vector list of URLs allowed to send requests to for URL previews. Defaults to none.
# Note that this is a *contains* match, not an explicit match. Putting "google.com" will match "https://google.com/", "https://google.com/url?q=https://mymaliciousdomainexample.com", and "https://mymaliciousdomainexample.com/hi/google.com"
# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so.
url_preview_url_contains_allowlist = []
# Maximum amount of bytes allowed in a URL preview body size when spidering. Defaults to 384KB (384_000 bytes)
url_preview_max_spider_size = 384_000
# Option to decide whether you would like to run the domain allowlist checks (contains and explicit) on the root domain or not. Does not apply to URL contains allowlist. Defaults to false.
# Example: If this is enabled and you have "wikipedia.org" allowed in the explicit and/or contains domain allowlist, it will allow all subdomains under "wikipedia.org" such as "en.m.wikipedia.org" as the root domain is checked and matched.
# Useful if the domain contains allowlist is still too broad for you but you still want to allow all the subdomains under a root domain.
url_preview_check_root_domain = false
# A single contact and/or support page for /.well-known/matrix/support
# All options here are strings. Currently only supports a single contact.
# No default.
#well_known_support_page = ""
#well_known_support_role = ""
#well_known_support_email = ""
#well_known_support_mxid = ""
# Config option to allow or disallow incoming federation requests that obtain the profiles
# of our local users from `/_matrix/federation/v1/query/profile`
#
# This is inherently false if `allow_federation` is disabled
#
# Defaults to true
allow_profile_lookup_federation_requests = true
### Misc
# max log level for conduwuit. allows debug, info, warn, or error
# see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
# For release builds, the maximum log level for conduwuit is info. For debug builds, it is "trace".
# Defaults to "warn"
log = "trace"
# controls whether encrypted rooms and events are allowed (default true)
#allow_encryption = false
# if enabled, conduwuit will send a simple GET request periodically to `https://pupbrain.dev/check-for-updates/stable`
# for any new announcements made. Despite the name, this is not an update check
# endpoint, it is simply an announcement check endpoint.
# Defaults to false.
allow_check_for_updates = false
# If you are using delegation via well-known files and you cannot serve them from your reverse proxy, you can
# uncomment these to serve them directly from conduwuit. This requires proxying all requests to conduwuit, not just `/_matrix`, to work.
# Note that whatever you put will show up in the well-known JSON values.
# Set to false to disable users from joining or creating room versions that aren't 100% officially supported by conduwuit.
# conduwuit officially supports room versions 6 - 10. conduwuit has experimental/unstable support for 3 - 5, and 11.
# Defaults to true.
allow_unstable_room_versions = true
# Option to control adding arbitrary text to the end of the user's displayname upon registration with a space before the text.
# This was the lightning bolt emoji option, just replaced with support for adding your own custom text or emojis.
# To disable, set this to "" (an empty string)
# Defaults to "🏳️‍⚧️" (trans pride flag)
#new_user_displayname_suffix = ""
# Option to control whether conduwuit will query your list of trusted notary key servers (`trusted_servers`) for
# remote homeserver signing keys it doesn't know *first*, or query the individual servers first before falling back to the trusted
# key servers.
#
# The former/default behaviour makes federated/remote rooms joins generally faster because we're querying a single (or list of) server
# that we know works, is reasonably fast, and is reliable for just about all the homeserver signing keys in the room. Querying individual
# servers may take longer depending on the general infrastructure of everyone in there, how many dead servers there are, etc.
#
# However, this does create an increased reliance on one single or multiple large entities as `trusted_servers` should generally
# contain long-term and large servers who know a very large number of homeservers.
#
# If you don't know what any of this means, leave this and `trusted_servers` alone to their defaults.
#
# Defaults to true as this is the fastest option for federation.
query_trusted_key_servers_first = true
# List/vector of room **IDs** that conduwuit will make newly registered users join.
# The room IDs specified must be rooms that you have joined at least once on the server, and must be public.
#
# No default.
#auto_join_rooms = []
# Retry failed and incomplete messages to remote servers immediately upon startup. This is called bursting.
# If this is disabled, said messages may not be delivered until more messages are queued for that server.
# Do not change this option unless server resources are extremely limited or the scale of the server's
# deployment is huge. Do not disable this unless you know what you are doing.
startup_netburst = true
# Limit the startup netburst to the most recent (default: 50) messages queued for each remote server. All older
# messages are dropped and not reattempted. The `startup_netburst` option must be enabled for this value to have
# any effect. Do not change this value unless you know what you are doing. Set this value to -1 to reattempt
# every message without trimming the queues; this may consume significant disk. Set this value to 0 to drop all
# messages without any attempt at redelivery.
#startup_netburst_keep = 50
### Generic database options
# Set this to any float value to scale conduwuit's in-memory LRU caches by.
# May be useful if you have significant memory to spare to increase performance.
# Defaults to 1.0.
#conduit_cache_capacity_modifier = 1.0
# Set this to any float value in megabytes for conduwuit to tell the database engine that this much memory is available for database-related caches.
# May be useful if you have significant memory to spare to increase performance.
# Defaults to 256.0
#db_cache_capacity_mb = 256.0
# Interval in seconds when conduwuit will run database cleanup operations.
#
# For SQLite: this will flush the WAL by executing `PRAGMA wal_checkpoint(RESTART)` (https://www.sqlite.org/pragma.html#pragma_wal_checkpoint)
# For RocksDB: this will run `flush_opt` to flush database memtables to SST files on disk (https://docs.rs/rocksdb/latest/rocksdb/struct.DBCommon.html#method.flush_opt)
# These operations always run on shutdown.
#
# Defaults to 30 minutes (1800 seconds) to avoid IO amplification from too frequent cleanups
#cleanup_second_interval = 1800
### RocksDB options
# Set this to true to use RocksDB config options that are tailored to HDDs (slower device storage)
#
# It is worth noting that by default, conduwuit will use RocksDB with Direct IO enabled. *Generally* speaking this improves performance as it bypasses buffered I/O (system page cache).
# However there is a potential chance that Direct IO may cause issues with database operations if your setup is uncommon. This has been observed with FUSE filesystems, and possibly ZFS filesystem.
# RocksDB generally deals/corrects these issues but it cannot account for all setups.
# If you experience any weird RocksDB issues, try enabling this option as it turns off Direct IO and feel free to report in the conduwuit Matrix room if this option fixes your DB issues.
# See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information.
#
# Defaults to false
#rocksdb_optimize_for_spinning_disks = false
# RocksDB log level. This is not the same as conduwuit's log level. This is the log level for the RocksDB engine/library
# which show up in your database folder/path as `LOG` files. Defaults to error. conduwuit will typically log RocksDB errors as normal.
#rocksdb_log_level = "error"
# Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB.
#rocksdb_max_log_file_size = 4194304
# Time in seconds before RocksDB will forcibly rotate logs. Defaults to 0.
#rocksdb_log_time_to_roll = 0
# Number of threads that RocksDB will use for parallelism on database operations such as cleanup, sync, flush, compaction, etc. Set to 0 to use all your physical cores.
#
# Defaults to your CPU physical core count (not logical threads).
#rocksdb_parallelism_threads = 0
# Maximum number of LOG files RocksDB will keep. This must *not* be set to 0. It must be at least 1.
# Defaults to 3 as these are not very useful.
#rocksdb_max_log_files = 3
# Type of RocksDB database compression to use.
# Available options are "zstd", "zlib", "bz2" and "lz4"
# It is best to use ZSTD as an overall good balance between speed/performance, storage, IO amplification, and CPU usage.
# For more performance but less compression (more storage used) and less CPU usage, use LZ4.
# See https://github.com/facebook/rocksdb/wiki/Compression for more details.
#
# Defaults to "zstd"
#rocksdb_compression_algo = "zstd"
# Level of compression the specified compression algorithm for RocksDB to use.
# Default is 32767, which is internally read by RocksDB as the default magic number and
# translated to the library's default compression level as they all differ.
# See their `kDefaultCompressionLevel`.
#
#rocksdb_compression_level = 32767
# Level of compression the specified compression algorithm for the bottommost level/data for RocksDB to use.
# Default is 32767, which is internally read by RocksDB as the default magic number and
# translated to the library's default compression level as they all differ.
# See their `kDefaultCompressionLevel`.
#
# Since this is the bottommost level (generally old and least used data), it may be desirable to have a very
# high compression level here as it's less likely for this data to be used. Research your chosen compression algorithm.
#
#rocksdb_bottommost_compression_level = 32767
# Whether to enable RocksDB "bottommost_compression".
# At the expense of more CPU usage, this will further compress the database to reduce more storage.
# It is recommended to use ZSTD compression with this for best compression results.
# See https://github.com/facebook/rocksdb/wiki/Compression for more details.
#
# Defaults to false as this uses more CPU when compressing.
#rocksdb_bottommost_compression = false
# Database recovery mode (for RocksDB WAL corruption)
#
# Use this option when the server reports corruption and refuses to start. Set mode 2 (PointInTime)
# to cleanly recover from this corruption. The server will continue from the last good state,
# several seconds or minutes prior to the crash. Clients may have to run "clear-cache & reload" to
# account for the rollback. Upon success, you may reset the mode back to default and restart again.
# Please note in some cases the corruption error may not be cleared for at least 30 minutes of
# operation in PointInTime mode.
#
# As a very last ditch effort, if PointInTime does not fix or resolve anything, you can try mode
# 3 (SkipAnyCorruptedRecord) but this will leave the server in a potentially inconsistent state.
#
# The default mode 1 (TolerateCorruptedTailRecords) will automatically drop the last entry in the
# database if corrupted during shutdown, but nothing more. It is extraordinarily unlikely this will
# desynchronize clients. To disable any form of silent rollback set mode 0 (AbsoluteConsistency).
#
# The options are:
# 0 = AbsoluteConsistency
# 1 = TolerateCorruptedTailRecords (default)
# 2 = PointInTime (use me if trying to recover)
# 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty)
#
# See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information
#
# Defaults to 1 (TolerateCorruptedTailRecords)
#rocksdb_recovery_mode = 1
# Controls whether memory buffers are written to storage at the fixed interval set by `cleanup_period_interval`
# even when they are not full. Setting this will increase load on the storage backplane and is never advised
# under normal circumstances.
#rocksdb_periodic_cleanup = false

### Domain Name Resolution and Caching

# Maximum number of entries stored in the DNS memory-cache. The size of an entry may vary, so please
# take care if raising this value excessively. Only decrease this when using an external DNS cache.
# Please note that systemd-resolved does *not* count as an external cache, even when configured to do so.
#dns_cache_entries = 12288
# Minimum time-to-live in seconds for entries in the DNS cache. The default of 5400 seconds (90 minutes)
# may appear high to most administrators; this is by design. Only decrease this if you are using an
# external DNS cache.
#dns_min_ttl = 5400
# Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. This value is critical for
# the server to federate efficiently. NXDOMAINs are assumed not to be returning to the federation
# and are aggressively cached rather than constantly rechecked. The default is 259200 seconds (3 days).
#dns_min_ttl_nxdomain = 259200
# The number of seconds to wait for a reply to a DNS query. Please note that recursive queries can
# take up to several seconds for some domains, so this value should not be too low.
#dns_timeout = 5
# Number of retries after a timeout.
#dns_attempts = 5
# Enable to query all nameservers until the domain is found. Referred to as "trust_negative_responses" in hickory_resolver.
# This can avoid useless DNS queries if the first nameserver responds with NXDOMAIN or an empty NOERROR response.
#
# The default is to query one nameserver and stop (false).
#query_all_nameservers = false
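#
# Illustrative sketch only (assumed values): if a dedicated external DNS cache such as a local
# Unbound instance already sits in front of conduwuit, the built-in cache can be shrunk and its
# TTL floors lowered, for example:
#
#dns_cache_entries = 2048
#dns_min_ttl = 60
#dns_min_ttl_nxdomain = 300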

### Request Timeouts, Connection Timeouts, and Connection Pooling

## Request timeouts are HTTP response timeouts.
## Connection timeouts are TCP connection timeouts.
##
## Connection pooling timeouts are timeouts for keeping an idle open connection alive.
## Connection pooling and keepalive are very useful for federation and other cases where, for performance
## reasons, we want to keep re-used connections open, given the overhead of establishing TCP and TLS 1.3 connections.
##
## Generally these defaults are best, but if you find a reason to change them, they are here.
# Default/base connection timeout
# This is used only by URL previews and update/news endpoint checks
#
# Defaults to 10 seconds
#request_conn_timeout = 10
# Default/base request timeout
# This is used only by URL previews and update/news endpoint checks
#
# Defaults to 35 seconds
#request_timeout = 35
# Default/base max idle connections per host
# This is used only by URL previews and update/news endpoint checks
#
# Defaults to 1 as generally the same open connection can be re-used
#request_idle_per_host = 1
# Default/base idle connection pool timeout
# This is used only by URL previews and update/news endpoint checks
#
# Defaults to 5 seconds
#request_idle_timeout = 5
# Federation well-known resolution connection timeout
#
# Defaults to 6 seconds
#well_known_conn_timeout = 6
# Federation HTTP well-known resolution request timeout
#
# Defaults to 10 seconds
#well_known_timeout = 10
# Federation client/server request timeout
# You most definitely want this to be high to account for extremely large room joins, slow homeservers, your own resources, etc.
#
# Defaults to 300 seconds
#federation_timeout = 300
# Federation client/sender max idle connections per host
#
# Defaults to 1 as generally the same open connection can be re-used
#federation_idle_per_host = 1
# Federation client/sender idle connection pool timeout
#
# Defaults to 25 seconds
#federation_idle_timeout = 25
# Appservice URL request connection timeout
#
# Defaults to 120 seconds
#appservice_timeout = 120
# Appservice URL idle connection pool timeout
#
# Defaults to 300 seconds
#appservice_idle_timeout = 300
# Notification gateway pusher idle connection pool timeout
#
# Defaults to 15 seconds
#pusher_idle_timeout = 15
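#
# Illustrative sketch only (assumed values, not recommendations): a server that regularly joins
# very large rooms over slow federation might raise the federation request timeout and keep idle
# federation connections alive a little longer, for example:
#
#federation_timeout = 600
#federation_idle_timeout = 60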

### Presence / Typing Indicators / Read Receipts

# Config option to control local (your server only) presence updates/requests. Defaults to true.
# Note that presence on conduwuit is very fast, unlike Synapse's.
# If using outgoing presence, this MUST be enabled.
#
allow_local_presence = true
# Config option to control incoming federated presence updates/requests. Defaults to true.
# This option receives presence updates from other servers, but does not send any unless `allow_outgoing_presence` is true.
# Note that presence on conduwuit is very fast, unlike Synapse's.
#
allow_incoming_presence = true
# Config option to control outgoing presence updates/requests. Defaults to true.
# This option sends presence updates to other servers, but does not receive any unless `allow_incoming_presence` is true.
# Note that presence on conduwuit is very fast, unlike Synapse's.
# If using outgoing presence, you MUST enable `allow_local_presence` as well.
#
allow_outgoing_presence = true
# Config option to control how many seconds of inactivity before your presence is updated to idle. Defaults to 5 minutes (300 seconds).
#presence_idle_timeout_s = 300
# Config option to control how many seconds of inactivity before your presence is updated to offline. Defaults to 30 minutes (1800 seconds).
#presence_offline_timeout_s = 1800
# Config option to control whether we should receive remote incoming read receipts.
# Defaults to true.
allow_incoming_read_receipts = true
# Config option to control outgoing typing updates to federation. Defaults to true.
allow_outgoing_typing = true
# Config option to control incoming typing updates from federation. Defaults to true.
allow_incoming_typing = true
# Config option to control the maximum time a federated user can indicate typing.
#typing_federation_timeout_s = 30
# Config option to control the minimum time a local client can indicate typing. This does not override
# a client's request to stop typing; it only enforces a minimum value in case no stop request is sent.
#typing_client_timeout_min_s = 15
# Config option to control the maximum time a local client can indicate typing.
#typing_client_timeout_max_s = 45
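#
# Illustrative sketch only (assumed values): a small private homeserver that wants to minimise
# federation chatter could disable presence and typing federation entirely while keeping read
# receipts, for example:
#
#allow_local_presence = false
#allow_incoming_presence = false
#allow_outgoing_presence = false
#allow_incoming_typing = false
#allow_outgoing_typing = false
#allow_incoming_read_receipts = true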

# Other options not in [global]:
#
#
# Enables running conduwuit with direct TLS support.
# It is strongly recommended that you use a reverse proxy instead; this is primarily relevant for
# test suites like Complement that require a private CA setup.
[global.tls]
certs = "/conduwuit/certificate.crt"
key = "/conduwuit/private_key.key"
#
# Whether to listen for and allow both HTTP and HTTPS connections (insecure!)
# This config option is only available if conduwuit was built with the `axum_dual_protocol` feature (not a default feature).
# Defaults to false
dual_protocol = true

View file

@@ -1,596 +0,0 @@
{
"Action": "fail",
"Test": "TestBannedUserCannotSendJoin"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/join_event"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV1"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV1/leave_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV1/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV2"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV2/leave_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV2/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/invite_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/join_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/leave_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/non-state_membership_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/event_with_mismatched_state_key"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/invite_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/join_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/knock_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/non-state_membership_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/event_with_mismatched_state_key"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/invite_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/join_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/knock_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/regular_event"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummaryJoinRules"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/max_depth"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/pagination"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/query_whole_graph"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/redact_link"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/suggested_only"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederation"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederation/good_connectivity"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederation/interrupted_connectivity"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederationOnRoomJoin"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederation/stopped_server"
}
{
"Action": "fail",
"Test": "TestEventAuth"
}
{
"Action": "fail",
"Test": "TestFederationKeyUploadQuery"
}
{
"Action": "fail",
"Test": "TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST"
}
{
"Action": "fail",
"Test": "TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"
}
{
"Action": "fail",
"Test": "TestFederationRejectInvite"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata"
}
{
"Action": "fail",
"Test": "TestGetMissingEventsGapFilling"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_shared_visibility"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_world_readable_visibility"
}
{
"Action": "fail",
"Test": "TestInboundFederationRejectsEventsWithRejectedAuthEvents"
}
{
"Action": "fail",
"Test": "TestJoinFederatedRoomFromApplicationServiceBridgeUser"
}
{
"Action": "fail",
"Test": "TestJumpToDateEndpoint"
}
{
"Action": "fail",
"Test": "TestKnocking"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"
}
{
"Action": "fail",
"Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"
}
{
"Action": "fail",
"Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"
}
{
"Action": "fail",
"Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"
}
{
"Action": "fail",
"Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"
}
{
"Action": "fail",
"Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock"
}
{
"Action": "fail",
"Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"
}
{
"Action": "fail",
"Test": "TestKnockRoomsInPublicRoomsDirectory"
}
{
"Action": "fail",
"Test": "TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestLocalPngThumbnail"
}
{
"Action": "fail",
"Test": "TestMediaFilenames"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/ASCII"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/Unicode"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation"
}
{
"Action": "fail",
"Test": "TestNetworkPartitionOrdering"
}
{
"Action": "fail",
"Test": "TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"
}
{
"Action": "fail",
"Test": "TestRemotePresence"
}
{
"Action": "fail",
"Test": "TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"
}
{
"Action": "fail",
"Test": "TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsLocalJoin"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoin"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinFailOver"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinFailOverInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinLocalUser"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsSpacesSummaryFederation"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsSpacesSummaryLocal"
}
{
"Action": "fail",
"Test": "TestToDeviceMessagesOverFederation"
}
{
"Action": "fail",
"Test": "TestToDeviceMessagesOverFederation/interrupted_connectivity"
}
{
"Action": "fail",
"Test": "TestToDeviceMessagesOverFederation/stopped_server"
}
{
"Action": "fail",
"Test": "TestUnbanViaInvite"
}
{
"Action": "fail",
"Test": "TestUnknownEndpoints"
}
{
"Action": "fail",
"Test": "TestUnknownEndpoints/Key_endpoints"
}
{
"Action": "fail",
"Test": "TestUnrejectRejectedEvents"
}

View file

@@ -1,896 +0,0 @@
{
"Action": "fail",
"Test": "TestBannedUserCannotSendJoin"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/join_event"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event"
}
{
"Action": "fail",
"Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV1"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV1/leave_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV1/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV2"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV2/leave_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonJoinViaSendJoinV2/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/invite_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/join_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/leave_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/non-state_membership_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonKnockViaSendKnock/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/event_with_mismatched_state_key"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/invite_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/join_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/knock_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/non-state_membership_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV1/regular_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/event_with_mismatched_state_key"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/invite_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/join_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/knock_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event"
}
{
"Action": "fail",
"Test": "TestCannotSendNonLeaveViaSendLeaveV2/regular_event"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummaryJoinRules"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/max_depth"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/pagination"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/query_whole_graph"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/redact_link"
}
{
"Action": "fail",
"Test": "TestClientSpacesSummary/suggested_only"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederation"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederation/good_connectivity"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederation/interrupted_connectivity"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederationOnRoomJoin"
}
{
"Action": "fail",
"Test": "TestDeviceListsUpdateOverFederation/stopped_server"
}
{
"Action": "fail",
"Test": "TestEventAuth"
}
{
"Action": "fail",
"Test": "TestFederationKeyUploadQuery"
}
{
"Action": "fail",
"Test": "TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST"
}
{
"Action": "fail",
"Test": "TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"
}
{
"Action": "fail",
"Test": "TestFederationRejectInvite"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"
}
{
"Action": "fail",
"Test": "TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata"
}
{
"Action": "fail",
"Test": "TestGetMissingEventsGapFilling"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_shared_visibility"
}
{
"Action": "fail",
"Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_world_readable_visibility"
}
{
"Action": "fail",
"Test": "TestInboundFederationRejectsEventsWithRejectedAuthEvents"
}
{
"Action": "fail",
"Test": "TestJoinFederatedRoomFromApplicationServiceBridgeUser"
}
{
"Action": "fail",
"Test": "TestJumpToDateEndpoint"
}
{
"Action": "fail",
"Test": "TestKnocking"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it"
}
{
"Action": "fail",
"Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"
}
{
"Action": "fail",
"Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"
}
{
"Action": "fail",
"Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"
}
{
"Action": "fail",
"Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"
}
{
"Action": "fail",
"Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"
}
{
"Action": "fail",
"Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"
}
{
"Action": "fail",
"Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock"
}
{
"Action": "fail",
"Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"
}
{
"Action": "fail",
"Test": "TestKnockRoomsInPublicRoomsDirectory"
}
{
"Action": "fail",
"Test": "TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestLocalPngThumbnail"
}
{
"Action": "fail",
"Test": "TestMediaFilenames"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/ASCII"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/Unicode"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally"
}
{
"Action": "fail",
"Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation"
}
{
"Action": "fail",
"Test": "TestNetworkPartitionOrdering"
}
{
"Action": "fail",
"Test": "TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"
}
{
"Action": "fail",
"Test": "TestRemotePresence"
}
{
"Action": "fail",
"Test": "TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"
}
{
"Action": "fail",
"Test": "TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsLocalJoin"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoin"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinFailOver"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinFailOverInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinLocalUser"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsSpacesSummaryFederation"
}
{
"Action": "fail",
"Test": "TestRestrictedRoomsSpacesSummaryLocal"
}
{
"Action": "fail",
"Test": "TestToDeviceMessagesOverFederation"
}
{
"Action": "fail",
"Test": "TestToDeviceMessagesOverFederation/interrupted_connectivity"
}
{
"Action": "fail",
"Test": "TestToDeviceMessagesOverFederation/stopped_server"
}
{
"Action": "fail",
"Test": "TestUnbanViaInvite"
}
{
"Action": "fail",
"Test": "TestUnknownEndpoints"
}
{
"Action": "fail",
"Test": "TestUnknownEndpoints/Key_endpoints"
}
{
"Action": "fail",
"Test": "TestUnrejectRejectedEvents"
}
{
"Action": "pass",
"Test": "TestACLs"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV1/event_with_mismatched_state_key"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV1/invite_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV1/knock_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV1/non-state_membership_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV2/event_with_mismatched_state_key"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV2/invite_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV2/knock_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV2/non-state_membership_event"
}
{
"Action": "pass",
"Test": "TestFederatedClientSpaces"
}
{
"Action": "pass",
"Test": "TestFederationRedactSendsWithoutEvent"
}
{
"Action": "pass",
"Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"
}
{
"Action": "pass",
"Test": "TestInboundFederationKeys"
}
{
"Action": "pass",
"Test": "TestInboundFederationProfile"
}
{
"Action": "pass",
"Test": "TestInboundFederationProfile/Inbound_federation_can_query_profile_data"
}
{
"Action": "pass",
"Test": "TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"
}
{
"Action": "pass",
"Test": "TestIsDirectFlagFederation"
}
{
"Action": "pass",
"Test": "TestIsDirectFlagLocal"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomFailOver"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join"
}
{
"Action": "pass",
"Test": "TestJoinViaRoomIDAndServerName"
}
{
"Action": "pass",
"Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"
}
{
"Action": "pass",
"Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"
}
{
"Action": "pass",
"Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock"
}
{
"Action": "pass",
"Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock#01"
}
{
"Action": "pass",
"Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"
}
{
"Action": "pass",
"Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"
}
{
"Action": "pass",
"Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"
}
{
"Action": "pass",
"Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"
}
{
"Action": "pass",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'"
}
{
"Action": "pass",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name"
}
{
"Action": "pass",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_upload_with_ASCII_file_name"
}
{
"Action": "pass",
"Test": "TestMediaFilenames/Parallel/Unicode/Can_upload_with_Unicode_file_name"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName/parallel"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_locally"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_over_federation"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName/parallel/Can_upload_without_a_file_name"
}
{
"Action": "pass",
"Test": "TestOutboundFederationProfile"
}
{
"Action": "pass",
"Test": "TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"
}
{
"Action": "pass",
"Test": "TestOutboundFederationSend"
}
{
"Action": "pass",
"Test": "TestRemoteAliasRequestsUnderstandUnicode"
}
{
"Action": "pass",
"Test": "TestRemotePngThumbnail"
}
{
"Action": "pass",
"Test": "TestRemoteTyping"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_initially"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_invited"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_initially"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_with_mangled_join_rules"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_invited"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_initially"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_initially"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_with_mangled_join_rules"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited"
}
{
"Action": "pass",
"Test": "TestToDeviceMessagesOverFederation/good_connectivity"
}
{
"Action": "pass",
"Test": "TestUnknownEndpoints/Client-server_endpoints"
}
{
"Action": "pass",
"Test": "TestUnknownEndpoints/Media_endpoints"
}
{
"Action": "pass",
"Test": "TestUnknownEndpoints/Server-server_endpoints"
}
{
"Action": "pass",
"Test": "TestUnknownEndpoints/Unknown_prefix"
}
{
"Action": "pass",
"Test": "TestUserAppearsInChangedDeviceListOnJoinOverFederation"
}
{
"Action": "pass",
"Test": "TestWriteMDirectAccountData"
}
{
"Action": "skip",
"Test": "TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_as_inline"
}
{
"Action": "skip",
"Test": "TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_with_parameters_as_inline"
}
{
"Action": "skip",
"Test": "TestMediaFilenames/Parallel/Unicode/Will_serve_unsafe_media_types_as_attachments"
}
{
"Action": "skip",
"Test": "TestSendJoinPartialStateResponse"
}

View file

@@ -1,284 +0,0 @@
{
"Action": "pass",
"Test": "TestACLs"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV1/event_with_mismatched_state_key"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV1/invite_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV1/knock_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV1/non-state_membership_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV2/event_with_mismatched_state_key"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV2/invite_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV2/knock_event"
}
{
"Action": "pass",
"Test": "TestCannotSendNonJoinViaSendJoinV2/non-state_membership_event"
}
{
"Action": "pass",
"Test": "TestFederatedClientSpaces"
}
{
"Action": "pass",
"Test": "TestFederationRedactSendsWithoutEvent"
}
{
"Action": "pass",
"Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"
}
{
"Action": "pass",
"Test": "TestInboundFederationKeys"
}
{
"Action": "pass",
"Test": "TestInboundFederationProfile"
}
{
"Action": "pass",
"Test": "TestInboundFederationProfile/Inbound_federation_can_query_profile_data"
}
{
"Action": "pass",
"Test": "TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"
}
{
"Action": "pass",
"Test": "TestIsDirectFlagFederation"
}
{
"Action": "pass",
"Test": "TestIsDirectFlagLocal"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomFailOver"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join"
}
{
"Action": "pass",
"Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join"
}
{
"Action": "pass",
"Test": "TestJoinViaRoomIDAndServerName"
}
{
"Action": "pass",
"Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"
}
{
"Action": "pass",
"Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"
}
{
"Action": "pass",
"Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock"
}
{
"Action": "pass",
"Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock#01"
}
{
"Action": "pass",
"Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"
}
{
"Action": "pass",
"Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"
}
{
"Action": "pass",
"Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"
}
{
"Action": "pass",
"Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"
}
{
"Action": "pass",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'"
}
{
"Action": "pass",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name"
}
{
"Action": "pass",
"Test": "TestMediaFilenames/Parallel/ASCII/Can_upload_with_ASCII_file_name"
}
{
"Action": "pass",
"Test": "TestMediaFilenames/Parallel/Unicode/Can_upload_with_Unicode_file_name"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName/parallel"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_locally"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_over_federation"
}
{
"Action": "pass",
"Test": "TestMediaWithoutFileName/parallel/Can_upload_without_a_file_name"
}
{
"Action": "pass",
"Test": "TestOutboundFederationProfile"
}
{
"Action": "pass",
"Test": "TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"
}
{
"Action": "pass",
"Test": "TestOutboundFederationSend"
}
{
"Action": "pass",
"Test": "TestRemoteAliasRequestsUnderstandUnicode"
}
{
"Action": "pass",
"Test": "TestRemotePngThumbnail"
}
{
"Action": "pass",
"Test": "TestRemoteTyping"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_initially"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_invited"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_initially"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_with_mangled_join_rules"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_invited"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_initially"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_initially"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_with_mangled_join_rules"
}
{
"Action": "pass",
"Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited"
}
{
"Action": "pass",
"Test": "TestToDeviceMessagesOverFederation/good_connectivity"
}
{
"Action": "pass",
"Test": "TestUnknownEndpoints/Client-server_endpoints"
}
{
"Action": "pass",
"Test": "TestUnknownEndpoints/Media_endpoints"
}
{
"Action": "pass",
"Test": "TestUnknownEndpoints/Server-server_endpoints"
}
{
"Action": "pass",
"Test": "TestUnknownEndpoints/Unknown_prefix"
}
{
"Action": "pass",
"Test": "TestUserAppearsInChangedDeviceListOnJoinOverFederation"
}
{
"Action": "pass",
"Test": "TestWriteMDirectAccountData"
}