Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git, synced 2025-07-07 23:26:24 +02:00

Merge branch 'main' into main

Commit afcf9d41ed: 74 changed files with 1193 additions and 888 deletions
.github/workflows/ci.yml (vendored, 7 changes)

@@ -3,15 +3,10 @@ name: CI and Artifacts
 on:
   pull_request:
   push:
-    # documentation workflow deals with this or is not relevant for this workflow
     paths-ignore:
-      - '*.md'
-      - 'conduwuit-example.toml'
-      - 'book.toml'
      - '.gitlab-ci.yml'
      - '.gitignore'
      - 'renovate.json'
-      - 'docs/**'
      - 'debian/**'
      - 'docker/**'
    branches:
@@ -23,7 +18,7 @@ on:
 
 concurrency:
   group: ${{ github.head_ref || github.ref_name }}
-  cancel-in-progress: false
+  cancel-in-progress: true
 
 env:
   # sccache only on main repo
@@ -131,7 +131,8 @@ allowed to be licenced under the Apache-2.0 licence and all of your conduct is
 in line with the Contributor's Covenant, and conduwuit's Code of Conduct.
 
 Contribution by users who violate either of these code of conducts will not have
-their contributions accepted.
+their contributions accepted. This includes users who have been banned from
+conduwuit Matrix rooms for Code of Conduct violations.
 
 [issues]: https://github.com/girlbossceo/conduwuit/issues
 [conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay
Cargo.lock (generated, 270 changes)

Dependency updates recorded in the lockfile:

- async-compression 0.4.17 -> 0.4.18
- aws-lc-rs 1.11.0 -> 1.11.1 (no longer depends on mirai-annotations)
- aws-lc-sys 0.23.0 -> 0.23.1
- axum 0.7.8 -> 0.7.9
- axum-extra 0.9.5 -> 0.9.6
- bytemuck 1.19.0 -> 1.20.0
- core-foundation 0.9.4 -> 0.10.0
- cpufeatures 0.2.15 -> 0.2.16
- ctor 0.2.8 -> 0.2.9
- h2 0.4.6 -> 0.4.7
- hyper 1.5.0 -> 1.5.1
- itoa 1.0.11 -> 1.0.13
- konst 0.3.9 -> 0.3.14, konst_kernel 0.3.9 -> 0.3.12
- libc 0.2.162 -> 0.2.164
- litemap 0.7.3 -> 0.7.4
- mirai-annotations 1.12.0 removed
- proc-macro2 1.0.89 -> 1.0.92
- ruma and the other ruwuma crates (ruma-appservice-api, ruma-client-api, ruma-common, ruma-events, ruma-federation-api, ruma-identifiers-validation, ruma-identity-service-api, ruma-macros, ruma-push-gateway-api, ruma-server-util, ruma-signatures, ruma-state-res): git rev 2ab432fba19eb8862c594d24af39d8f9f6b4eac6 -> 97e2fb6df13f65532d33fc2f0f097ad5a449dd70
- rustix 0.38.40 -> 0.38.41
- rustls 0.23.16 -> 0.23.18 (bumped everywhere it is referenced)
- rustls-native-certs 0.8.0 -> 0.8.1 (no longer depends on rustls-pemfile)
- rustyline-async 0.4.3: git rev 9654cc84e19241f6e19021eb8e677892656f5071 -> deaeb0694e2083f53d363b648da06e10fc13900c; now depends on thiserror 2.0.3 and unicode-width 0.2.0
- schannel 0.1.26 -> 0.1.27
- security-framework 2.11.1 -> 3.0.1
- serde_json 1.0.132 -> 1.0.133
- syn 2.0.87 -> 2.0.89 (bumped everywhere it is referenced)
- sync_wrapper 1.0.1 -> 1.0.2
- termimad 0.31.0 -> 0.31.1 (now pins unicode-width 0.1.14)
- tower-http 0.6.1 -> 0.6.2
- unicode-ident 1.0.13 -> 1.0.14
- unicode-width 0.2.0 added alongside the existing 0.1.14
- url 2.5.3 -> 2.5.4
- webpki-roots 0.26.6 -> 0.26.7
- yoke 0.7.4 -> 0.7.5, yoke-derive 0.7.4 -> 0.7.5
- zerofrom 0.1.4 -> 0.1.5, zerofrom-derive 0.1.4 -> 0.1.5
Cargo.toml (30 changes)

@@ -33,7 +33,7 @@ features = ["std", "serde"]
 version = "0.5.7"
 
 [workspace.dependencies.ctor]
-version = "0.2.8"
+version = "0.2.9"
 
 [workspace.dependencies.cargo_toml]
 version = "0.20"
@@ -82,7 +82,7 @@ version = "1.1.0"
 version = "1.11.1"
 
 [workspace.dependencies.axum]
-version = "0.7.5"
+version = "0.7.9"
 default-features = false
 features = [
     "form",
@@ -95,7 +95,7 @@ features = [
 ]
 
 [workspace.dependencies.axum-extra]
-version = "0.9.4"
+version = "0.9.6"
 default-features = false
 features = ["typed-header", "tracing"]
 
@@ -116,7 +116,7 @@ default-features = false
 features = ["util"]
 
 [workspace.dependencies.tower-http]
-version = "0.6.1"
+version = "0.6.2"
 default-features = false
 features = [
     "add-extension",
@@ -149,7 +149,7 @@ default-features = false
 features = ["rc"]
 
 [workspace.dependencies.serde_json]
-version = "1.0.132"
+version = "1.0.133"
 default-features = false
 features = ["raw_value"]
 
@@ -207,14 +207,13 @@ default-features = false
 version = "4.5.21"
 default-features = false
 features = [
-    "std",
     "derive",
-    "help",
-    #"color", Do we need these?
-    #"unicode",
-    "usage",
+    "env",
     "error-context",
+    "help",
+    "std",
     "string",
+    "usage",
 ]
 
 [workspace.dependencies.futures]
@@ -244,7 +243,7 @@ version = "0.8.5"
 
 # Validating urls in config, was already a transitive dependency
 [workspace.dependencies.url]
-version = "2.5.3"
+version = "2.5.4"
 default-features = false
 features = ["serde"]
 
@@ -255,7 +254,7 @@ features = ["alloc", "std"]
 default-features = false
 
 [workspace.dependencies.hyper]
-version = "1.5.0"
+version = "1.5.1"
 default-features = false
 features = [
     "server",
@@ -322,7 +321,7 @@ version = "0.1.2"
 [workspace.dependencies.ruma]
 git = "https://github.com/girlbossceo/ruwuma"
 #branch = "conduwuit-changes"
-rev = "2ab432fba19eb8862c594d24af39d8f9f6b4eac6"
+rev = "97e2fb6df13f65532d33fc2f0f097ad5a449dd70"
 features = [
     "compat",
     "rand",
@@ -335,6 +334,7 @@ features = [
     "server-util",
     "unstable-exhaustive-types",
     "ring-compat",
+    "compat-upload-signatures",
     "identifiers-validation",
     "unstable-unspecified",
     "unstable-msc2409",
@@ -460,7 +460,7 @@ version = "0.4.3"
 default-features = false
 
 [workspace.dependencies.termimad]
-version = "0.31.0"
+version = "0.31.1"
 default-features = false
 
 [workspace.dependencies.checked_ops]
@@ -504,7 +504,7 @@ rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91"
 # adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b
 [patch.crates-io.rustyline-async]
 git = "https://github.com/girlbossceo/rustyline-async"
-rev = "9654cc84e19241f6e19021eb8e677892656f5071"
+rev = "deaeb0694e2083f53d363b648da06e10fc13900c"
 
 #
 # Our crates
@@ -1,7 +1,6 @@
 # conduwuit
 
-`main`: [](https://matrix.to/#/#conduwuit:puppygock.gay) [](https://matrix.to/#/#conduwuit-space:puppygock.gay) [![CI and
-Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml)
+`main`: [](https://matrix.to/#/#conduwuit:puppygock.gay) [](https://matrix.to/#/#conduwuit-space:puppygock.gay) [](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml)
 
 <!-- ANCHOR: catchphrase -->
 
@@ -158,6 +158,10 @@
 #
 #eventidshort_cache_capacity = varies by system
 
+# This item is undocumented. Please contribute documentation for it.
+#
+#eventid_pdu_cache_capacity = varies by system
+
 # This item is undocumented. Please contribute documentation for it.
 #
 #shortstatekey_cache_capacity = varies by system
@@ -1350,6 +1354,13 @@
 
 [global.well_known]
 
+# The server URL that the client well-known file will serve. This should
+# not contain a port, and should just be a valid HTTPS URL.
+#
+# example: "https://matrix.example.com"
+#
+#client =
+
 # The server base domain of the URL with a specific port that the server
 # well-known file will serve. This should contain a port at the end, and
 # should not be a URL.
@@ -1358,13 +1369,6 @@
 #
 #server =
 
-# The server URL that the client well-known file will serve. This should
-# not contain a port, and should just be a valid HTTPS URL.
-#
-# example: "https://matrix.example.com"
-#
-#client =
-
 # This item is undocumented. Please contribute documentation for it.
 #
 #support_page =
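For orientation, here is a minimal sketch of the `[global.well_known]` block from the hunks above with both options uncommented, assuming the hypothetical domain `example.com` and port 443 for the server entry; the commented example config remains the authoritative reference.

```toml
# Hypothetical filled-in values, not shipped defaults:
# "client" is a full HTTPS URL without a port; "server" is host:port, not a URL.
[global.well_known]
client = "https://matrix.example.com"
server = "matrix.example.com:443"
```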
debian/README.md (vendored, 15 changes)

@@ -1,17 +1,22 @@
 # conduwuit for Debian
 
-Information about downloading and deploying the Debian package. This may also be referenced for other `apt`-based distros such as Ubuntu.
+Information about downloading and deploying the Debian package. This may also be
+referenced for other `apt`-based distros such as Ubuntu.
 
 ### Installation
 
-It is recommended to see the [generic deployment guide](../deploying/generic.md) for further information if needed as usage of the Debian package is generally related.
+It is recommended to see the [generic deployment guide](../deploying/generic.md)
+for further information if needed as usage of the Debian package is generally
+related.
 
 ### Configuration
 
-When installed, the example config is placed at `/etc/conduwuit/conduwuit.toml` as the default config. At the minimum, you will need to change your `server_name` here.
+When installed, the example config is placed at `/etc/conduwuit/conduwuit.toml`
+as the default config. The config mentions things required to be changed before
+starting.
 
-You can tweak more detailed settings by uncommenting and setting the config options
-in `/etc/conduwuit/conduwuit.toml`.
+You can tweak more detailed settings by uncommenting and setting the config
+options in `/etc/conduwuit/conduwuit.toml`.
 
 ### Running
 
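The Configuration section in the Debian README above only says the packaged config mentions what must be changed before starting; as a sketch, assuming `example.com` as the Matrix server name, the bare minimum would look like this, with every other option documented in the shipped `/etc/conduwuit/conduwuit.toml`.

```toml
# /etc/conduwuit/conduwuit.toml, minimal sketch with a hypothetical server name
[global]
server_name = "example.com"
```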
@@ -8,6 +8,7 @@
 - [Generic](deploying/generic.md)
 - [NixOS](deploying/nixos.md)
 - [Docker](deploying/docker.md)
+- [Kubernetes](deploying/kubernetes.md)
 - [Arch Linux](deploying/arch-linux.md)
 - [Debian](deploying/debian.md)
 - [FreeBSD](deploying/freebsd.md)
@@ -11,9 +11,9 @@ OCI images for conduwuit are available in the registries listed below.
 
 | Registry | Image | Size | Notes |
 | --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- |
-| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable tagged image. |
-| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable tagged image. |
-| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable tagged image. |
+| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable latest tagged image. |
+| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable latest tagged image. |
+| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable latest tagged image. |
 | GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh] | ![Image Size][shield-main] | Stable main branch. |
 | GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:main][gl] | ![Image Size][shield-main] | Stable main branch. |
 | Docker Hub | [docker.io/girlbossceo/conduwuit:main][dh] | ![Image Size][shield-main] | Stable main branch. |
@@ -92,16 +92,28 @@ Additional info about deploying conduwuit can be found [here](generic.md).
 
 ### Build
 
-To build the conduwuit image with docker-compose, you first need to open and
-modify the `docker-compose.yml` file. There you need to comment the `image:`
-option and uncomment the `build:` option. Then call docker compose with:
+Official conduwuit images are built using Nix's
+[`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are
+repeatable and reproducible by anyone, keeps the images lightweight, and can be
+built offline.
 
-```bash
-docker compose up
-```
+This also ensures portability of our images because `buildLayeredImage` builds
+OCI images, not Docker images, and works with other container software.
 
-This will also start the container right afterwards, so if want it to run in
-detached mode, you also should use the `-d` flag.
+The OCI images are OS-less with only a very minimal environment of the `tini`
+init system, CA certificates, and the conduwuit binary. This does mean there is
+not a shell, but in theory you can get a shell by adding the necessary layers
+to the layered image. However it's very unlikely you will need a shell for any
+real troubleshooting.
+
+The flake file for the OCI image definition is at [`nix/pkgs/oci-image/default.nix`][oci-image-def].
+
+To build an OCI image using Nix, the following outputs can be built:
+- `nix build -L .#oci-image` (default features, x86_64 glibc)
+- `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl)
+- `nix build -L .#oci-image-aarch64-linux-musl` (default features, aarch64 musl)
+- `nix build -L .#oci-image-x86_64-linux-musl-all-features` (all features, x86_64 musl)
+- `nix build -L .#oci-image-aarch64-linux-musl-all-features` (all features, aarch64 musl)
 
 ### Run
 
@@ -136,3 +148,6 @@ those two files.
 ## Voice communication
 
 See the [TURN](../turn.md) page.
+
+[nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage
+[oci-image-def]: https://github.com/girlbossceo/conduwuit/blob/main/nix/pkgs/oci-image/default.nix
@@ -54,13 +54,13 @@ While conduwuit can run as any user it is better to use dedicated users for
 different services. This also allows you to make sure that the file permissions
 are correctly set up.
 
-In Debian or Fedora/RHEL, you can use this command to create a conduwuit user:
+In Debian, you can use this command to create a conduwuit user:
 
 ```bash
 sudo adduser --system conduwuit --group --disabled-login --no-create-home
 ```
 
-For distros without `adduser`:
+For distros without `adduser` (or where it's a symlink to `useradd`):
 
 ```bash
 sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit
@@ -142,8 +142,8 @@ If using Nginx, you need to give conduwuit the request URI using `$request_uri`,
 - `proxy_pass http://127.0.0.1:6167$request_uri;`
 - `proxy_pass http://127.0.0.1:6167;`
 
-Nginx users may need to set `proxy_buffering off;` if there are issues with
-uploading media like images. This is due to Nginx storing the entire POST content in-memory (`/tmp`) and running out of memory if on low memory hardware.
+Nginx users need to increase `client_max_body_size` (default is 1M) to match
+`max_request_size` defined in conduwuit.toml.
 
 You will need to reverse proxy everything under following routes:
 - `/_matrix/` - core Matrix C-S and S-S APIs
@@ -1,4 +1,8 @@
 # conduwuit for Kubernetes
 
-conduwuit doesn't support horizontal scalability or distributed loading natively, however a community maintained Helm Chart is available here to run conduwuit on Kubernetes:
-<https://gitlab.cronce.io/charts/conduwuit>
+conduwuit doesn't support horizontal scalability or distributed loading
+natively, however a community maintained Helm Chart is available here to run
+conduwuit on Kubernetes: <https://gitlab.cronce.io/charts/conduwuit>
+
+Should changes need to be made, please reach out to the maintainer in our
+Matrix room as this is not maintained/controlled by the conduwuit maintainers.
@@ -55,15 +55,31 @@ appropriately to use conduwuit instead of Conduit.
 ### UNIX sockets
 
 Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module
-it is not possible to use UNIX sockets. This is because the UNIX socket option does not exist
-in Conduit, and their module forces listening on `[::1]:6167` by default if unspecified.
+a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX
+socket option does not exist in Conduit, and the module forcibly sets the `address` and
+`port` config options.
+
+```nix
+options.services.matrix-conduit.settings = lib.mkOption {
+  apply = old: old // (
+    if (old.global ? "unix_socket_path")
+    then { global = builtins.removeAttrs old.global [ "address" "port" ]; }
+    else { }
+  );
+};
+
+```
 
 Additionally, the [`matrix-conduit` systemd unit][systemd-unit] in the module does not allow
 the `AF_UNIX` socket address family in their systemd unit's `RestrictAddressFamilies=` which
-disallows the namespace from accessing or creating UNIX sockets.
+disallows the namespace from accessing or creating UNIX sockets and has to be enabled like so:
 
-There is no known workaround these. A conduwuit NixOS configuration module must be developed and
-published by the community.
+```nix
+systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ];
+```
+
+Even though those workarounds are feasible a conduwuit NixOS configuration module, developed and
+published by the community, would be appreciated.
 
 ### jemalloc and hardened profile
@@ -75,21 +75,21 @@ development (unresponsive or slow upstream), conduwuit-specific usecases, or
 lack of time to upstream some things.
 
 - [ruma/ruma][1]: <https://github.com/girlbossceo/ruwuma> - various performance
-  improvements, more features, faster-paced development, client/server interop
+  improvements, more features, faster-paced development, better client/server interop
   hacks upstream won't accept, etc
 - [facebook/rocksdb][2]: <https://github.com/girlbossceo/rocksdb> - liburing
-  build fixes, GCC build fix, and logging callback C API for Rust tracing
-  integration
+  build fixes and GCC debug build fix
 - [tikv/jemallocator][3]: <https://github.com/girlbossceo/jemallocator> - musl
-  builds seem to be broken on upstream
+  builds seem to be broken on upstream, fixes some broken/suspicious code in
+  places, additional safety measures, and support redzones for Valgrind
 - [zyansheep/rustyline-async][4]:
   <https://github.com/girlbossceo/rustyline-async> - tab completion callback and
-  `CTRL+\` signal quit event for CLI
+  `CTRL+\` signal quit event for conduwuit console CLI
 - [rust-rocksdb/rust-rocksdb][5]:
-  <https://github.com/girlbossceo/rust-rocksdb-zaidoon1> - [`@zaidoon1`'s][8] fork
-  has quicker updates, more up to date dependencies. Our changes fix musl build
-  issues, Rust part of the logging callback C API, removes unnecessary `gtest`
-  include, and uses our RocksDB and jemallocator
+  <https://github.com/girlbossceo/rust-rocksdb-zaidoon1> - [`@zaidoon1`][8]'s fork
+  has quicker updates, more up to date dependencies, etc. Our fork fixes musl build
+  issues, removes unnecessary `gtest` include, and uses our RocksDB and jemallocator
+  forks.
 - [tokio-rs/tracing][6]: <https://github.com/girlbossceo/tracing> - Implements
   `Clone` for `EnvFilter` to support dynamically changing tracing envfilter's
   alongside other logging/metrics things
@@ -103,12 +103,16 @@ tokio_unstable` flag to enable experimental tokio APIs. A build might look like
 this:
 
 ```bash
-RUSTFLAGS="--cfg tokio_unstable" cargo build \
+RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
     --release \
     --no-default-features \
     --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
 ```
 
+You will also need to enable the `tokio_console` config option in conduwuit when
+starting it. This was due to tokio-console causing gradual memory leak/usage
+if left enabled.
+
 [1]: https://github.com/ruma/ruma/
 [2]: https://github.com/facebook/rocksdb/
 [3]: https://github.com/tikv/jemallocator/
@@ -5,8 +5,8 @@
 Have a look at [Complement's repository][complement] for an explanation of what
 it is.
 
-To test against Complement, with Nix (or [Lix](https://lix.systems) and direnv installed
-and set up, you can:
+To test against Complement, with Nix (or [Lix](https://lix.systems) and direnv
+installed and set up, you can:
 
 * Run `./bin/complement "$COMPLEMENT_SRC" ./path/to/logs.jsonl
 ./path/to/results.jsonl` to build a Complement image, run the tests, and output
@@ -106,24 +106,46 @@ Various debug commands can be found in `!admin debug`.
 
 #### Debug/Trace log level
 
-conduwuit builds without debug or trace log levels by default for at least
-performance reasons. This may change in the future and/or binaries providing
-such configurations may be provided. If you need to access debug/trace log
-levels, you will need to build without the `release_max_log_level` feature.
+conduwuit builds without debug or trace log levels at compile time by default
+for substantial performance gains in CPU usage and improved compile times. If
+you need to access debug/trace log levels, you will need to build without the
+`release_max_log_level` feature or use our provided static debug binaries.
 
 #### Changing log level dynamically
 
 conduwuit supports changing the tracing log environment filter on-the-fly using
-the admin command `!admin debug change-log-level`. This accepts a string
-**without quotes** the same format as the `log` config option.
+the admin command `!admin debug change-log-level <log env filter>`. This accepts
+a string **without quotes** the same format as the `log` config option.
+
+Example: `!admin debug change-log-level debug`
+
+This can also accept complex filters such as:
+`!admin debug change-log-level info,conduit_service[{dest="example.com"}]=trace,ruma_state_res=trace`
+`!admin debug change-log-level info,conduit_service[{dest="example.com"}]=trace,conduit_service[send{dest="example.org"}]=trace`
+
+And to reset the log level to the one that was set at startup / last config
+load, simply pass the `--reset` flag.
+
+`!admin debug change-log-level --reset`
 
 #### Pinging servers
 
-conduwuit can ping other servers using `!admin debug ping`. This takes a server
-name and goes through the server discovery process and queries
+conduwuit can ping other servers using `!admin debug ping <server>`. This takes
+a server name and goes through the server discovery process and queries
 `/_matrix/federation/v1/version`. Errors are outputted.
 
+While it does measure the latency of the request, it is not indicative of
+server performance on either side as that endpoint is completely unauthenticated
+and simply fetches a string on a static JSON endpoint. It is very low cost both
+bandwidth and computationally.
+
 #### Allocator memory stats
 
-When using jemalloc with jemallocator's `stats` feature, you can see conduwuit's
-jemalloc memory stats by using `!admin debug memory-stats`
+When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you
+can see conduwuit's high-level allocator stats by using
+`!admin server memory-usage` at the bottom.
+
+If you are a developer, you can also view the raw jemalloc statistics with
+`!admin debug memory-stats`. Please note that this output is extremely large
+which may only be visible in the conduwuit console CLI due to PDU size limits,
+and is not easy for non-developers to understand.
engage.toml | 10
@@ -188,6 +188,16 @@ cargo test \
     --color=always
 """
 
+# Checks if the generated example config differs from the checked in repo's
+# example config.
+[[task]]
+name = "example-config"
+group = "tests"
+depends = ["cargo/default"]
+script = """
+git diff --exit-code conduwuit-example.toml
+"""
+
 # Ensure that the flake's default output can build and run without crashing
 #
 # This is a dynamically-linked jemalloc build, which is a case not covered by
@@ -9,7 +9,7 @@ use crate::{
 };
 
 #[derive(Debug, Parser)]
-#[command(name = "admin", version = env!("CARGO_PKG_VERSION"))]
+#[command(name = "conduwuit", version = conduit::version())]
 pub(super) enum AdminCommand {
     #[command(subcommand)]
     /// - Commands for managing appservices
@@ -1,6 +1,7 @@
 use std::{
     collections::HashMap,
     fmt::Write,
+    iter::once,
     sync::Arc,
     time::{Instant, SystemTime},
 };
@@ -43,7 +44,7 @@ pub(super) async fn get_auth_chain(&self, event_id: Box<EventId>) -> Result<Room
     .services
     .rooms
     .auth_chain
-    .event_ids_iter(room_id, &[&event_id])
+    .event_ids_iter(room_id, once(event_id.as_ref()))
     .await?
     .count()
    .await;
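Reviewer note on the `once(event_id.as_ref())` change above: the auth-chain lookup now takes an iterator of event IDs instead of a slice. A minimal, hedged sketch of that calling pattern, with a hypothetical `count_ids` standing in for `event_ids_iter`:

```rust
use std::iter::once;

// Hypothetical stand-in for an API that accepts any iterator of IDs;
// `once` lets a single ID be passed without building a slice or Vec.
fn count_ids<'a, I>(ids: I) -> usize
where
    I: IntoIterator<Item = &'a str>,
{
    ids.into_iter().count()
}

fn main() {
    // Single event ID, no temporary slice needed.
    assert_eq!(count_ids(once("$event:example.com")), 1);
    // The same API still accepts a whole collection.
    assert_eq!(count_ids(["$a:example.com", "$b:example.com"]), 2);
}
```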
@@ -157,10 +157,7 @@ fn parse<'a>(
         let message = error
             .to_string()
             .replace("server.name", services.globals.server_name().as_str());
-        Err(reply(
-            RoomMessageEventContent::notice_markdown(message),
-            input.reply_id.as_deref(),
-        ))
+        Err(reply(RoomMessageEventContent::notice_plain(message), input.reply_id.as_deref()))
     },
 }
 }
@@ -1,5 +1,6 @@
 use clap::Subcommand;
 use conduit::Result;
+use futures::StreamExt;
 use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId};
 
 use crate::Command;
@@ -39,10 +40,11 @@ pub(super) async fn process(subcommand: AccountDataCommand, context: &Command<'_
             room_id,
         } => {
             let timer = tokio::time::Instant::now();
-            let results = services
+            let results: Vec<_> = services
                 .account_data
                 .changes_since(room_id.as_deref(), &user_id, since)
-                .await?;
+                .collect()
+                .await;
             let query_time = timer.elapsed();
 
             Ok(RoomMessageEventContent::notice_markdown(format!(
@@ -169,12 +169,14 @@ pub(crate) async fn get_context_route(
         start: events_before
             .last()
             .map(at!(0))
+            .or(Some(base_token))
             .as_ref()
             .map(ToString::to_string),
 
         end: events_after
             .last()
             .map(at!(0))
+            .or(Some(base_token))
             .as_ref()
             .map(ToString::to_string),
 
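The `.or(Some(base_token))` fallback above means the context response always carries start/end tokens, even when no events exist before or after the base event. A small hedged sketch of that fallback with plain integers in place of the real token type:

```rust
// Minimal sketch: fall back to the base token when there are no events.
fn start_token(events: &[(u64, &str)], base_token: u64) -> Option<String> {
    events
        .last()
        .map(|(count, _)| *count)
        .or(Some(base_token))
        .as_ref()
        .map(ToString::to_string)
}

fn main() {
    assert_eq!(start_token(&[], 42), Some("42".to_owned()));
    assert_eq!(start_token(&[(7, "$e:example.com")], 42), Some("7".to_owned()));
}
```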
@@ -136,8 +136,6 @@ pub(crate) async fn get_message_events_route(
         .collect()
         .await;
 
-    let start_token = events.first().map(at!(0)).unwrap_or(from);
-
     let next_token = events.last().map(at!(0));
 
     if !cfg!(feature = "element_hacks") {
@@ -156,7 +154,7 @@ pub(crate) async fn get_message_events_route(
         .collect();
 
     Ok(get_message_events::v3::Response {
-        start: start_token.to_string(),
+        start: from.to_string(),
         end: next_token.as_ref().map(ToString::to_string),
         chunk,
         state,
@@ -37,6 +37,7 @@ pub(super) mod unstable;
 pub(super) mod unversioned;
 pub(super) mod user_directory;
 pub(super) mod voip;
+pub(super) mod well_known;
 
 pub use account::full_user_deactivate;
 pub(super) use account::*;
@@ -80,6 +81,7 @@ pub(super) use unstable::*;
 pub(super) use unversioned::*;
 pub(super) use user_directory::*;
 pub(super) use voip::*;
+pub(super) use well_known::*;
 
 /// generated device ID length
 const DEVICE_ID_LENGTH: usize = 10;
@@ -52,8 +52,8 @@ pub(crate) async fn get_presence_route(
 
     let has_shared_rooms = services
         .rooms
-        .user
-        .has_shared_rooms(sender_user, &body.user_id)
+        .state_cache
+        .user_sees_user(sender_user, &body.user_id)
         .await;
 
     if has_shared_rooms {
@@ -441,9 +441,12 @@ pub(crate) async fn set_pushers_route(
 ) -> Result<set_pusher::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    services.pusher.set_pusher(sender_user, &body.action);
+    services
+        .pusher
+        .set_pusher(sender_user, &body.action)
+        .await?;
 
-    Ok(set_pusher::v3::Response::default())
+    Ok(set_pusher::v3::Response::new())
 }
 
 /// user somehow has bad push rules, these must always exist per spec.
src/api/client/room/initial_sync.rs | 72 (new file)
@@ -0,0 +1,72 @@
+use axum::extract::State;
+use conduit::{at, utils::BoolExt, Err, Result};
+use futures::StreamExt;
+use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response};
+
+use crate::Ruma;
+
+const LIMIT_MAX: usize = 100;
+
+pub(crate) async fn room_initial_sync_route(
+    State(services): State<crate::State>, body: Ruma<Request>,
+) -> Result<Response> {
+    let room_id = &body.room_id;
+
+    if !services
+        .rooms
+        .state_accessor
+        .user_can_see_state_events(body.sender_user(), room_id)
+        .await
+    {
+        return Err!(Request(Forbidden("No room preview available.")));
+    }
+
+    let limit = LIMIT_MAX;
+    let events: Vec<_> = services
+        .rooms
+        .timeline
+        .pdus_rev(None, room_id, None)
+        .await?
+        .take(limit)
+        .collect()
+        .await;
+
+    let state: Vec<_> = services
+        .rooms
+        .state_accessor
+        .room_state_full_pdus(room_id)
+        .await?
+        .into_iter()
+        .map(|pdu| pdu.to_state_event())
+        .collect();
+
+    let messages = PaginationChunk {
+        start: events.last().map(at!(0)).as_ref().map(ToString::to_string),
+
+        end: events
+            .first()
+            .map(at!(0))
+            .as_ref()
+            .map(ToString::to_string)
+            .unwrap_or_default(),
+
+        chunk: events
+            .into_iter()
+            .map(at!(1))
+            .map(|pdu| pdu.to_room_event())
+            .collect(),
+    };
+
+    Ok(Response {
+        room_id: room_id.to_owned(),
+        account_data: None,
+        state: state.into(),
+        messages: messages.chunk.is_empty().or_some(messages),
+        visibility: services.rooms.directory.visibility(room_id).await.into(),
+        membership: services
+            .rooms
+            .state_cache
+            .user_membership(body.sender_user(), room_id)
+            .await,
+    })
+}
@@ -1,9 +1,10 @@
 mod aliases;
 mod create;
 mod event;
+mod initial_sync;
 mod upgrade;
 
 pub(crate) use self::{
     aliases::get_room_aliases_route, create::create_room_route, event::get_room_event_route,
-    upgrade::upgrade_room_route,
+    initial_sync::room_initial_sync_route, upgrade::upgrade_room_route,
 };
@@ -198,8 +198,10 @@ pub(crate) async fn login_route(
 
     // send client well-known if specified so the client knows to reconfigure itself
     let client_discovery_info: Option<DiscoveryInfo> = services
-        .globals
-        .well_known_client()
+        .server
+        .config
+        .well_known
+        .client
         .as_ref()
         .map(|server| DiscoveryInfo::new(HomeserverInfo::new(server.to_string())));
 
@@ -9,7 +9,8 @@ pub(crate) use self::{v3::sync_events_route, v4::sync_events_v4_route};
 use crate::{service::Services, Error, PduEvent, Result};
 
 async fn load_timeline(
-    services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: usize,
+    services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount,
+    next_batch: Option<PduCount>, limit: usize,
 ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> {
     let last_timeline_count = services
         .rooms
@@ -26,7 +27,8 @@ async fn load_timeline(
         .timeline
         .pdus_rev(Some(sender_user), room_id, None)
         .await?
-        .ready_take_while(|(pducount, _)| *pducount > roomsincecount);
+        .ready_skip_while(|&(pducount, _)| pducount > next_batch.unwrap_or_else(PduCount::max))
+        .ready_take_while(|&(pducount, _)| pducount > roomsincecount);
 
     // Take the last events for the timeline
     let timeline_pdus: Vec<_> = non_timeline_pdus
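The new `next_batch` parameter above bounds the timeline window from the top as well as from the bottom: events are skipped while they are newer than `next_batch`, then taken while they are newer than the since-count. A simplified, hedged sketch of that windowing with plain integers in place of `PduCount` and a synchronous iterator in place of the async stream:

```rust
// Assumed simplification: counts arrive in descending order, exactly as
// pdus_rev would yield them.
fn window(counts: &[u64], since: u64, next_batch: Option<u64>) -> Vec<u64> {
    counts
        .iter()
        .copied()
        .skip_while(|&c| c > next_batch.unwrap_or(u64::MAX))
        .take_while(|&c| c > since)
        .collect()
}

fn main() {
    let counts = [9, 8, 7, 6, 5, 4];
    // Only events between the since-count (5) and the next_batch token (8).
    assert_eq!(window(&counts, 5, Some(8)), vec![8, 7, 6]);
    // Without a next_batch, everything newer than `since` is kept.
    assert_eq!(window(&counts, 7, None), vec![9, 8]);
}
```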
@@ -50,7 +52,7 @@ async fn share_encrypted_room(
 ) -> bool {
     services
         .rooms
-        .user
+        .state_cache
         .get_shared_rooms(sender_user, user_id)
         .ready_filter(|&room_id| Some(room_id) != ignore_room)
         .any(|other_room_id| {
@@ -275,10 +275,9 @@ pub(crate) async fn sync_events_route(
             events: services
                 .account_data
                 .changes_since(None, &sender_user, since)
-                .await?
-                .into_iter()
-                .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
-                .collect(),
+                .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
+                .collect()
+                .await,
         },
         device_lists: DeviceLists {
             changed: device_list_updates.into_iter().collect(),
@@ -540,7 +539,8 @@ async fn load_joined_room(
     let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await;
     drop(insert_lock);
 
-    let (timeline_pdus, limited) = load_timeline(services, sender_user, room_id, sincecount, 10_usize).await?;
+    let (timeline_pdus, limited) =
+        load_timeline(services, sender_user, room_id, sincecount, Some(next_batchcount), 10_usize).await?;
 
     let send_notification_counts = !timeline_pdus.is_empty()
         || services
@@ -757,7 +757,6 @@ async fn load_joined_room(
             };
 
             delta_state_events.push(pdu);
-            tokio::task::yield_now().await;
         }
     }
 }
@@ -946,7 +945,6 @@ async fn load_joined_room(
     let prev_batch = timeline_pdus
         .first()
         .map(at!(0))
-        .map(|count| count.saturating_sub(1))
         .as_ref()
         .map(ToString::to_string);
 
@@ -1023,10 +1021,9 @@ async fn load_joined_room(
             events: services
                 .account_data
                 .changes_since(Some(room_id), sender_user, since)
-                .await?
-                .into_iter()
-                .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
-                .collect(),
+                .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
+                .collect()
+                .await,
         },
         summary: RoomSummary {
             heroes,
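The account-data hunks above (and the matching ones in the v4 handler below) change the shape of the pipeline rather than its result: instead of awaiting a `Vec` and filtering it with iterators, the changes are filtered as a stream and collected once. A hedged, self-contained sketch of that shape using the `futures` crate and a hypothetical event enum in place of `AnyRawAccountDataEvent`:

```rust
use futures::{executor::block_on, stream, StreamExt};

// Hypothetical stand-in for the account-data event type.
#[derive(Debug, PartialEq)]
enum Event {
    Global(u32),
    Room(u32),
}

fn main() {
    block_on(async {
        let changes = stream::iter(vec![Event::Global(1), Event::Room(2), Event::Global(3)]);
        // Filter while streaming, then collect once at the end.
        let globals: Vec<u32> = changes
            .filter_map(|e| async move {
                match e {
                    Event::Global(n) => Some(n),
                    Event::Room(_) => None,
                }
            })
            .collect()
            .await;
        assert_eq!(globals, vec![1, 3]);
    });
}
```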
@@ -136,10 +136,9 @@ pub(crate) async fn sync_events_v4_route(
     account_data.global = services
         .account_data
         .changes_since(None, sender_user, globalsince)
-        .await?
-        .into_iter()
-        .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
-        .collect();
+        .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
+        .collect()
+        .await;
 
     if let Some(rooms) = body.extensions.account_data.rooms {
         for room in rooms {
@@ -148,10 +147,9 @@ pub(crate) async fn sync_events_v4_route(
                 services
                     .account_data
                     .changes_since(Some(&room), sender_user, globalsince)
-                    .await?
-                    .into_iter()
-                    .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
-                    .collect(),
+                    .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
+                    .collect()
+                    .await,
             );
         }
     }
@@ -473,7 +471,7 @@ pub(crate) async fn sync_events_v4_route(
             (timeline_pdus, limited) = (Vec::new(), true);
         } else {
             (timeline_pdus, limited) =
-                match load_timeline(&services, sender_user, room_id, roomsincecount, *timeline_limit).await {
+                match load_timeline(&services, sender_user, room_id, roomsincecount, None, *timeline_limit).await {
                     Ok(value) => value,
                     Err(err) => {
                         warn!("Encountered missing timeline in {}, error {}", room_id, err);
@@ -487,10 +485,9 @@ pub(crate) async fn sync_events_v4_route(
             services
                 .account_data
                 .changes_since(Some(room_id), sender_user, *roomsince)
-                .await?
-                .into_iter()
-                .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
-                .collect(),
+                .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
+                .collect()
+                .await,
             );
 
     let vector: Vec<_> = services
@@ -55,7 +55,7 @@ pub(crate) async fn get_mutual_rooms_route(
 
     let mutual_rooms: Vec<OwnedRoomId> = services
         .rooms
-        .user
+        .state_cache
         .get_shared_rooms(sender_user, &body.user_id)
         .map(ToOwned::to_owned)
         .collect()
@@ -2,16 +2,9 @@ use std::collections::BTreeMap;
 
 use axum::{extract::State, response::IntoResponse, Json};
 use futures::StreamExt;
-use ruma::api::client::{
-    discovery::{
-        discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},
-        discover_support::{self, Contact},
-        get_supported_versions,
-    },
-    error::ErrorKind,
-};
+use ruma::api::client::discovery::get_supported_versions;
 
-use crate::{Error, Result, Ruma};
+use crate::{Result, Ruma};
 
 /// # `GET /_matrix/client/versions`
 ///
@@ -65,99 +58,6 @@ pub(crate) async fn get_supported_versions_route(
     Ok(resp)
 }
 
-/// # `GET /.well-known/matrix/client`
-///
-/// Returns the .well-known URL if it is configured, otherwise returns 404.
-pub(crate) async fn well_known_client(
-    State(services): State<crate::State>, _body: Ruma<discover_homeserver::Request>,
-) -> Result<discover_homeserver::Response> {
-    let client_url = match services.globals.well_known_client() {
-        Some(url) => url.to_string(),
-        None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
-    };
-
-    Ok(discover_homeserver::Response {
-        homeserver: HomeserverInfo {
-            base_url: client_url.clone(),
-        },
-        identity_server: None,
-        sliding_sync_proxy: Some(SlidingSyncProxyInfo {
-            url: client_url,
-        }),
-        tile_server: None,
-    })
-}
-
-/// # `GET /.well-known/matrix/support`
-///
-/// Server support contact and support page of a homeserver's domain.
-pub(crate) async fn well_known_support(
-    State(services): State<crate::State>, _body: Ruma<discover_support::Request>,
-) -> Result<discover_support::Response> {
-    let support_page = services
-        .globals
-        .well_known_support_page()
-        .as_ref()
-        .map(ToString::to_string);
-
-    let role = services.globals.well_known_support_role().clone();
-
-    // support page or role must be either defined for this to be valid
-    if support_page.is_none() && role.is_none() {
-        return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
-    }
-
-    let email_address = services.globals.well_known_support_email().clone();
-    let matrix_id = services.globals.well_known_support_mxid().clone();
-
-    // if a role is specified, an email address or matrix id is required
-    if role.is_some() && (email_address.is_none() && matrix_id.is_none()) {
-        return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
-    }
-
-    // TOOD: support defining multiple contacts in the config
-    let mut contacts: Vec<Contact> = vec![];
-
-    if let Some(role) = role {
-        let contact = Contact {
-            role,
-            email_address,
-            matrix_id,
-        };
-
-        contacts.push(contact);
-    }
-
-    // support page or role+contacts must be either defined for this to be valid
-    if contacts.is_empty() && support_page.is_none() {
-        return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
-    }
-
-    Ok(discover_support::Response {
-        contacts,
-        support_page,
-    })
-}
-
-/// # `GET /client/server.json`
-///
-/// Endpoint provided by sliding sync proxy used by some clients such as Element
-/// Web as a non-standard health check.
-pub(crate) async fn syncv3_client_server_json(State(services): State<crate::State>) -> Result<impl IntoResponse> {
-    let server_url = match services.globals.well_known_client() {
-        Some(url) => url.to_string(),
-        None => match services.globals.well_known_server() {
-            Some(url) => url.to_string(),
-            None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
-        },
-    };
-
-    Ok(Json(serde_json::json!({
-        "server": server_url,
-        "version": conduit::version(),
-    })))
-}
-
 /// # `GET /_conduwuit/server_version`
 ///
 /// Conduwuit-specific API to get the server version, results akin to
@@ -71,8 +71,8 @@ pub(crate) async fn search_users_route(
         } else {
             let user_is_in_shared_rooms = services
                 .rooms
-                .user
-                .has_shared_rooms(sender_user, &user.user_id)
+                .state_cache
+                .user_sees_user(sender_user, &user.user_id)
                 .await;
 
             if user_is_in_shared_rooms {
src/api/client/well_known.rs | 105 (new file)
@@ -0,0 +1,105 @@
+use axum::{extract::State, response::IntoResponse, Json};
+use ruma::api::client::{
+    discovery::{
+        discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},
+        discover_support::{self, Contact},
+    },
+    error::ErrorKind,
+};
+
+use crate::{Error, Result, Ruma};
+
+/// # `GET /.well-known/matrix/client`
+///
+/// Returns the .well-known URL if it is configured, otherwise returns 404.
+pub(crate) async fn well_known_client(
+    State(services): State<crate::State>, _body: Ruma<discover_homeserver::Request>,
+) -> Result<discover_homeserver::Response> {
+    let client_url = match services.server.config.well_known.client.as_ref() {
+        Some(url) => url.to_string(),
+        None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
+    };
+
+    Ok(discover_homeserver::Response {
+        homeserver: HomeserverInfo {
+            base_url: client_url.clone(),
+        },
+        identity_server: None,
+        sliding_sync_proxy: Some(SlidingSyncProxyInfo {
+            url: client_url,
+        }),
+        tile_server: None,
+    })
+}
+
+/// # `GET /.well-known/matrix/support`
+///
+/// Server support contact and support page of a homeserver's domain.
+pub(crate) async fn well_known_support(
+    State(services): State<crate::State>, _body: Ruma<discover_support::Request>,
+) -> Result<discover_support::Response> {
+    let support_page = services
+        .server
+        .config
+        .well_known
+        .support_page
+        .as_ref()
+        .map(ToString::to_string);
+
+    let role = services.server.config.well_known.support_role.clone();
+
+    // support page or role must be either defined for this to be valid
+    if support_page.is_none() && role.is_none() {
+        return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
+    }
+
+    let email_address = services.server.config.well_known.support_email.clone();
+    let matrix_id = services.server.config.well_known.support_mxid.clone();
+
+    // if a role is specified, an email address or matrix id is required
+    if role.is_some() && (email_address.is_none() && matrix_id.is_none()) {
+        return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
+    }
+
+    // TOOD: support defining multiple contacts in the config
+    let mut contacts: Vec<Contact> = vec![];
+
+    if let Some(role) = role {
+        let contact = Contact {
+            role,
+            email_address,
+            matrix_id,
+        };
+
+        contacts.push(contact);
+    }
+
+    // support page or role+contacts must be either defined for this to be valid
+    if contacts.is_empty() && support_page.is_none() {
+        return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
+    }
+
+    Ok(discover_support::Response {
+        contacts,
+        support_page,
+    })
+}
+
+/// # `GET /client/server.json`
+///
+/// Endpoint provided by sliding sync proxy used by some clients such as Element
+/// Web as a non-standard health check.
+pub(crate) async fn syncv3_client_server_json(State(services): State<crate::State>) -> Result<impl IntoResponse> {
+    let server_url = match services.server.config.well_known.client.as_ref() {
+        Some(url) => url.to_string(),
+        None => match services.server.config.well_known.server.as_ref() {
+            Some(url) => url.to_string(),
+            None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
+        },
+    };
+
+    Ok(Json(serde_json::json!({
+        "server": server_url,
+        "version": conduit::version(),
+    })))
+}
@@ -183,8 +183,7 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
         .ruma_route(&client::well_known_support)
         .ruma_route(&client::well_known_client)
         .route("/_conduwuit/server_version", get(client::conduwuit_server_version))
-        .route("/_matrix/client/r0/rooms/:room_id/initialSync", get(initial_sync))
-        .route("/_matrix/client/v3/rooms/:room_id/initialSync", get(initial_sync))
+        .ruma_route(&client::room_initial_sync_route)
         .route("/client/server.json", get(client::syncv3_client_server_json));
 
     if config.allow_federation {
@@ -285,10 +284,6 @@ async fn redirect_legacy_preview(uri: Uri) -> impl IntoResponse {
     Redirect::temporary(&uri)
 }
 
-async fn initial_sync(_uri: Uri) -> impl IntoResponse {
-    err!(Request(GuestAccessForbidden("Guest access not implemented")))
-}
-
 async fn legacy_media_disabled() -> impl IntoResponse { err!(Request(Forbidden("Unauthenticated media is disabled."))) }
 
 async fn federation_disabled() -> impl IntoResponse { err!(Request(Forbidden("Federation is disabled."))) }
@@ -1,4 +1,4 @@
-use std::borrow::Borrow;
+use std::{borrow::Borrow, iter::once};
 
 use axum::extract::State;
 use conduit::{Error, Result};
@@ -46,7 +46,7 @@ pub(crate) async fn get_event_authorization_route(
     let auth_chain = services
         .rooms
         .auth_chain
-        .event_ids_iter(room_id, &[body.event_id.borrow()])
+        .event_ids_iter(room_id, once(body.event_id.borrow()))
         .await?
         .filter_map(|id| async move { services.rooms.timeline.get_pdu_json(&id).await.ok() })
         .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu))
@@ -11,7 +11,7 @@ use ruma::{
         room::member::{MembershipState, RoomMemberEventContent},
         StateEventType,
     },
-    CanonicalJsonValue, EventId, OwnedServerName, OwnedUserId, RoomId, ServerName,
+    CanonicalJsonValue, OwnedServerName, OwnedUserId, RoomId, ServerName,
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
 use service::Services;
@@ -184,11 +184,11 @@ async fn create_join_event(
     .try_collect()
     .await?;
 
-    let starting_events: Vec<&EventId> = state_ids.values().map(Borrow::borrow).collect();
+    let starting_events = state_ids.values().map(Borrow::borrow);
     let auth_chain = services
         .rooms
         .auth_chain
-        .event_ids_iter(room_id, &starting_events)
+        .event_ids_iter(room_id, starting_events)
         .await?
         .map(Ok)
         .and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await })
@@ -1,4 +1,4 @@
-use std::borrow::Borrow;
+use std::{borrow::Borrow, iter::once};
 
 use axum::extract::State;
 use conduit::{err, result::LogErr, utils::IterStream, Result};
@@ -52,7 +52,7 @@ pub(crate) async fn get_room_state_route(
     let auth_chain = services
         .rooms
         .auth_chain
-        .event_ids_iter(&body.room_id, &[body.event_id.borrow()])
+        .event_ids_iter(&body.room_id, once(body.event_id.borrow()))
         .await?
         .map(Ok)
         .and_then(|id| async move { services.rooms.timeline.get_pdu_json(&id).await })
@@ -1,4 +1,4 @@
-use std::borrow::Borrow;
+use std::{borrow::Borrow, iter::once};
 
 use axum::extract::State;
 use conduit::{err, Result};
@@ -44,7 +44,7 @@ pub(crate) async fn get_room_state_ids_route(
     let auth_chain_ids = services
         .rooms
         .auth_chain
-        .event_ids_iter(&body.room_id, &[body.event_id.borrow()])
+        .event_ids_iter(&body.room_id, once(body.event_id.borrow()))
         .await?
         .map(|id| (*id).to_owned())
         .collect()
@@ -10,7 +10,7 @@ pub(crate) async fn well_known_server(
     State(services): State<crate::State>, _body: Ruma<discover_homeserver::Request>,
 ) -> Result<discover_homeserver::Response> {
     Ok(discover_homeserver::Response {
-        server: match services.globals.well_known_server() {
+        server: match services.server.config.well_known.server.as_ref() {
             Some(server_name) => server_name.to_owned(),
             None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
         },
@@ -87,7 +87,8 @@ pub struct Config {
     port: ListeningPort,
 
     // external structure; separate section
-    pub tls: Option<TlsConfig>,
+    #[serde(default)]
+    pub tls: TlsConfig,
 
     /// Uncomment unix_socket_path to listen on a UNIX socket at the specified
     /// path. If listening on a UNIX socket, you MUST remove/comment the
|
||||||
#[serde(default = "default_eventidshort_cache_capacity")]
|
#[serde(default = "default_eventidshort_cache_capacity")]
|
||||||
pub eventidshort_cache_capacity: u32,
|
pub eventidshort_cache_capacity: u32,
|
||||||
|
|
||||||
|
/// default: varies by system
|
||||||
|
#[serde(default = "default_eventid_pdu_cache_capacity")]
|
||||||
|
pub eventid_pdu_cache_capacity: u32,
|
||||||
|
|
||||||
/// default: varies by system
|
/// default: varies by system
|
||||||
#[serde(default = "default_shortstatekey_cache_capacity")]
|
#[serde(default = "default_shortstatekey_cache_capacity")]
|
||||||
pub shortstatekey_cache_capacity: u32,
|
pub shortstatekey_cache_capacity: u32,
|
||||||
|
@@ -1496,39 +1501,47 @@
     catchall: BTreeMap<String, IgnoredAny>,
 }
 
-#[derive(Clone, Debug, Deserialize)]
+#[derive(Clone, Debug, Deserialize, Default)]
 #[config_example_generator(filename = "conduwuit-example.toml", section = "global.tls")]
 pub struct TlsConfig {
     /// Path to a valid TLS certificate file.
     ///
     /// example: "/path/to/my/certificate.crt"
-    pub certs: String,
+    pub certs: Option<String>,
 
     /// Path to a valid TLS certificate private key.
     ///
     /// example: "/path/to/my/certificate.key"
-    pub key: String,
+    pub key: Option<String>,
 
     /// Whether to listen and allow for HTTP and HTTPS connections (insecure!)
     #[serde(default)]
     pub dual_protocol: bool,
 }
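The practical effect of the `Default` derive and `#[serde(default)]` above is that a config file may omit the `[global.tls]` table entirely, and omit `certs`/`key` inside it. A hedged, self-contained sketch of that serde pattern (simplified field set, `toml` crate assumed for the example):

```rust
use serde::Deserialize;

#[derive(Debug, Default, Deserialize)]
struct TlsConfig {
    certs: Option<String>,
    key: Option<String>,
    #[serde(default)]
    dual_protocol: bool,
}

#[derive(Debug, Deserialize)]
struct Config {
    // With Default derived on TlsConfig, the whole table becomes optional.
    #[serde(default)]
    tls: TlsConfig,
}

fn main() {
    // No [tls] table at all: parses, with every field defaulted.
    let cfg: Config = toml::from_str("").unwrap();
    assert!(cfg.tls.certs.is_none() && !cfg.tls.dual_protocol);

    // A partial table still works because certs/key are now Option<String>.
    let cfg: Config = toml::from_str("[tls]\ndual_protocol = true\n").unwrap();
    assert!(cfg.tls.dual_protocol && cfg.tls.key.is_none());
}
```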
+#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)]
 #[derive(Clone, Debug, Deserialize, Default)]
 #[config_example_generator(filename = "conduwuit-example.toml", section = "global.well_known")]
 pub struct WellKnownConfig {
+    /// The server URL that the client well-known file will serve. This should
+    /// not contain a port, and should just be a valid HTTPS URL.
+    ///
+    /// example: "https://matrix.example.com"
+    pub client: Option<Url>,
+
     /// The server base domain of the URL with a specific port that the server
     /// well-known file will serve. This should contain a port at the end, and
     /// should not be a URL.
     ///
     /// example: "matrix.example.com:443"
     pub server: Option<OwnedServerName>,
-    /// The server URL that the client well-known file will serve. This should
-    /// not contain a port, and should just be a valid HTTPS URL.
-    ///
-    /// example: "<https://matrix.example.com>"
-    pub client: Option<Url>,
+
     pub support_page: Option<Url>,
 
     pub support_role: Option<ContactRole>,
 
     pub support_email: Option<String>,
 
     pub support_mxid: Option<OwnedUserId>,
 }
@ -2040,6 +2053,8 @@ fn default_shorteventid_cache_capacity() -> u32 { parallelism_scaled_u32(50_000)
|
||||||
|
|
||||||
fn default_eventidshort_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(100_000) }
|
fn default_eventidshort_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(100_000) }
|
||||||
|
|
||||||
|
fn default_eventid_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(100_000) }
|
||||||
|
|
||||||
fn default_shortstatekey_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
|
fn default_shortstatekey_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
|
||||||
|
|
||||||
fn default_statekeyshort_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
|
fn default_statekeyshort_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
|
||||||
|
@ -2048,7 +2063,7 @@ fn default_server_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(50
|
||||||
|
|
||||||
fn default_user_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
|
fn default_user_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
|
||||||
|
|
||||||
fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
|
fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
|
||||||
|
|
||||||
fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
|
fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
|
||||||
|
|
||||||
|
|
|
@@ -1,4 +1,10 @@
-use std::cmp::{Eq, Ord};
+use std::{
+cmp::{Eq, Ord},
+pin::Pin,
+sync::Arc,
+};

+use futures::{Stream, StreamExt};
+
use crate::{is_equal_to, is_less_than};

@@ -45,3 +51,27 @@ where
})
})
}
+
+/// Intersection of sets
+///
+/// Outputs the set of elements common to both streams. Streams must be sorted.
+pub fn intersection_sorted_stream2<Item, S>(a: S, b: S) -> impl Stream<Item = Item> + Send
+where
+S: Stream<Item = Item> + Send + Unpin,
+Item: Eq + PartialOrd + Send + Sync,
+{
+use tokio::sync::Mutex;
+
+let b = Arc::new(Mutex::new(b.peekable()));
+a.map(move |ai| (ai, b.clone()))
+.filter_map(|(ai, b)| async move {
+let mut lock = b.lock().await;
+while let Some(bi) = Pin::new(&mut *lock).next_if(|bi| *bi <= ai).await.as_ref() {
+if ai == *bi {
+return Some(ai);
+}
+}
+
+None
+})
+}
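Editor's note, not part of the commit: the stream intersection added above works because both inputs are sorted, so the second stream only ever needs to be advanced while its head sorts at or before the current item from the first stream. A minimal synchronous Rust sketch of the same idea (illustrative names, not conduwuit code):

// Not part of the commit; a synchronous analogue of the sorted intersection:
// advance `b` while its head is behind the current item, emit on equality.
fn intersection_sorted_sketch<'a, T: Ord>(a: &'a [T], b: &'a [T]) -> Vec<&'a T> {
    let mut out = Vec::new();
    let mut bi = b.iter().peekable();
    for ai in a {
        // skip everything in `b` that sorts before `ai`
        while bi.peek().is_some_and(|x| *x < ai) {
            bi.next();
        }
        if bi.peek().is_some_and(|x| *x == ai) {
            out.push(ai);
        }
    }
    out
}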
@@ -237,3 +237,42 @@ fn set_intersection_sorted_all() {
let r = intersection_sorted(i.into_iter());
assert!(r.eq(["bar", "baz", "foo"].iter()));
}
+
+#[tokio::test]
+async fn set_intersection_sorted_stream2() {
+use futures::StreamExt;
+use utils::{set::intersection_sorted_stream2, IterStream};
+
+let a = ["bar"];
+let b = ["bar", "foo"];
+let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream())
+.collect::<Vec<&str>>()
+.await;
+assert!(r.eq(&["bar"]));
+
+let r = intersection_sorted_stream2(b.iter().stream(), a.iter().stream())
+.collect::<Vec<&str>>()
+.await;
+assert!(r.eq(&["bar"]));
+
+let a = ["aaa", "ccc", "xxx", "yyy"];
+let b = ["hhh", "iii", "jjj", "zzz"];
+let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream())
+.collect::<Vec<&str>>()
+.await;
+assert!(r.is_empty());
+
+let a = ["aaa", "ccc", "eee", "ggg"];
+let b = ["aaa", "bbb", "ccc", "ddd", "eee"];
+let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream())
+.collect::<Vec<&str>>()
+.await;
+assert!(r.eq(&["aaa", "ccc", "eee"]));
+
+let a = ["aaa", "ccc", "eee", "ggg", "hhh", "iii"];
+let b = ["bbb", "ccc", "ddd", "fff", "ggg", "iii"];
+let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream())
+.collect::<Vec<&str>>()
+.await;
+assert!(r.eq(&["ccc", "ggg", "iii"]));
+}
@@ -2,9 +2,10 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write};

use arrayvec::ArrayVec;
use conduit::{err, implement, utils::IterStream, Result};
-use futures::{future::ready, Stream};
+use futures::{FutureExt, Stream};
use rocksdb::DBPinnableSlice;
use serde::Serialize;
+use tokio::task;

use crate::{ser, util, Handle};

@@ -55,7 +56,8 @@ pub fn get<K>(&self, key: &K) -> impl Future<Output = Result<Handle<'_>>> + Send
where
K: AsRef<[u8]> + ?Sized + Debug,
{
-ready(self.get_blocking(key))
+let result = self.get_blocking(key);
+task::consume_budget().map(move |()| result)
}

/// Fetch a value from the database into cache, returning a reference-handle.

@@ -78,8 +80,8 @@ where
#[tracing::instrument(skip(self, keys), fields(%self), level = "trace")]
pub fn get_batch<'a, I, K>(&self, keys: I) -> impl Stream<Item = Result<Handle<'_>>>
where
-I: Iterator<Item = &'a K> + ExactSizeIterator + Send + Debug,
-K: AsRef<[u8]> + Send + Sync + Sized + Debug + 'a,
+I: Iterator<Item = &'a K> + ExactSizeIterator + Debug + Send,
+K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a,
{
self.get_batch_blocking(keys).stream()
}

@@ -87,8 +89,8 @@ where
#[implement(super::Map)]
pub fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator<Item = Result<Handle<'_>>>
where
-I: Iterator<Item = &'a K> + ExactSizeIterator + Send,
-K: AsRef<[u8]> + Sized + 'a,
+I: Iterator<Item = &'a K> + ExactSizeIterator + Debug + Send,
+K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a,
{
// Optimization can be `true` if key vector is pre-sorted **by the column
// comparator**.
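Editor's note, not part of the commit: the change above swaps an always-ready future for `tokio::task::consume_budget()`, which only actually yields once the task's cooperative budget is spent; a long run of cache hits therefore stays cheap without starving other tasks on the runtime. A rough standalone sketch of that pattern (illustrative names, assumes the tokio runtime feature):

// Not part of the commit; minimal sketch of the cooperative-yield pattern.
use std::collections::HashMap;

async fn get_cached(map: &HashMap<String, Vec<u8>>, key: &str) -> Option<Vec<u8>> {
    let result = map.get(key).cloned(); // synchronous lookup, usually ready immediately
    tokio::task::consume_budget().await; // yields only when the coop budget is exhausted
    result
}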
@@ -203,7 +203,7 @@ where
#[tracing::instrument(skip(self, iter), fields(%self), level = "trace")]
pub fn insert_batch<'a, I, K, V>(&'a self, iter: I)
where
-I: Iterator<Item = &'a (K, V)> + Send + Debug,
+I: Iterator<Item = (K, V)> + Send + Debug,
K: AsRef<[u8]> + Sized + Debug + 'a,
V: AsRef<[u8]> + Sized + 'a,
{
@@ -136,6 +136,14 @@ pub(crate) fn cf_options(
cache_size(cfg, cfg.eventidshort_cache_capacity, 64)?,
),

+"eventid_pduid" => set_table_with_new_cache(
+&mut opts,
+cfg,
+cache,
+name,
+cache_size(cfg, cfg.eventid_pdu_cache_capacity, 64)?,
+),
+
"shorteventid_authchain" => {
set_table_with_new_cache(
&mut opts,
@@ -1,9 +1,4 @@
-use std::{
-collections::{HashMap, HashSet},
-fmt::Write as _,
-fs::OpenOptions,
-io::Write as _,
-};
+use std::{collections::HashSet, fmt::Write as _, fs::OpenOptions, io::Write as _};

use proc_macro::TokenStream;
use proc_macro2::Span;

@@ -13,7 +8,10 @@ use syn::{
ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath,
};

-use crate::{utils::is_cargo_build, Result};
+use crate::{
+utils::{get_simple_settings, is_cargo_build},
+Result,
+};

const UNDOCUMENTED: &str = "# This item is undocumented. Please contribute documentation for it.";

@@ -29,7 +27,7 @@ pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result<Toke
#[allow(clippy::needless_pass_by_value)]
#[allow(unused_variables)]
fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> {
-let settings = get_settings(args);
+let settings = get_simple_settings(args);

let filename = settings
.get("filename")

@@ -120,39 +118,6 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> {
Ok(())
}

-fn get_settings(args: &[Meta]) -> HashMap<String, String> {
-let mut map = HashMap::new();
-for arg in args {
-let Meta::NameValue(MetaNameValue {
-path,
-value,
-..
-}) = arg
-else {
-continue;
-};
-
-let Expr::Lit(
-ExprLit {
-lit: Lit::Str(str),
-..
-},
-..,
-) = value
-else {
-continue;
-};
-
-let Some(key) = path.segments.iter().next().map(|s| s.ident.clone()) else {
-continue;
-};
-
-map.insert(key.to_string(), str.value());
-}
-
-map
-}
-
fn get_default(field: &Field) -> Option<String> {
for attr in &field.attrs {
let Meta::List(MetaList {
@@ -1,7 +1,39 @@
-use syn::{parse_str, Expr, Generics, Lit, Meta};
+use std::collections::HashMap;
+
+use syn::{parse_str, Expr, ExprLit, Generics, Lit, Meta, MetaNameValue};

use crate::Result;

+pub(crate) fn get_simple_settings(args: &[Meta]) -> HashMap<String, String> {
+args.iter().fold(HashMap::new(), |mut map, arg| {
+let Meta::NameValue(MetaNameValue {
+path,
+value,
+..
+}) = arg
+else {
+return map;
+};
+
+let Expr::Lit(
+ExprLit {
+lit: Lit::Str(str),
+..
+},
+..,
+) = value
+else {
+return map;
+};
+
+if let Some(key) = path.segments.iter().next().map(|s| s.ident.clone()) {
+map.insert(key.to_string(), str.value());
+}
+
+map
+})
+}
+
pub(crate) fn is_cargo_build() -> bool {
std::env::args()
.find(|flag| flag.starts_with("--emit"))
@@ -5,12 +5,14 @@ use std::path::PathBuf;
use clap::Parser;
use conduit::{
config::{Figment, FigmentValue},
-err, toml, Err, Result,
+err, toml,
+utils::available_parallelism,
+Err, Result,
};

/// Commandline arguments
#[derive(Parser, Debug)]
-#[clap(version = conduit::version(), about, long_about = None)]
+#[clap(version = conduit::version(), about, long_about = None, name = "conduwuit")]
pub(crate) struct Args {
#[arg(short, long)]
/// Path to the config TOML file (optional)

@@ -32,6 +34,10 @@ pub(crate) struct Args {
/// Set functional testing modes if available. Ex '--test=smoke'
#[arg(long, hide(true))]
pub(crate) test: Vec<String>,
+
+/// Override the tokio worker_thread count.
+#[arg(long, hide(true), env = "TOKIO_WORKER_THREADS", default_value = available_parallelism().to_string())]
+pub(crate) worker_threads: usize,
}

/// Parse commandline arguments into structured data
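Editor's note, not part of the commit: the new argument lets the tokio worker count be set from the command line or the TOKIO_WORKER_THREADS environment variable, defaulting to the detected parallelism. A standalone sketch of the same flag shape, assuming clap 4 with the derive and env features (names are illustrative, not conduwuit's code):

// Not part of the commit; hypothetical minimal binary showing the flag shape.
use clap::Parser;

#[derive(Parser, Debug)]
struct Args {
    /// Override the tokio worker_thread count.
    #[arg(long, hide(true), env = "TOKIO_WORKER_THREADS",
          default_value_t = std::thread::available_parallelism().map_or(1, |n| n.get()))]
    worker_threads: usize,
}

fn main() {
    let args = Args::parse();
    println!("worker_threads = {}", args.worker_threads);
}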
@@ -9,12 +9,11 @@ mod tracing;
extern crate conduit_core as conduit;

use std::{
-cmp,
sync::{atomic::Ordering, Arc},
time::Duration,
};

-use conduit::{debug_info, error, rustc_flags_capture, utils::available_parallelism, Error, Result};
+use conduit::{debug_info, error, rustc_flags_capture, Error, Result};
use server::Server;
use tokio::runtime;

@@ -30,7 +29,7 @@ fn main() -> Result<(), Error> {
.enable_io()
.enable_time()
.thread_name(WORKER_NAME)
-.worker_threads(cmp::max(WORKER_MIN, available_parallelism()))
+.worker_threads(args.worker_threads.max(WORKER_MIN))
.thread_keep_alive(Duration::from_secs(WORKER_KEEPALIVE))
.build()
.expect("built runtime");
@@ -23,7 +23,7 @@ pub(super) async fn serve(

if cfg!(unix) && config.unix_socket_path.is_some() {
unix::serve(server, app, shutdown).await
-} else if config.tls.is_some() {
+} else if config.tls.certs.is_some() {
#[cfg(feature = "direct_tls")]
return tls::serve(server, app, handle, addrs).await;

@@ -6,17 +6,20 @@ use axum_server_dual_protocol::{
axum_server::{bind_rustls, tls_rustls::RustlsConfig},
ServerExt,
};
-use conduit::{Result, Server};
+use conduit::{err, Result, Server};
use tokio::task::JoinSet;
use tracing::{debug, info, warn};

-pub(super) async fn serve(
-server: &Arc<Server>, app: Router, handle: ServerHandle, addrs: Vec<SocketAddr>,
-) -> Result<()> {
-let config = &server.config;
-let tls = config.tls.as_ref().expect("TLS configuration");
-let certs = &tls.certs;
-let key = &tls.key;
+pub(super) async fn serve(server: &Arc<Server>, app: Router, handle: ServerHandle, addrs: Vec<SocketAddr>) -> Result {
+let tls = &server.config.tls;
+let certs = tls
+.certs
+.as_ref()
+.ok_or(err!(Config("tls.certs", "Missing required value in tls config section")))?;
+let key = tls
+.key
+.as_ref()
+.ok_or(err!(Config("tls.key", "Missing required value in tls config section")))?;

// we use ring for ruma and hashing state, but aws-lc-rs is the new default.
// without this, TLS mode will panic.
@@ -1,12 +1,12 @@
-use std::{collections::HashMap, sync::Arc};
+use std::sync::Arc;

use conduit::{
-implement,
-utils::{stream::TryIgnore, ReadyExt},
-Err, Error, Result,
+err, implement,
+utils::{result::LogErr, stream::TryIgnore, ReadyExt},
+Err, Result,
};
-use database::{Deserialized, Handle, Json, Map};
-use futures::{StreamExt, TryFutureExt};
+use database::{Deserialized, Handle, Interfix, Json, Map};
+use futures::{Stream, StreamExt, TryFutureExt};
use ruma::{
events::{
AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, GlobalAccountDataEventType,

@@ -112,46 +112,27 @@ pub async fn get_raw(&self, room_id: Option<&RoomId>, user_id: &UserId, kind: &s

/// Returns all changes to the account data that happened after `since`.
#[implement(Service)]
-pub async fn changes_since(
-&self, room_id: Option<&RoomId>, user_id: &UserId, since: u64,
-) -> Result<Vec<AnyRawAccountDataEvent>> {
-let mut userdata = HashMap::new();
-let mut prefix = room_id
-.map(ToString::to_string)
-.unwrap_or_default()
-.as_bytes()
-.to_vec();
-prefix.push(0xFF);
-prefix.extend_from_slice(user_id.as_bytes());
-prefix.push(0xFF);
+pub fn changes_since<'a>(
+&'a self, room_id: Option<&'a RoomId>, user_id: &'a UserId, since: u64,
+) -> impl Stream<Item = AnyRawAccountDataEvent> + Send + 'a {
+let prefix = (room_id, user_id, Interfix);
+let prefix = database::serialize_to_vec(prefix).expect("failed to serialize prefix");

// Skip the data that's exactly at since, because we sent that last time
-let mut first_possible = prefix.clone();
-first_possible.extend_from_slice(&(since.saturating_add(1)).to_be_bytes());
+let first_possible = (room_id, user_id, since.saturating_add(1));

self.db
.roomuserdataid_accountdata
-.raw_stream_from(&first_possible)
+.stream_from_raw(&first_possible)
.ignore_err()
.ready_take_while(move |(k, _)| k.starts_with(&prefix))
-.map(|(k, v)| {
-let v = match room_id {
-None => serde_json::from_slice::<Raw<AnyGlobalAccountDataEvent>>(v)
-.map(AnyRawAccountDataEvent::Global)
-.map_err(|_| Error::bad_database("Database contains invalid account data."))?,
-Some(_) => serde_json::from_slice::<Raw<AnyRoomAccountDataEvent>>(v)
-.map(AnyRawAccountDataEvent::Room)
-.map_err(|_| Error::bad_database("Database contains invalid account data."))?,
-};
-
-Ok((k.to_owned(), v))
+.map(move |(_, v)| {
+match room_id {
+Some(_) => serde_json::from_slice::<Raw<AnyRoomAccountDataEvent>>(v).map(AnyRawAccountDataEvent::Room),
+None => serde_json::from_slice::<Raw<AnyGlobalAccountDataEvent>>(v).map(AnyRawAccountDataEvent::Global),
+}
+.map_err(|e| err!(Database("Database contains invalid account data: {e}")))
+.log_err()
})
.ignore_err()
-.ready_for_each(|(kind, data)| {
-userdata.insert(kind, data);
-})
-.await;
-
-Ok(userdata.into_values().collect())
}
@@ -79,26 +79,24 @@ impl Service {
///
/// # Arguments
///
-/// * `service_name` - the name you send to register the service previously
-pub async fn unregister_appservice(&self, service_name: &str) -> Result<()> {
+/// * `service_name` - the registration ID of the appservice
+pub async fn unregister_appservice(&self, appservice_id: &str) -> Result<()> {
// removes the appservice registration info
self.registration_info
.write()
.await
-.remove(service_name)
+.remove(appservice_id)
.ok_or(err!("Appservice not found"))?;

// remove the appservice from the database
-self.db.id_appserviceregistrations.remove(service_name);
+self.db.id_appserviceregistrations.del(appservice_id);

// deletes all active requests for the appservice if there are any so we stop
// sending to the URL
self.services
.sending
-.cleanup_events(service_name.to_owned())
-.await;
+.cleanup_events(Some(appservice_id), None, None)
+.await

-Ok(())
}

pub async fn get_registration(&self, id: &str) -> Option<Registration> {
@@ -12,11 +12,9 @@ use data::Data;
use ipaddress::IPAddress;
use regex::RegexSet;
use ruma::{
-api::client::discovery::discover_support::ContactRole, OwnedEventId, OwnedRoomAliasId, OwnedServerName,
-OwnedUserId, RoomAliasId, RoomVersionId, ServerName, UserId,
+OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, RoomVersionId, ServerName, UserId,
};
use tokio::sync::Mutex;
-use url::Url;

use crate::service;

@@ -243,14 +241,6 @@ impl Service {

pub fn allow_outgoing_read_receipts(&self) -> bool { self.config.allow_outgoing_read_receipts }

-pub fn well_known_support_page(&self) -> &Option<Url> { &self.config.well_known.support_page }
-
-pub fn well_known_support_role(&self) -> &Option<ContactRole> { &self.config.well_known.support_role }
-
-pub fn well_known_support_email(&self) -> &Option<String> { &self.config.well_known.support_email }
-
-pub fn well_known_support_mxid(&self) -> &Option<OwnedUserId> { &self.config.well_known.support_mxid }
-
pub fn block_non_admin_invites(&self) -> bool { self.config.block_non_admin_invites }

pub fn supported_room_versions(&self) -> Vec<RoomVersionId> {

@@ -265,10 +255,6 @@ impl Service {
}
}

-pub fn well_known_client(&self) -> &Option<Url> { &self.config.well_known.client }
-
-pub fn well_known_server(&self) -> &Option<OwnedServerName> { &self.config.well_known.server }
-
#[inline]
pub fn valid_cidr_range(&self, ip: &IPAddress) -> bool {
for cidr in &self.cidr_range_denylist {
@@ -2,9 +2,9 @@ use std::{fmt::Debug, mem, sync::Arc};

use bytes::BytesMut;
use conduit::{
-debug_error, err, trace,
+debug_warn, err, trace,
utils::{stream::TryIgnore, string_from_bytes},
-Err, PduEvent, Result,
+warn, Err, PduEvent, Result,
};
use database::{Deserialized, Ignore, Interfix, Json, Map};
use futures::{Stream, StreamExt};

@@ -26,7 +26,7 @@ use ruma::{
uint, RoomId, UInt, UserId,
};

-use crate::{client, globals, rooms, users, Dep};
+use crate::{client, globals, rooms, sending, users, Dep};

pub struct Service {
db: Data,

@@ -39,6 +39,7 @@ struct Services {
state_accessor: Dep<rooms::state_accessor::Service>,
state_cache: Dep<rooms::state_cache::Service>,
users: Dep<users::Service>,
+sending: Dep<sending::Service>,
}

struct Data {

@@ -57,6 +58,7 @@ impl crate::Service for Service {
state_accessor: args.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
state_cache: args.depend::<rooms::state_cache::Service>("rooms::state_cache"),
users: args.depend::<users::Service>("users"),
+sending: args.depend::<sending::Service>("sending"),
},
}))
}

@@ -65,17 +67,35 @@ impl crate::Service for Service {
}

impl Service {
-pub fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) {
+pub async fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) -> Result {
match pusher {
set_pusher::v3::PusherAction::Post(data) => {
-let key = (sender, &data.pusher.ids.pushkey);
+let pushkey = data.pusher.ids.pushkey.as_str();
+
+if pushkey.len() > 512 {
+return Err!(Request(InvalidParam("Push key length cannot be greater than 512 bytes.")));
+}
+
+if data.pusher.ids.app_id.as_str().len() > 64 {
+return Err!(Request(InvalidParam("App ID length cannot be greater than 64 bytes.")));
+}
+
+let key = (sender, data.pusher.ids.pushkey.as_str());
self.db.senderkey_pusher.put(key, Json(pusher));
},
set_pusher::v3::PusherAction::Delete(ids) => {
-let key = (sender, &ids.pushkey);
+let key = (sender, ids.pushkey.as_str());
self.db.senderkey_pusher.del(key);
+
+self.services
+.sending
+.cleanup_events(None, Some(sender), Some(ids.pushkey.as_str()))
+.await
+.ok();
},
}

+Ok(())
}

pub async fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result<Pusher> {

@@ -166,8 +186,8 @@ impl Service {
let body = response.bytes().await?; // TODO: handle timeout

if !status.is_success() {
-debug_error!("Push gateway response body: {:?}", string_from_bytes(&body));
-return Err!(BadServerResponse(error!(
+debug_warn!("Push gateway response body: {:?}", string_from_bytes(&body));
+return Err!(BadServerResponse(warn!(
"Push gateway {dest} returned unsuccessful HTTP response: {status}"
)));
}

@@ -178,10 +198,10 @@ impl Service {
.expect("reqwest body is valid http body"),
);
response
-.map_err(|e| err!(BadServerResponse(error!("Push gateway {dest} returned invalid response: {e}"))))
+.map_err(|e| err!(BadServerResponse(warn!("Push gateway {dest} returned invalid response: {e}"))))
},
Err(e) => {
-debug_error!("Could not send request to pusher {dest}: {e}");
+warn!("Could not send request to pusher {dest}: {e}");
Err(e.into())
},
}

@@ -278,11 +298,7 @@ impl Service {
// TODO: email
match &pusher.kind {
PusherKind::Http(http) => {
-// TODO:
-// Two problems with this
-// 1. if "event_id_only" is the only format kind it seems we should never add
-// more info
-// 2. can pusher/devices have conflicting formats
+// TODO (timo): can pusher/devices have conflicting formats
let event_id_only = http.format == Some(PushFormat::EventIdOnly);

let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone());

@@ -297,24 +313,24 @@ impl Service {
let d = vec![device];
let mut notifi = Notification::new(d);

-notifi.prio = NotificationPriority::Low;
notifi.event_id = Some((*event.event_id).to_owned());
notifi.room_id = Some((*event.room_id).to_owned());
// TODO: missed calls
notifi.counts = NotificationCounts::new(unread, uint!(0));

+if event_id_only {
+self.send_request(&http.url, send_event_notification::v1::Request::new(notifi))
+.await?;
+} else {
if event.kind == TimelineEventType::RoomEncrypted
|| tweaks
.iter()
.any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_)))
{
notifi.prio = NotificationPriority::High;
-}
-
-if event_id_only {
-self.send_request(&http.url, send_event_notification::v1::Request::new(notifi))
-.await?;
} else {
+notifi.prio = NotificationPriority::Low;
+}
notifi.sender = Some(event.sender.clone());
notifi.event_type = Some(event.kind.clone());
notifi.content = serde_json::value::to_raw_value(&event.content).ok();
@@ -4,7 +4,7 @@ use std::{
sync::Arc,
};

-use conduit::{debug, debug_error, debug_info, debug_warn, err, trace, Err, Result};
+use conduit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result};
use hickory_resolver::{error::ResolveError, lookup::SrvLookup};
use ipaddress::IPAddress;
use ruma::ServerName;

@@ -313,7 +313,6 @@ impl super::Service {
Ok(None)
}

-#[allow(clippy::single_match_else)]
fn handle_resolve_error(e: &ResolveError) -> Result<()> {
use hickory_resolver::error::ResolveErrorKind;

@@ -322,10 +321,21 @@ impl super::Service {
..
} => {
// Raise to debug_warn if we can find out the result wasn't from cache
-debug!("{e}");
+debug!("No DNS records found: {e}");
Ok(())
},
-_ => Err!(error!("DNS {e}")),
+ResolveErrorKind::Timeout => {
+Err!(warn!("DNS {e}"))
+},
+ResolveErrorKind::NoConnections => {
+error!(
+"Your DNS server is overloaded and has ran out of connections. It is strongly recommended you \
+remediate this issue to ensure proper federation connectivity."
+);
+
+Err!(error!("DNS error: {e}"))
+},
+_ => Err!(error!("DNS error: {e}")),
}
}
@@ -2,6 +2,7 @@ mod data;

use std::{
collections::{BTreeSet, HashSet},
+fmt::Debug,
sync::Arc,
};

@@ -37,9 +38,12 @@ impl crate::Service for Service {
}

impl Service {
-pub async fn event_ids_iter(
-&self, room_id: &RoomId, starting_events: &[&EventId],
-) -> Result<impl Stream<Item = Arc<EventId>> + Send + '_> {
+pub async fn event_ids_iter<'a, I>(
+&'a self, room_id: &RoomId, starting_events: I,
+) -> Result<impl Stream<Item = Arc<EventId>> + Send + '_>
+where
+I: Iterator<Item = &'a EventId> + Clone + Debug + ExactSizeIterator + Send + 'a,
+{
let stream = self
.get_event_ids(room_id, starting_events)
.await?

@@ -49,12 +53,15 @@ impl Service {
Ok(stream)
}

-pub async fn get_event_ids(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result<Vec<Arc<EventId>>> {
+pub async fn get_event_ids<'a, I>(&'a self, room_id: &RoomId, starting_events: I) -> Result<Vec<Arc<EventId>>>
+where
+I: Iterator<Item = &'a EventId> + Clone + Debug + ExactSizeIterator + Send + 'a,
+{
let chain = self.get_auth_chain(room_id, starting_events).await?;
let event_ids = self
.services
.short
-.multi_get_eventid_from_short(&chain)
+.multi_get_eventid_from_short(chain.into_iter())
.await
.into_iter()
.filter_map(Result::ok)

@@ -64,7 +71,10 @@ impl Service {
}

#[tracing::instrument(skip_all, name = "auth_chain")]
-pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result<Vec<ShortEventId>> {
+pub async fn get_auth_chain<'a, I>(&'a self, room_id: &RoomId, starting_events: I) -> Result<Vec<ShortEventId>>
+where
+I: Iterator<Item = &'a EventId> + Clone + Debug + ExactSizeIterator + Send + 'a,
+{
const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db?
const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new();

@@ -72,19 +82,19 @@ impl Service {
let mut starting_ids = self
.services
.short
-.multi_get_or_create_shorteventid(starting_events)
-.enumerate()
+.multi_get_or_create_shorteventid(starting_events.clone())
+.zip(starting_events.clone().stream())
.boxed();

let mut buckets = [BUCKET; NUM_BUCKETS];
-while let Some((i, short)) = starting_ids.next().await {
+while let Some((short, starting_event)) = starting_ids.next().await {
let bucket: usize = short.try_into()?;
let bucket: usize = validated!(bucket % NUM_BUCKETS);
-buckets[bucket].insert((short, starting_events[i]));
+buckets[bucket].insert((short, starting_event));
}

debug!(
-starting_events = ?starting_events.len(),
+starting_events = ?starting_events.count(),
elapsed = ?started.elapsed(),
"start",
);
@@ -35,12 +35,12 @@ pub async fn resolve_state(
let fork_states = [current_state_ids, incoming_state];
let mut auth_chain_sets = Vec::with_capacity(fork_states.len());
for state in &fork_states {
-let starting_events: Vec<&EventId> = state.values().map(Borrow::borrow).collect();
+let starting_events = state.values().map(Borrow::borrow);

let auth_chain: HashSet<Arc<EventId>> = self
.services
.auth_chain
-.get_event_ids(room_id, &starting_events)
+.get_event_ids(room_id, starting_events)
.await?
.into_iter()
.collect();
@@ -139,7 +139,7 @@ pub(super) async fn state_at_incoming_resolved(
let auth_chain: HashSet<Arc<EventId>> = self
.services
.auth_chain
-.get_event_ids(room_id, &starting_events)
+.get_event_ids(room_id, starting_events.into_iter())
.await?
.into_iter()
.collect();
@@ -42,16 +42,14 @@ impl Data {
}

pub(super) async fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) {
-type KeyVal<'a> = (&'a RoomId, u64, &'a UserId);
-
// Remove old entry
let last_possible_key = (room_id, u64::MAX);
self.readreceiptid_readreceipt
-.rev_keys_from(&last_possible_key)
+.rev_keys_from_raw(&last_possible_key)
.ignore_err()
-.ready_take_while(|(r, ..): &KeyVal<'_>| *r == room_id)
-.ready_filter_map(|(r, c, u): KeyVal<'_>| (u == user_id).then_some((r, c, u)))
-.ready_for_each(|old: KeyVal<'_>| self.readreceiptid_readreceipt.del(old))
+.ready_take_while(|key| key.starts_with(room_id.as_bytes()))
+.ready_filter_map(|key| key.ends_with(user_id.as_bytes()).then_some(key))
+.ready_for_each(|key| self.readreceiptid_readreceipt.del(key))
.await;

let count = self.services.globals.next_count().unwrap();
@@ -73,11 +73,13 @@ pub fn index_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_bod
key.extend_from_slice(word.as_bytes());
key.push(0xFF);
key.extend_from_slice(pdu_id.as_ref()); // TODO: currently we save the room id a second time here
-(key, Vec::<u8>::new())
+key
})
.collect::<Vec<_>>();

-self.db.tokenids.insert_batch(batch.iter());
+self.db
+.tokenids
+.insert_batch(batch.iter().map(|k| (k.as_slice(), &[])));
}

#[implement(Service)]
@@ -1,7 +1,7 @@
-use std::{mem::size_of_val, sync::Arc};
+use std::{fmt::Debug, mem::size_of_val, sync::Arc};

pub use conduit::pdu::{ShortEventId, ShortId, ShortRoomId};
-use conduit::{err, implement, utils, Result};
+use conduit::{err, implement, utils, utils::stream::ReadyExt, Result};
use database::{Deserialized, Map};
use futures::{Stream, StreamExt};
use ruma::{events::StateEventType, EventId, RoomId};

@@ -51,37 +51,33 @@ impl crate::Service for Service {

#[implement(Service)]
pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEventId {
-const BUFSIZE: usize = size_of::<ShortEventId>();
-
if let Ok(shorteventid) = self.get_shorteventid(event_id).await {
return shorteventid;
}

-let shorteventid = self.services.globals.next_count().unwrap();
-debug_assert!(size_of_val(&shorteventid) == BUFSIZE, "buffer requirement changed");
-
-self.db
-.eventid_shorteventid
-.raw_aput::<BUFSIZE, _, _>(event_id, shorteventid);
-
-self.db
-.shorteventid_eventid
-.aput_raw::<BUFSIZE, _, _>(shorteventid, event_id);
-
-shorteventid
+self.create_shorteventid(event_id)
}

#[implement(Service)]
-pub fn multi_get_or_create_shorteventid<'a>(
-&'a self, event_ids: &'a [&EventId],
-) -> impl Stream<Item = ShortEventId> + Send + 'a {
+pub fn multi_get_or_create_shorteventid<'a, I>(&'a self, event_ids: I) -> impl Stream<Item = ShortEventId> + Send + '_
+where
+I: Iterator<Item = &'a EventId> + Clone + Debug + ExactSizeIterator + Send + 'a,
+<I as Iterator>::Item: AsRef<[u8]> + Send + Sync + 'a,
+{
self.db
.eventid_shorteventid
-.get_batch(event_ids.iter())
-.enumerate()
-.map(|(i, result)| match result {
+.get_batch(event_ids.clone())
+.ready_scan(event_ids, |event_ids, result| {
+event_ids.next().map(|event_id| (event_id, result))
+})
+.map(|(event_id, result)| match result {
Ok(ref short) => utils::u64_from_u8(short),
-Err(_) => {
+Err(_) => self.create_shorteventid(event_id),
+})
+}
+
+#[implement(Service)]
+fn create_shorteventid(&self, event_id: &EventId) -> ShortEventId {
const BUFSIZE: usize = size_of::<ShortEventId>();

let short = self.services.globals.next_count().unwrap();

@@ -89,14 +85,12 @@ pub fn multi_get_or_create_shorteventid<'a>(

self.db
.eventid_shorteventid
-.raw_aput::<BUFSIZE, _, _>(event_ids[i], short);
+.raw_aput::<BUFSIZE, _, _>(event_id, short);
self.db
.shorteventid_eventid
-.aput_raw::<BUFSIZE, _, _>(short, event_ids[i]);
+.aput_raw::<BUFSIZE, _, _>(short, event_id);

short
-},
-})
}

#[implement(Service)]

@@ -154,13 +148,13 @@ pub async fn get_eventid_from_short(&self, shorteventid: ShortEventId) -> Result
}

#[implement(Service)]
-pub async fn multi_get_eventid_from_short(&self, shorteventid: &[ShortEventId]) -> Vec<Result<Arc<EventId>>> {
+pub async fn multi_get_eventid_from_short<I>(&self, shorteventid: I) -> Vec<Result<Arc<EventId>>>
+where
+I: Iterator<Item = ShortEventId> + Send,
+{
const BUFSIZE: usize = size_of::<ShortEventId>();

-let keys: Vec<[u8; BUFSIZE]> = shorteventid
-.iter()
-.map(|short| short.to_be_bytes())
-.collect();
+let keys: Vec<[u8; BUFSIZE]> = shorteventid.map(u64::to_be_bytes).collect();

self.db
.shorteventid_eventid
@@ -6,7 +6,7 @@ use std::{
};

use conduit::{
-err,
+at, err,
result::FlatOk,
utils::{calculate_hash, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt},
warn, PduEvent, Result,

@@ -23,8 +23,14 @@ use ruma::{
EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId,
};

-use super::state_compressor::CompressedStateEvent;
-use crate::{globals, rooms, Dep};
+use crate::{
+globals, rooms,
+rooms::{
+short::{ShortEventId, ShortStateHash},
+state_compressor::{parse_compressed_state_event, CompressedStateEvent},
+},
+Dep,
+};

pub struct Service {
pub mutex: RoomMutexMap,

@@ -92,12 +98,12 @@ impl Service {
_statediffremoved: Arc<HashSet<CompressedStateEvent>>,
state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
) -> Result {
-let event_ids = statediffnew.iter().stream().filter_map(|new| {
-self.services
-.state_compressor
-.parse_compressed_state_event(*new)
-.map_ok_or_else(|_| None, |(_, event_id)| Some(event_id))
-});
+let event_ids = statediffnew
+.iter()
+.stream()
+.map(|&new| parse_compressed_state_event(new).1)
+.then(|shorteventid| self.services.short.get_eventid_from_short(shorteventid))
+.ignore_err();

pin_mut!(event_ids);
while let Some(event_id) = event_ids.next().await {

@@ -146,8 +152,9 @@ impl Service {
#[tracing::instrument(skip(self, state_ids_compressed), level = "debug")]
pub async fn set_event_state(
&self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
-) -> Result<u64> {
-const BUFSIZE: usize = size_of::<u64>();
+) -> Result<ShortStateHash> {
+const KEY_LEN: usize = size_of::<ShortEventId>();
+const VAL_LEN: usize = size_of::<ShortStateHash>();

let shorteventid = self
.services

@@ -202,7 +209,7 @@ impl Service {

self.db
.shorteventid_shortstatehash
-.aput::<BUFSIZE, BUFSIZE, _, _>(shorteventid, shortstatehash);
+.aput::<KEY_LEN, VAL_LEN, _, _>(shorteventid, shortstatehash);

Ok(shortstatehash)
}

@@ -343,7 +350,7 @@ impl Service {
.map_err(|e| err!(Request(NotFound("No create event found: {e:?}"))))
}

-pub async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result<u64> {
+pub async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result<ShortStateHash> {
self.db
.roomid_shortstatehash
.get(room_id)

@@ -391,57 +398,52 @@ impl Service {
return Ok(HashMap::new());
};

-let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content)?;
-
-let mut sauthevents: HashMap<_, _> = auth_events
+let mut sauthevents: HashMap<_, _> = state_res::auth_types_for_event(kind, sender, state_key, content)?
.iter()
.stream()
.filter_map(|(event_type, state_key)| {
self.services
.short
.get_shortstatekey(event_type, state_key)
-.map_ok(move |s| (s, (event_type, state_key)))
+.map_ok(move |ssk| (ssk, (event_type, state_key)))
.map(Result::ok)
})
+.map(|(ssk, (event_type, state_key))| (ssk, (event_type.to_owned(), state_key.to_owned())))
.collect()
.await;

-let full_state = self
+let auth_state: Vec<_> = self
.services
-.state_compressor
-.load_shortstatehash_info(shortstatehash)
+.state_accessor
+.state_full_shortids(shortstatehash)
.await
-.map_err(|e| {
-err!(Database(
-"Missing shortstatehash info for {room_id:?} at {shortstatehash:?}: {e:?}"
-))
-})?
-.pop()
-.expect("there is always one layer")
-.full_state;
+.map_err(|e| err!(Database(error!(?room_id, ?shortstatehash, "{e:?}"))))?
+.into_iter()
+.filter_map(|(shortstatekey, shorteventid)| {
+sauthevents
+.remove(&shortstatekey)
+.map(|(event_type, state_key)| ((event_type, state_key), shorteventid))
+})
+.collect();

-let mut ret = HashMap::new();
-for compressed in full_state.iter() {
-let Ok((shortstatekey, event_id)) = self
+let auth_pdus: Vec<_> = self
.services
-.state_compressor
-.parse_compressed_state_event(*compressed)
+.short
+.multi_get_eventid_from_short(auth_state.iter().map(at!(1)))
.await
-else {
-continue;
-};
+.into_iter()
+.stream()
+.and_then(|event_id| async move { self.services.timeline.get_pdu(&event_id).await })
+.collect()
+.await;

-let Some((ty, state_key)) = sauthevents.remove(&shortstatekey) else {
-continue;
-};
+let auth_pdus = auth_state
+.into_iter()
+.map(at!(0))
+.zip(auth_pdus.into_iter())
+.filter_map(|((event_type, state_key), pdu)| Some(((event_type, state_key), pdu.ok()?)))
+.collect();

-let Ok(pdu) = self.services.timeline.get_pdu(&event_id).await else {
-continue;
-};
-
-ret.insert((ty.to_owned(), state_key.to_owned()), pdu);
-}
-
-Ok(ret)
+Ok(auth_pdus)
}
}
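Editor's note, not part of the commit: the rewrite above replaces per-event lookups with one multi-get over short IDs followed by zipping the results back against the keys they were fetched for. A small illustrative helper (hypothetical names, not conduwuit API) showing that zip-back step:

// Not part of the commit; generic sketch of "batch fetch, then pair results
// with their keys, dropping misses".
fn zip_hits<K, V, E>(keys: Vec<K>, results: Vec<Result<V, E>>) -> Vec<(K, V)> {
    keys.into_iter()
        .zip(results)
        .filter_map(|(key, result)| Some((key, result.ok()?)))
        .collect()
}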
@ -1,11 +1,22 @@
|
||||||
use std::{collections::HashMap, sync::Arc};
|
use std::{collections::HashMap, sync::Arc};
|
||||||
|
|
||||||
use conduit::{err, PduEvent, Result};
|
use conduit::{
|
||||||
|
at, err,
|
||||||
|
utils::stream::{IterStream, ReadyExt},
|
||||||
|
PduEvent, Result,
|
||||||
|
};
|
||||||
use database::{Deserialized, Map};
|
use database::{Deserialized, Map};
|
||||||
use futures::TryFutureExt;
|
use futures::{StreamExt, TryFutureExt};
|
||||||
use ruma::{events::StateEventType, EventId, RoomId};
|
use ruma::{events::StateEventType, EventId, RoomId};
|
||||||
|
|
||||||
use crate::{rooms, rooms::short::ShortStateHash, Dep};
|
use crate::{
|
||||||
|
rooms,
|
||||||
|
rooms::{
|
||||||
|
short::{ShortEventId, ShortStateHash, ShortStateKey},
|
||||||
|
state_compressor::parse_compressed_state_event,
|
||||||
|
},
|
||||||
|
Dep,
|
||||||
|
};
|
||||||
|
|
||||||
pub(super) struct Data {
|
pub(super) struct Data {
|
||||||
eventid_shorteventid: Arc<Map>,
|
eventid_shorteventid: Arc<Map>,
|
||||||
|
@@ -35,9 +46,66 @@ impl Data {
         }
     }

-    #[allow(unused_qualifications)] // async traits
+    pub(super) async fn state_full(
+        &self, shortstatehash: ShortStateHash,
+    ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
+        let state = self
+            .state_full_pdus(shortstatehash)
+            .await?
+            .into_iter()
+            .filter_map(|pdu| Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)))
+            .collect();
+
+        Ok(state)
+    }
+
+    pub(super) async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result<Vec<Arc<PduEvent>>> {
+        let short_ids = self
+            .state_full_shortids(shortstatehash)
+            .await?
+            .into_iter()
+            .map(at!(1));
+
+        let event_ids = self
+            .services
+            .short
+            .multi_get_eventid_from_short(short_ids)
+            .await;
+
+        let full_pdus = event_ids
+            .into_iter()
+            .stream()
+            .ready_filter_map(Result::ok)
+            .filter_map(|event_id| async move { self.services.timeline.get_pdu(&event_id).await.ok() })
+            .collect()
+            .await;
+
+        Ok(full_pdus)
+    }
+
     pub(super) async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result<HashMap<u64, Arc<EventId>>> {
-        let full_state = self
+        let short_ids = self.state_full_shortids(shortstatehash).await?;
+
+        let event_ids = self
+            .services
+            .short
+            .multi_get_eventid_from_short(short_ids.iter().map(at!(1)))
+            .await;
+
+        let full_ids = short_ids
+            .into_iter()
+            .map(at!(0))
+            .zip(event_ids.into_iter())
+            .filter_map(|(shortstatekey, event_id)| Some((shortstatekey, event_id.ok()?)))
+            .collect();
+
+        Ok(full_ids)
+    }
+
+    pub(super) async fn state_full_shortids(
+        &self, shortstatehash: ShortStateHash,
+    ) -> Result<Vec<(ShortStateKey, ShortEventId)>> {
+        let shortids = self
             .services
             .state_compressor
             .load_shortstatehash_info(shortstatehash)
@@ -45,63 +113,13 @@
             .map_err(|e| err!(Database("Missing state IDs: {e}")))?
             .pop()
             .expect("there is always one layer")
-            .full_state;
+            .full_state
+            .iter()
+            .copied()
+            .map(parse_compressed_state_event)
+            .collect();

-        let mut result = HashMap::new();
-        let mut i: u8 = 0;
-        for compressed in full_state.iter() {
-            let parsed = self
-                .services
-                .state_compressor
-                .parse_compressed_state_event(*compressed)
-                .await?;
-
-            result.insert(parsed.0, parsed.1);
-
-            i = i.wrapping_add(1);
-            if i % 100 == 0 {
-                tokio::task::yield_now().await;
-            }
-        }
-
-        Ok(result)
-    }
-
-    #[allow(unused_qualifications)] // async traits
-    pub(super) async fn state_full(
-        &self, shortstatehash: ShortStateHash,
-    ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
-        let full_state = self
-            .services
-            .state_compressor
-            .load_shortstatehash_info(shortstatehash)
-            .await?
-            .pop()
-            .expect("there is always one layer")
-            .full_state;
-
-        let mut result = HashMap::new();
-        let mut i: u8 = 0;
-        for compressed in full_state.iter() {
-            let (_, eventid) = self
-                .services
-                .state_compressor
-                .parse_compressed_state_event(*compressed)
-                .await?;
-
-            if let Ok(pdu) = self.services.timeline.get_pdu(&eventid).await {
-                if let Some(state_key) = pdu.state_key.as_ref() {
-                    result.insert((pdu.kind.to_string().into(), state_key.clone()), pdu);
-                }
-            }
-
-            i = i.wrapping_add(1);
-            if i % 100 == 0 {
-                tokio::task::yield_now().await;
-            }
-        }
-
-        Ok(result)
+        Ok(shortids)
     }

     /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`).
@@ -130,18 +148,11 @@ impl Data {
             .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes()))
             .ok_or(err!(Database("No shortstatekey in compressed state")))?;

+        let (_, shorteventid) = parse_compressed_state_event(*compressed);
+
         self.services
-            .state_compressor
-            .parse_compressed_state_event(*compressed)
-            .map_ok(|(_, id)| id)
-            .map_err(|e| {
-                err!(Database(error!(
-                    ?event_type,
-                    ?state_key,
-                    ?shortstatekey,
-                    "Failed to parse compressed: {e:?}"
-                )))
-            })
+            .short
+            .get_eventid_from_short(shorteventid)
             .await
     }

@@ -176,6 +187,17 @@ impl Data {
         .await
     }

+    /// Returns the full room state's pdus.
+    #[allow(unused_qualifications)] // async traits
+    pub(super) async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result<Vec<Arc<PduEvent>>> {
+        self.services
+            .state
+            .get_room_shortstatehash(room_id)
+            .and_then(|shortstatehash| self.state_full_pdus(shortstatehash))
+            .map_err(|e| err!(Database("Missing state pdus for {room_id:?}: {e:?}")))
+            .await
+    }
+
     /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`).
     pub(super) async fn room_state_get_id(
         &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str,
@@ -41,7 +41,10 @@ use serde::Deserialize;
 use self::data::Data;
 use crate::{
     rooms,
-    rooms::{short::ShortStateHash, state::RoomMutexGuard},
+    rooms::{
+        short::{ShortEventId, ShortStateHash, ShortStateKey},
+        state::RoomMutexGuard,
+    },
     Dep,
 };

@@ -102,6 +105,13 @@ impl Service {
         self.db.state_full_ids(shortstatehash).await
     }

+    #[inline]
+    pub async fn state_full_shortids(
+        &self, shortstatehash: ShortStateHash,
+    ) -> Result<Vec<(ShortStateKey, ShortEventId)>> {
+        self.db.state_full_shortids(shortstatehash).await
+    }
+
     pub async fn state_full(
         &self, shortstatehash: ShortStateHash,
     ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
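The new `state_full_shortids` accessor returns raw `(ShortStateKey, ShortEventId)` pairs; resolving them to event IDs goes through the short service, as the data-layer functions above do. A rough usage sketch (the `services.rooms.*` paths and variable names are assumptions, not part of the patch):

    // Fetch the compressed state layer once, then batch-resolve the event ids.
    let short_ids = services.rooms.state_accessor.state_full_shortids(shortstatehash).await?;
    let event_ids = services
        .rooms
        .short
        .multi_get_eventid_from_short(short_ids.iter().map(at!(1)))
        .await;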
@@ -287,7 +297,11 @@
             c.history_visibility
         });

-        history_visibility == HistoryVisibility::WorldReadable
+        match history_visibility {
+            HistoryVisibility::Invited => self.services.state_cache.is_invited(user_id, room_id).await,
+            HistoryVisibility::WorldReadable => true,
+            _ => false,
+        }
     }

     /// Returns the state hash for this pdu.
@@ -301,6 +315,12 @@
         self.db.room_state_full(room_id).await
     }

+    /// Returns the full room state pdus
+    #[tracing::instrument(skip(self), level = "debug")]
+    pub async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result<Vec<Arc<PduEvent>>> {
+        self.db.room_state_full_pdus(room_id).await
+    }
+
     /// Returns a single PDU from `room_id` with key (`event_type`,
     /// `state_key`).
     #[tracing::instrument(skip(self), level = "debug")]
@@ -10,7 +10,7 @@ use conduit::{
     warn, Result,
 };
 use database::{serialize_to_vec, Deserialized, Ignore, Interfix, Json, Map};
-use futures::{future::join4, stream::iter, Stream, StreamExt};
+use futures::{future::join4, pin_mut, stream::iter, Stream, StreamExt};
 use itertools::Itertools;
 use ruma::{
     events::{
@@ -385,16 +385,21 @@
     /// Returns true if user_a and user_b share at least one room.
     #[tracing::instrument(skip(self), level = "debug")]
     pub async fn user_sees_user(&self, user_a: &UserId, user_b: &UserId) -> bool {
-        // Minimize number of point-queries by iterating user with least nr rooms
-        let (a, b) = if self.rooms_joined(user_a).count().await < self.rooms_joined(user_b).count().await {
-            (user_a, user_b)
-        } else {
-            (user_b, user_a)
-        };
+        let get_shared_rooms = self.get_shared_rooms(user_a, user_b);

-        self.rooms_joined(a)
-            .any(|room_id| self.is_joined(b, room_id))
-            .await
+        pin_mut!(get_shared_rooms);
+        get_shared_rooms.next().await.is_some()
+    }
+
+    /// List the rooms common between two users
+    pub fn get_shared_rooms<'a>(
+        &'a self, user_a: &'a UserId, user_b: &'a UserId,
+    ) -> impl Stream<Item = &RoomId> + Send + 'a {
+        use conduit::utils::set;
+
+        let a = self.rooms_joined(user_a);
+        let b = self.rooms_joined(user_b);
+        set::intersection_sorted_stream2(a, b)
     }

     /// Returns an iterator of all joined members of a room.
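`user_sees_user` is now just "does the shared-rooms stream yield anything", and other callers can reuse the same stream. A minimal sketch (not from the patch; the `state_cache` binding is assumed):

    // Lazily intersect the two sorted rooms_joined() streams and stop at the first hit.
    let shared = state_cache.get_shared_rooms(user_a, user_b);
    pin_mut!(shared);
    let share_a_room = shared.next().await.is_some();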
@@ -17,7 +17,7 @@ use ruma::{EventId, RoomId};

 use crate::{
     rooms,
-    rooms::short::{ShortId, ShortStateHash, ShortStateKey},
+    rooms::short::{ShortEventId, ShortId, ShortStateHash, ShortStateKey},
     Dep,
 };

@@ -89,9 +89,10 @@ impl crate::Service for Service {
             .map(at!(1))
             .flat_map(|vec| vec.iter())
             .fold(HashMap::new(), |mut ents, ssi| {
-                ents.insert(Arc::as_ptr(&ssi.added), compressed_state_size(&ssi.added));
-                ents.insert(Arc::as_ptr(&ssi.removed), compressed_state_size(&ssi.removed));
-                ents.insert(Arc::as_ptr(&ssi.full_state), compressed_state_size(&ssi.full_state));
+                for cs in &[&ssi.added, &ssi.removed, &ssi.full_state] {
+                    ents.insert(Arc::as_ptr(cs), compressed_state_size(cs));
+                }
+
                 ents
             });

@@ -125,51 +126,57 @@ impl Service {
             return Ok(r.clone());
         }

-        let StateDiff {
-            parent,
-            added,
-            removed,
-        } = self.get_statediff(shortstatehash).await?;
-
-        let response = if let Some(parent) = parent {
-            let mut response = Box::pin(self.load_shortstatehash_info(parent)).await?;
-            let mut state = (*response.last().expect("at least one response").full_state).clone();
-            state.extend(added.iter().copied());
-            let removed = (*removed).clone();
-            for r in &removed {
-                state.remove(r);
-            }
-
-            response.push(ShortStateInfo {
-                shortstatehash,
-                full_state: Arc::new(state),
-                added,
-                removed: Arc::new(removed),
-            });
-
-            response
-        } else {
-            vec![ShortStateInfo {
-                shortstatehash,
-                full_state: added.clone(),
-                added,
-                removed,
-            }]
-        };
+        let stack = self.new_shortstatehash_info(shortstatehash).await?;

         debug!(
-            ?parent,
             ?shortstatehash,
-            vec_len = %response.len(),
+            len = %stack.len(),
             "cache update"
         );

         self.stateinfo_cache
             .lock()
             .expect("locked")
-            .insert(shortstatehash, response.clone());
+            .insert(shortstatehash, stack.clone());

-        Ok(response)
+        Ok(stack)
+    }
+
+    async fn new_shortstatehash_info(&self, shortstatehash: ShortStateHash) -> Result<ShortStateInfoVec> {
+        let StateDiff {
+            parent,
+            added,
+            removed,
+        } = self.get_statediff(shortstatehash).await?;
+
+        let Some(parent) = parent else {
+            return Ok(vec![ShortStateInfo {
+                shortstatehash,
+                full_state: added.clone(),
+                added,
+                removed,
+            }]);
+        };
+
+        let mut stack = Box::pin(self.load_shortstatehash_info(parent)).await?;
+        let top = stack.last().expect("at least one frame");
+
+        let mut full_state = (*top.full_state).clone();
+        full_state.extend(added.iter().copied());
+
+        let removed = (*removed).clone();
+        for r in &removed {
+            full_state.remove(r);
+        }
+
+        stack.push(ShortStateInfo {
+            shortstatehash,
+            added,
+            removed: Arc::new(removed),
+            full_state: Arc::new(full_state),
+        });
+
+        Ok(stack)
     }

     pub async fn compress_state_event(&self, shortstatekey: ShortStateKey, event_id: &EventId) -> CompressedStateEvent {
@@ -189,24 +196,6 @@ impl Service {
             .expect("failed to create CompressedStateEvent")
     }

-    /// Returns shortstatekey, event id
-    #[inline]
-    pub async fn parse_compressed_state_event(
-        &self, compressed_event: CompressedStateEvent,
-    ) -> Result<(ShortStateKey, Arc<EventId>)> {
-        use utils::u64_from_u8;
-
-        let shortstatekey = u64_from_u8(&compressed_event[0..size_of::<ShortStateKey>()]);
-        let shorteventid = u64_from_u8(&compressed_event[size_of::<ShortStateKey>()..]);
-        let event_id = self
-            .services
-            .short
-            .get_eventid_from_short(shorteventid)
-            .await?;
-
-        Ok((shortstatekey, event_id))
-    }
-
     /// Creates a new shortstatehash that often is just a diff to an already
     /// existing shortstatehash and therefore very efficient.
     ///
@@ -481,6 +470,17 @@ impl Service {
     }
 }

+#[inline]
+#[must_use]
+pub fn parse_compressed_state_event(compressed_event: CompressedStateEvent) -> (ShortStateKey, ShortEventId) {
+    use utils::u64_from_u8;
+
+    let shortstatekey = u64_from_u8(&compressed_event[0..size_of::<ShortStateKey>()]);
+    let shorteventid = u64_from_u8(&compressed_event[size_of::<ShortStateKey>()..]);
+
+    (shortstatekey, shorteventid)
+}
+
 #[inline]
 fn compressed_state_size(compressed_state: &CompressedState) -> usize {
     compressed_state
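`parse_compressed_state_event` is now a free, synchronous function: a `CompressedStateEvent` is the big-endian `ShortStateKey` bytes followed by the big-endian `ShortEventId` bytes, so splitting it needs no database access. A round-trip sketch, assuming `CompressedStateEvent` is the fixed byte array those indices imply and that `shortstatekey`/`shorteventid` are existing `u64` bindings:

    // Pack the two ids the same way compress_state_event() does.
    let mut compressed = [0_u8; size_of::<ShortStateKey>() + size_of::<ShortEventId>()];
    compressed[..size_of::<ShortStateKey>()].copy_from_slice(&shortstatekey.to_be_bytes());
    compressed[size_of::<ShortStateKey>()..].copy_from_slice(&shorteventid.to_be_bytes());

    // Unpack without touching the shorteventid table.
    let (key, short_event_id) = parse_compressed_state_event(compressed);
    assert_eq!((key, short_event_id), (shortstatekey, shorteventid));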
@@ -9,10 +9,9 @@
 };

 use conduit::{
-    debug, err, error, implement, info,
+    debug, debug_warn, err, error, implement, info,
     pdu::{EventHash, PduBuilder, PduCount, PduEvent},
-    utils,
-    utils::{stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt},
+    utils::{self, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt},
     validated, warn, Err, Error, Result, Server,
 };
 pub use conduit::{PduId, RawPduId};
@@ -386,17 +385,19 @@ impl Service {

         let sync_pdu = pdu.to_sync_room_event();

-        let mut notifies = Vec::new();
-        let mut highlights = Vec::new();
-
         let mut push_target: HashSet<_> = self
             .services
             .state_cache
             .active_local_users_in_room(&pdu.room_id)
+            // Don't notify the sender of their own events
+            .ready_filter(|user| user != &pdu.sender)
             .map(ToOwned::to_owned)
             .collect()
             .await;

+        let mut notifies = Vec::with_capacity(push_target.len().saturating_add(1));
+        let mut highlights = Vec::with_capacity(push_target.len().saturating_add(1));
+
         if pdu.kind == TimelineEventType::RoomMember {
             if let Some(state_key) = &pdu.state_key {
                 let target_user_id = UserId::parse(state_key.clone())?;
@@ -408,11 +409,6 @@ impl Service {
         }

         for user in &push_target {
-            // Don't notify the user of their own events
-            if user == &pdu.sender {
-                continue;
-            }
-
             let rules_for_user = self
                 .services
                 .account_data
@@ -436,6 +432,11 @@ impl Service {
                 },
                 _ => {},
             };
+
+            // Break early if both conditions are true
+            if notify && highlight {
+                break;
+            }
         }

         if notify {
@@ -1128,7 +1129,7 @@ impl Service {
             Ok(response) => {
                 for pdu in response.pdus {
                     if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await {
-                        warn!("Failed to add backfilled pdu in room {room_id}: {e}");
+                        debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}");
                     }
                 }
                 return Ok(());
@@ -2,7 +2,6 @@ use std::sync::Arc;

 use conduit::{implement, Result};
 use database::{Deserialized, Map};
-use futures::{pin_mut, Stream, StreamExt};
 use ruma::{RoomId, UserId};

 use crate::{globals, rooms, rooms::short::ShortStateHash, Dep};
@@ -22,7 +21,6 @@ struct Data {
 struct Services {
     globals: Dep<globals::Service>,
     short: Dep<rooms::short::Service>,
-    state_cache: Dep<rooms::state_cache::Service>,
 }

 impl crate::Service for Service {
@@ -38,7 +36,6 @@ impl crate::Service for Service {
             services: Services {
                 globals: args.depend::<globals::Service>("globals"),
                 short: args.depend::<rooms::short::Service>("rooms::short"),
-                state_cache: args.depend::<rooms::state_cache::Service>("rooms::state_cache"),
             },
         }))
     }
@@ -118,22 +115,3 @@ pub async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Re
         .await
         .deserialized()
 }
-
-#[implement(Service)]
-pub async fn has_shared_rooms<'a>(&'a self, user_a: &'a UserId, user_b: &'a UserId) -> bool {
-    let get_shared_rooms = self.get_shared_rooms(user_a, user_b);
-
-    pin_mut!(get_shared_rooms);
-    get_shared_rooms.next().await.is_some()
-}
-
-//TODO: optimize; replace point-queries with dual iteration
-#[implement(Service)]
-pub fn get_shared_rooms<'a>(
-    &'a self, user_a: &'a UserId, user_b: &'a UserId,
-) -> impl Stream<Item = &RoomId> + Send + 'a {
-    self.services
-        .state_cache
-        .rooms_joined(user_a)
-        .filter(|room_id| self.services.state_cache.is_joined(user_b, room_id))
-}
@@ -1,7 +1,7 @@
-use std::sync::Arc;
+use std::{fmt::Debug, sync::Arc};

 use conduit::{
-    utils,
+    at, utils,
     utils::{stream::TryIgnore, ReadyExt},
     Error, Result,
 };
@@ -69,20 +69,22 @@ impl Data {
             .await;
     }

-    pub(super) fn mark_as_active(&self, events: &[QueueItem]) {
-        for (key, e) in events {
-            if key.is_empty() {
-                continue;
-            }
-
-            let value = if let SendingEvent::Edu(value) = &e {
-                &**value
-            } else {
-                &[]
-            };
-
-            self.servercurrentevent_data.insert(key, value);
-            self.servernameevent_data.remove(key);
-        }
+    pub(super) fn mark_as_active<'a, I>(&self, events: I)
+    where
+        I: Iterator<Item = &'a QueueItem>,
+    {
+        events
+            .filter(|(key, _)| !key.is_empty())
+            .for_each(|(key, val)| {
+                let val = if let SendingEvent::Edu(val) = &val {
+                    &**val
+                } else {
+                    &[]
+                };
+
+                self.servercurrentevent_data.insert(key, val);
+                self.servernameevent_data.remove(key);
+            });
     }

     #[inline]
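`mark_as_active` now takes any iterator of queue items rather than a slice, so callers pass `new_events.iter()` directly; the sender changes at the end of this diff do exactly that. A one-line sketch (assuming `new_events` is the sender's Vec of key/event pairs):

    // No intermediate slice; the iterator is consumed in place.
    self.db.mark_as_active(new_events.iter());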
@@ -110,26 +112,40 @@ impl Data {
         })
     }

-    pub(super) fn queue_requests(&self, requests: &[(&SendingEvent, &Destination)]) -> Vec<Vec<u8>> {
-        let mut batch = Vec::new();
-        let mut keys = Vec::new();
-        for (event, destination) in requests {
-            let mut key = destination.get_prefix();
-            if let SendingEvent::Pdu(value) = event {
-                key.extend(value.as_ref());
-            } else {
-                key.extend(&self.services.globals.next_count().unwrap().to_be_bytes());
-            }
-
-            let value = if let SendingEvent::Edu(value) = &event {
-                &**value
-            } else {
-                &[]
-            };
-            batch.push((key.clone(), value.to_owned()));
-            keys.push(key);
-        }
-
-        self.servernameevent_data.insert_batch(batch.iter());
+    pub(super) fn queue_requests<'a, I>(&self, requests: I) -> Vec<Vec<u8>>
+    where
+        I: Iterator<Item = (&'a SendingEvent, &'a Destination)> + Clone + Debug + Send,
+    {
+        let keys: Vec<_> = requests
+            .clone()
+            .map(|(event, dest)| {
+                let mut key = dest.get_prefix();
+                if let SendingEvent::Pdu(value) = event {
+                    key.extend(value.as_ref());
+                } else {
+                    let count = self.services.globals.next_count().unwrap();
+                    key.extend(&count.to_be_bytes());
+                }
+
+                key
+            })
+            .collect();
+
+        self.servernameevent_data.insert_batch(
+            keys.iter()
+                .map(Vec::as_slice)
+                .zip(requests.map(at!(0)))
+                .map(|(key, event)| {
+                    let value = if let SendingEvent::Edu(value) = &event {
+                        &**value
+                    } else {
+                        &[]
+                    };
+
+                    (key, value)
+                }),
+        );
+
         keys
     }

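With the iterator-based `queue_requests`, a single request is queued through `std::iter::once` and a batch through a plain `map`, which is how the `sending` service call sites below are rewritten. A sketch of both shapes (bindings assumed):

    // One destination/event pair:
    let keys = self.db.queue_requests(once((&event, &dest)));

    // A previously collected Vec<(Destination, SendingEvent)>:
    let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o)));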
@@ -4,11 +4,11 @@ mod dest;
 mod send;
 mod sender;

-use std::{fmt::Debug, sync::Arc};
+use std::{fmt::Debug, iter::once, sync::Arc};

 use async_trait::async_trait;
 use conduit::{
-    err,
+    debug_warn, err,
     utils::{ReadyExt, TryReadyExt},
     warn, Result, Server,
 };
@@ -117,7 +117,7 @@ impl Service {
         let dest = Destination::Push(user.to_owned(), pushkey);
         let event = SendingEvent::Pdu(*pdu_id);
         let _cork = self.db.db.cork();
-        let keys = self.db.queue_requests(&[(&event, &dest)]);
+        let keys = self.db.queue_requests(once((&event, &dest)));
         self.dispatch(Msg {
             dest,
             event,
@@ -130,7 +130,7 @@ impl Service {
         let dest = Destination::Appservice(appservice_id);
         let event = SendingEvent::Pdu(pdu_id);
         let _cork = self.db.db.cork();
-        let keys = self.db.queue_requests(&[(&event, &dest)]);
+        let keys = self.db.queue_requests(once((&event, &dest)));
         self.dispatch(Msg {
             dest,
             event,
@@ -160,9 +160,7 @@ impl Service {
             .collect::<Vec<_>>()
             .await;

-        let keys = self
-            .db
-            .queue_requests(&requests.iter().map(|(o, e)| (e, o)).collect::<Vec<_>>());
+        let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o)));

         for ((dest, event), queue_id) in requests.into_iter().zip(keys) {
             self.dispatch(Msg {
@@ -180,7 +178,7 @@ impl Service {
         let dest = Destination::Normal(server.to_owned());
         let event = SendingEvent::Edu(serialized);
         let _cork = self.db.db.cork();
-        let keys = self.db.queue_requests(&[(&event, &dest)]);
+        let keys = self.db.queue_requests(once((&event, &dest)));
         self.dispatch(Msg {
             dest,
             event,
@@ -210,9 +208,7 @@ impl Service {
             .collect::<Vec<_>>()
             .await;

-        let keys = self
-            .db
-            .queue_requests(&requests.iter().map(|(o, e)| (e, o)).collect::<Vec<_>>());
+        let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o)));

         for ((dest, event), queue_id) in requests.into_iter().zip(keys) {
             self.dispatch(Msg {
@@ -289,13 +285,34 @@ impl Service {
         appservice::send_request(client, registration, request).await
     }

-    /// Cleanup event data
-    /// Used for instance after we remove an appservice registration
+    /// Clean up queued sending event data
+    ///
+    /// Used after we remove an appservice registration or a user deletes a push
+    /// key
     #[tracing::instrument(skip(self), level = "debug")]
-    pub async fn cleanup_events(&self, appservice_id: String) {
-        self.db
-            .delete_all_requests_for(&Destination::Appservice(appservice_id))
-            .await;
+    pub async fn cleanup_events(
+        &self, appservice_id: Option<&str>, user_id: Option<&UserId>, push_key: Option<&str>,
+    ) -> Result {
+        match (appservice_id, user_id, push_key) {
+            (None, Some(user_id), Some(push_key)) => {
+                self.db
+                    .delete_all_requests_for(&Destination::Push(user_id.to_owned(), push_key.to_owned()))
+                    .await;
+
+                Ok(())
+            },
+            (Some(appservice_id), None, None) => {
+                self.db
+                    .delete_all_requests_for(&Destination::Appservice(appservice_id.to_owned()))
+                    .await;
+
+                Ok(())
+            },
+            _ => {
+                debug_warn!("cleanup_events called with too many or too few arguments");
+                Ok(())
+            },
+        }
     }

     fn dispatch(&self, msg: Msg) -> Result<()> {
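`cleanup_events` now takes three `Option`s and accepts exactly the two shapes matched above: an appservice id alone, or a user id plus push key. A sketch of the two intended call sites (the surrounding bindings are assumptions):

    // After an appservice registration is removed:
    sending.cleanup_events(Some(appservice_id), None, None).await?;

    // After a user deletes a pusher:
    sending.cleanup_events(None, Some(user_id), Some(push_key)).await?;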
@@ -118,7 +118,7 @@ impl Service {

         // Insert any pdus we found
         if !new_events.is_empty() {
-            self.db.mark_as_active(&new_events);
+            self.db.mark_as_active(new_events.iter());

             let new_events_vec = new_events.into_iter().map(|(_, event)| event).collect();
             futures.push(self.send_events(dest.clone(), new_events_vec).boxed());
@@ -213,7 +213,7 @@ impl Service {
             // Compose the next transaction
             let _cork = self.db.db.cork();
             if !new_events.is_empty() {
-                self.db.mark_as_active(&new_events);
+                self.db.mark_as_active(new_events.iter());
                 for (_, e) in new_events {
                     events.push(e);
                 }