mirror of
https://forgejo.ellis.link/continuwuation/continuwuity.git
synced 2025-09-12 02:33:01 +02:00
Compare commits
6 commits
ad4651167a
...
823c73f480
Author | SHA1 | Date | |
---|---|---|---|
|
823c73f480 |
||
|
be7c334815 |
||
|
81d013dd61 |
||
|
1aa891c053 |
||
|
7e0c021603 |
||
|
6915e1c57e |
208 changed files with 8469 additions and 11806 deletions
|
@ -1,2 +0,0 @@
|
||||||
[alias]
|
|
||||||
xtask = "run --package xtask --"
|
|
|
@ -1,55 +0,0 @@
|
||||||
version: 1
|
|
||||||
|
|
||||||
x-source: &source forgejo.ellis.link/continuwuation/continuwuity
|
|
||||||
|
|
||||||
x-tags:
|
|
||||||
releases: &tags-releases
|
|
||||||
tags:
|
|
||||||
allow:
|
|
||||||
- "latest"
|
|
||||||
- "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
|
|
||||||
- "v[0-9]+\\.[0-9]+"
|
|
||||||
- "v[0-9]+"
|
|
||||||
main: &tags-main
|
|
||||||
tags:
|
|
||||||
allow:
|
|
||||||
- "latest"
|
|
||||||
- "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
|
|
||||||
- "v[0-9]+\\.[0-9]+"
|
|
||||||
- "v[0-9]+"
|
|
||||||
- "main"
|
|
||||||
commits: &tags-commits
|
|
||||||
tags:
|
|
||||||
allow:
|
|
||||||
- "latest"
|
|
||||||
- "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
|
|
||||||
- "v[0-9]+\\.[0-9]+"
|
|
||||||
- "v[0-9]+"
|
|
||||||
- "main"
|
|
||||||
- "sha-[a-f0-9]+"
|
|
||||||
all: &tags-all
|
|
||||||
tags:
|
|
||||||
allow:
|
|
||||||
- ".*"
|
|
||||||
|
|
||||||
# Registry credentials
|
|
||||||
creds:
|
|
||||||
- registry: forgejo.ellis.link
|
|
||||||
user: "{{env \"BUILTIN_REGISTRY_USER\"}}"
|
|
||||||
pass: "{{env \"BUILTIN_REGISTRY_PASSWORD\"}}"
|
|
||||||
- registry: registry.gitlab.com
|
|
||||||
user: "{{env \"GITLAB_USERNAME\"}}"
|
|
||||||
pass: "{{env \"GITLAB_TOKEN\"}}"
|
|
||||||
|
|
||||||
# Global defaults
|
|
||||||
defaults:
|
|
||||||
parallel: 3
|
|
||||||
interval: 2h
|
|
||||||
digestTags: true
|
|
||||||
|
|
||||||
# Sync configuration - each registry gets different image sets
|
|
||||||
sync:
|
|
||||||
- source: *source
|
|
||||||
target: registry.gitlab.com/continuwuity/continuwuity
|
|
||||||
type: repository
|
|
||||||
<<: *tags-main
|
|
|
@ -11,16 +11,16 @@ concurrency:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-and-deploy:
|
build-and-deploy:
|
||||||
name: 🏗️ Build and Deploy
|
name: Build and Deploy Element Web
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: 📦 Setup Node.js
|
- name: Setup Node.js
|
||||||
uses: https://github.com/actions/setup-node@v4
|
uses: https://code.forgejo.org/actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: "22"
|
node-version: "20"
|
||||||
|
|
||||||
- name: 🔨 Clone, setup, and build Element Web
|
- name: Clone, setup, and build Element Web
|
||||||
run: |
|
run: |
|
||||||
echo "Cloning Element Web..."
|
echo "Cloning Element Web..."
|
||||||
git clone https://github.com/maunium/element-web
|
git clone https://github.com/maunium/element-web
|
||||||
|
@ -64,7 +64,7 @@ jobs:
|
||||||
echo "Checking for build output..."
|
echo "Checking for build output..."
|
||||||
ls -la webapp/
|
ls -la webapp/
|
||||||
|
|
||||||
- name: ⚙️ Create config.json
|
- name: Create config.json
|
||||||
run: |
|
run: |
|
||||||
cat <<EOF > ./element-web/webapp/config.json
|
cat <<EOF > ./element-web/webapp/config.json
|
||||||
{
|
{
|
||||||
|
@ -100,25 +100,28 @@ jobs:
|
||||||
echo "Created ./element-web/webapp/config.json"
|
echo "Created ./element-web/webapp/config.json"
|
||||||
cat ./element-web/webapp/config.json
|
cat ./element-web/webapp/config.json
|
||||||
|
|
||||||
- name: 📤 Upload Artifact
|
- name: Upload Artifact
|
||||||
uses: https://code.forgejo.org/actions/upload-artifact@v3
|
uses: https://code.forgejo.org/actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: element-web
|
name: element-web
|
||||||
path: ./element-web/webapp/
|
path: ./element-web/webapp/
|
||||||
retention-days: 14
|
retention-days: 14
|
||||||
|
|
||||||
- name: 🛠️ Install Wrangler
|
- name: Install Wrangler
|
||||||
run: npm install --save-dev wrangler@latest
|
run: npm install --save-dev wrangler@latest
|
||||||
|
|
||||||
- name: 🚀 Deploy to Cloudflare Pages
|
- name: Deploy to Cloudflare Pages (Production)
|
||||||
if: vars.CLOUDFLARE_PROJECT_NAME != ''
|
if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
|
||||||
id: deploy
|
|
||||||
uses: https://github.com/cloudflare/wrangler-action@v3
|
uses: https://github.com/cloudflare/wrangler-action@v3
|
||||||
with:
|
with:
|
||||||
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
|
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
|
||||||
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
|
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
|
||||||
command: >-
|
command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
|
||||||
pages deploy ./element-web/webapp
|
|
||||||
--branch="${{ github.ref == 'refs/heads/main' && 'main' || github.head_ref || github.ref_name }}"
|
- name: Deploy to Cloudflare Pages (Preview)
|
||||||
--commit-dirty=true
|
if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
|
||||||
--project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
|
uses: https://github.com/cloudflare/wrangler-action@v3
|
||||||
|
with:
|
||||||
|
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
|
||||||
|
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
|
||||||
|
command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
|
||||||
|
|
|
@ -1,47 +0,0 @@
|
||||||
name: Mirror Container Images
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
# Run every 2 hours
|
|
||||||
- cron: "0 */2 * * *"
|
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
dry_run:
|
|
||||||
description: 'Dry run (check only, no actual mirroring)'
|
|
||||||
required: false
|
|
||||||
default: false
|
|
||||||
type: boolean
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: "mirror-images"
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
mirror-images:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
env:
|
|
||||||
BUILTIN_REGISTRY_USER: ${{ vars.BUILTIN_REGISTRY_USER }}
|
|
||||||
BUILTIN_REGISTRY_PASSWORD: ${{ secrets.BUILTIN_REGISTRY_PASSWORD }}
|
|
||||||
GITLAB_USERNAME: ${{ vars.GITLAB_USERNAME }}
|
|
||||||
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Install regctl
|
|
||||||
uses: https://forgejo.ellis.link/continuwuation/regclient-actions/regctl-installer@main
|
|
||||||
with:
|
|
||||||
binary: regsync
|
|
||||||
|
|
||||||
- name: Check what images need mirroring
|
|
||||||
run: |
|
|
||||||
echo "Checking images that need mirroring..."
|
|
||||||
regsync check -c .forgejo/regsync/regsync.yml -v info
|
|
||||||
|
|
||||||
- name: Mirror images
|
|
||||||
if: ${{ !inputs.dry_run }}
|
|
||||||
run: |
|
|
||||||
echo "Starting image mirroring..."
|
|
||||||
regsync once -c .forgejo/regsync/regsync.yml -v info
|
|
|
@ -204,31 +204,13 @@ jobs:
|
||||||
digest="${{ steps.build.outputs.digest }}"
|
digest="${{ steps.build.outputs.digest }}"
|
||||||
touch "/tmp/digests/${digest#sha256:}"
|
touch "/tmp/digests/${digest#sha256:}"
|
||||||
|
|
||||||
- name: Extract binary from container (image)
|
|
||||||
id: extract-binary-image
|
|
||||||
run: |
|
|
||||||
mkdir -p /tmp/binaries
|
|
||||||
digest="${{ steps.build.outputs.digest }}"
|
|
||||||
echo "container_id=$(docker create --platform ${{ matrix.platform }} ${{ needs.define-variables.outputs.images_list }}@$digest)" >> $GITHUB_OUTPUT
|
|
||||||
- name: Extract binary from container (copy)
|
|
||||||
run: docker cp ${{ steps.extract-binary-image.outputs.container_id }}:/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
|
|
||||||
- name: Extract binary from container (cleanup)
|
|
||||||
run: docker rm ${{ steps.extract-binary-image.outputs.container_id }}
|
|
||||||
|
|
||||||
- name: Upload binary artifact
|
|
||||||
uses: forgejo/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
|
|
||||||
path: /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
|
|
||||||
if-no-files-found: error
|
|
||||||
|
|
||||||
- name: Upload digest
|
- name: Upload digest
|
||||||
uses: forgejo/upload-artifact@v4
|
uses: forgejo/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: digests-${{ matrix.slug }}
|
name: digests-${{ matrix.slug }}
|
||||||
path: /tmp/digests/*
|
path: /tmp/digests/*
|
||||||
if-no-files-found: error
|
if-no-files-found: error
|
||||||
retention-days: 5
|
retention-days: 1
|
||||||
|
|
||||||
merge:
|
merge:
|
||||||
runs-on: dind
|
runs-on: dind
|
||||||
|
@ -256,13 +238,12 @@ jobs:
|
||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
tags: |
|
tags: |
|
||||||
type=semver,pattern={{version}},prefix=v
|
type=semver,pattern=v{{version}}
|
||||||
type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v
|
type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }}
|
||||||
type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v
|
type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
|
||||||
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
|
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
|
||||||
type=ref,event=pr
|
type=ref,event=pr
|
||||||
type=sha,format=long
|
type=sha,format=long
|
||||||
type=raw,value=latest,enable=${{ !startsWith(github.ref, 'refs/tags/v') }}
|
|
||||||
images: ${{needs.define-variables.outputs.images}}
|
images: ${{needs.define-variables.outputs.images}}
|
||||||
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
|
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
|
||||||
env:
|
env:
|
||||||
|
|
167
Cargo.lock
generated
167
Cargo.lock
generated
|
@ -50,56 +50,12 @@ dependencies = [
|
||||||
"alloc-no-stdlib",
|
"alloc-no-stdlib",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "anstream"
|
|
||||||
version = "0.6.18"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
|
|
||||||
dependencies = [
|
|
||||||
"anstyle",
|
|
||||||
"anstyle-parse",
|
|
||||||
"anstyle-query",
|
|
||||||
"anstyle-wincon",
|
|
||||||
"colorchoice",
|
|
||||||
"is_terminal_polyfill",
|
|
||||||
"utf8parse",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "anstyle"
|
name = "anstyle"
|
||||||
version = "1.0.11"
|
version = "1.0.11"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"
|
checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "anstyle-parse"
|
|
||||||
version = "0.2.6"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
|
|
||||||
dependencies = [
|
|
||||||
"utf8parse",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "anstyle-query"
|
|
||||||
version = "1.1.2"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
|
|
||||||
dependencies = [
|
|
||||||
"windows-sys 0.59.0",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "anstyle-wincon"
|
|
||||||
version = "3.0.8"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa"
|
|
||||||
dependencies = [
|
|
||||||
"anstyle",
|
|
||||||
"once_cell_polyfill",
|
|
||||||
"windows-sys 0.59.0",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "anyhow"
|
name = "anyhow"
|
||||||
version = "1.0.98"
|
version = "1.0.98"
|
||||||
|
@ -764,25 +720,14 @@ dependencies = [
|
||||||
"clap_derive",
|
"clap_derive",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "clap-markdown"
|
|
||||||
version = "0.1.5"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "d2a2617956a06d4885b490697b5307ebb09fec10b088afc18c81762d848c2339"
|
|
||||||
dependencies = [
|
|
||||||
"clap",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "clap_builder"
|
name = "clap_builder"
|
||||||
version = "4.5.40"
|
version = "4.5.40"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e"
|
checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"anstream",
|
|
||||||
"anstyle",
|
"anstyle",
|
||||||
"clap_lex",
|
"clap_lex",
|
||||||
"strsim",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
|
@ -803,16 +748,6 @@ version = "0.7.5"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
|
checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "clap_mangen"
|
|
||||||
version = "0.2.26"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "724842fa9b144f9b89b3f3d371a89f3455eea660361d13a554f68f8ae5d6c13a"
|
|
||||||
dependencies = [
|
|
||||||
"clap",
|
|
||||||
"roff",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cmake"
|
name = "cmake"
|
||||||
version = "0.1.54"
|
version = "0.1.54"
|
||||||
|
@ -828,12 +763,6 @@ version = "1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
|
checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "colorchoice"
|
|
||||||
version = "1.0.3"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "concurrent-queue"
|
name = "concurrent-queue"
|
||||||
version = "2.5.0"
|
version = "2.5.0"
|
||||||
|
@ -868,7 +797,6 @@ dependencies = [
|
||||||
"tokio-metrics",
|
"tokio-metrics",
|
||||||
"tracing",
|
"tracing",
|
||||||
"tracing-flame",
|
"tracing-flame",
|
||||||
"tracing-journald",
|
|
||||||
"tracing-opentelemetry",
|
"tracing-opentelemetry",
|
||||||
"tracing-subscriber",
|
"tracing-subscriber",
|
||||||
]
|
]
|
||||||
|
@ -1076,7 +1004,6 @@ dependencies = [
|
||||||
"loole",
|
"loole",
|
||||||
"lru-cache",
|
"lru-cache",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"recaptcha-verify",
|
|
||||||
"regex",
|
"regex",
|
||||||
"reqwest",
|
"reqwest",
|
||||||
"ruma",
|
"ruma",
|
||||||
|
@ -2471,12 +2398,6 @@ version = "2.11.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
|
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "is_terminal_polyfill"
|
|
||||||
version = "1.70.1"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "itertools"
|
name = "itertools"
|
||||||
version = "0.12.1"
|
version = "0.12.1"
|
||||||
|
@ -3085,12 +3006,6 @@ dependencies = [
|
||||||
"portable-atomic",
|
"portable-atomic",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "once_cell_polyfill"
|
|
||||||
version = "1.70.1"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad"
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "openssl-probe"
|
name = "openssl-probe"
|
||||||
version = "0.1.6"
|
version = "0.1.6"
|
||||||
|
@ -3752,17 +3667,6 @@ dependencies = [
|
||||||
"crossbeam-utils",
|
"crossbeam-utils",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "recaptcha-verify"
|
|
||||||
version = "0.1.5"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "71e3be7b2e46e24637ac96b0c9f70070f188652018573f36f4e511dcad09738a"
|
|
||||||
dependencies = [
|
|
||||||
"reqwest",
|
|
||||||
"serde",
|
|
||||||
"serde_json",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "redox_syscall"
|
name = "redox_syscall"
|
||||||
version = "0.5.13"
|
version = "0.5.13"
|
||||||
|
@ -3891,16 +3795,10 @@ dependencies = [
|
||||||
"windows-sys 0.52.0",
|
"windows-sys 0.52.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "roff"
|
|
||||||
version = "0.2.2"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "88f8660c1ff60292143c98d08fc6e2f654d722db50410e3f3797d40baaf9d8f3"
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma"
|
name = "ruma"
|
||||||
version = "0.10.1"
|
version = "0.10.1"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"assign",
|
"assign",
|
||||||
"js_int",
|
"js_int",
|
||||||
|
@ -3920,7 +3818,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-appservice-api"
|
name = "ruma-appservice-api"
|
||||||
version = "0.10.0"
|
version = "0.10.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js_int",
|
"js_int",
|
||||||
"ruma-common",
|
"ruma-common",
|
||||||
|
@ -3932,7 +3830,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-client-api"
|
name = "ruma-client-api"
|
||||||
version = "0.18.0"
|
version = "0.18.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"as_variant",
|
"as_variant",
|
||||||
"assign",
|
"assign",
|
||||||
|
@ -3955,7 +3853,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-common"
|
name = "ruma-common"
|
||||||
version = "0.13.0"
|
version = "0.13.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"as_variant",
|
"as_variant",
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
|
@ -3987,7 +3885,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-events"
|
name = "ruma-events"
|
||||||
version = "0.28.1"
|
version = "0.28.1"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"as_variant",
|
"as_variant",
|
||||||
"indexmap 2.9.0",
|
"indexmap 2.9.0",
|
||||||
|
@ -4012,7 +3910,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-federation-api"
|
name = "ruma-federation-api"
|
||||||
version = "0.9.0"
|
version = "0.9.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bytes",
|
"bytes",
|
||||||
"headers",
|
"headers",
|
||||||
|
@ -4034,7 +3932,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-identifiers-validation"
|
name = "ruma-identifiers-validation"
|
||||||
version = "0.9.5"
|
version = "0.9.5"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js_int",
|
"js_int",
|
||||||
"thiserror 2.0.12",
|
"thiserror 2.0.12",
|
||||||
|
@ -4043,7 +3941,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-identity-service-api"
|
name = "ruma-identity-service-api"
|
||||||
version = "0.9.0"
|
version = "0.9.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js_int",
|
"js_int",
|
||||||
"ruma-common",
|
"ruma-common",
|
||||||
|
@ -4053,7 +3951,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-macros"
|
name = "ruma-macros"
|
||||||
version = "0.13.0"
|
version = "0.13.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"proc-macro-crate",
|
"proc-macro-crate",
|
||||||
|
@ -4068,7 +3966,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-push-gateway-api"
|
name = "ruma-push-gateway-api"
|
||||||
version = "0.9.0"
|
version = "0.9.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js_int",
|
"js_int",
|
||||||
"ruma-common",
|
"ruma-common",
|
||||||
|
@ -4080,7 +3978,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-signatures"
|
name = "ruma-signatures"
|
||||||
version = "0.15.0"
|
version = "0.15.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02"
|
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
"ed25519-dalek",
|
"ed25519-dalek",
|
||||||
|
@ -4738,12 +4636,6 @@ dependencies = [
|
||||||
"quote",
|
"quote",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "strsim"
|
|
||||||
version = "0.11.1"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "subslice"
|
name = "subslice"
|
||||||
version = "0.2.3"
|
version = "0.2.3"
|
||||||
|
@ -5286,17 +5178,6 @@ dependencies = [
|
||||||
"tracing-subscriber",
|
"tracing-subscriber",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "tracing-journald"
|
|
||||||
version = "0.3.1"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657"
|
|
||||||
dependencies = [
|
|
||||||
"libc",
|
|
||||||
"tracing-core",
|
|
||||||
"tracing-subscriber",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tracing-log"
|
name = "tracing-log"
|
||||||
version = "0.2.0"
|
version = "0.2.0"
|
||||||
|
@ -5474,12 +5355,6 @@ version = "1.0.4"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
|
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "utf8parse"
|
|
||||||
version = "0.2.2"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "uuid"
|
name = "uuid"
|
||||||
version = "1.17.0"
|
version = "1.17.0"
|
||||||
|
@ -6131,26 +6006,6 @@ dependencies = [
|
||||||
"markup5ever",
|
"markup5ever",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "xtask"
|
|
||||||
version = "0.5.0-rc.6"
|
|
||||||
dependencies = [
|
|
||||||
"clap",
|
|
||||||
"serde",
|
|
||||||
"serde_json",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "xtask-generate-commands"
|
|
||||||
version = "0.5.0-rc.6"
|
|
||||||
dependencies = [
|
|
||||||
"clap-markdown",
|
|
||||||
"clap_builder",
|
|
||||||
"clap_mangen",
|
|
||||||
"conduwuit",
|
|
||||||
"conduwuit_admin",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "yansi"
|
name = "yansi"
|
||||||
version = "1.0.1"
|
version = "1.0.1"
|
||||||
|
|
14
Cargo.toml
14
Cargo.toml
|
@ -2,7 +2,7 @@
|
||||||
|
|
||||||
[workspace]
|
[workspace]
|
||||||
resolver = "2"
|
resolver = "2"
|
||||||
members = ["src/*", "xtask/*"]
|
members = ["src/*"]
|
||||||
default-members = ["src/*"]
|
default-members = ["src/*"]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
|
@ -213,8 +213,6 @@ default-features = false
|
||||||
version = "0.3.19"
|
version = "0.3.19"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"]
|
features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"]
|
||||||
[workspace.dependencies.tracing-journald]
|
|
||||||
version = "0.3.1"
|
|
||||||
[workspace.dependencies.tracing-core]
|
[workspace.dependencies.tracing-core]
|
||||||
version = "0.1.33"
|
version = "0.1.33"
|
||||||
default-features = false
|
default-features = false
|
||||||
|
@ -352,7 +350,7 @@ version = "0.1.2"
|
||||||
[workspace.dependencies.ruma]
|
[workspace.dependencies.ruma]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
||||||
#branch = "conduwuit-changes"
|
#branch = "conduwuit-changes"
|
||||||
rev = "a4b948b40417a65ab0282ae47cc50035dd455e02"
|
rev = "9b65f83981f6f53d185ce77da37aaef9dfd764a9"
|
||||||
features = [
|
features = [
|
||||||
"compat",
|
"compat",
|
||||||
"rand",
|
"rand",
|
||||||
|
@ -638,11 +636,6 @@ package = "conduwuit_build_metadata"
|
||||||
path = "src/build_metadata"
|
path = "src/build_metadata"
|
||||||
default-features = false
|
default-features = false
|
||||||
|
|
||||||
|
|
||||||
[workspace.dependencies.conduwuit]
|
|
||||||
package = "conduwuit"
|
|
||||||
path = "src/main"
|
|
||||||
|
|
||||||
###############################################################################
|
###############################################################################
|
||||||
#
|
#
|
||||||
# Release profiles
|
# Release profiles
|
||||||
|
@ -768,8 +761,7 @@ inherits = "dev"
|
||||||
# '-Clink-arg=-Wl,-z,nodlopen',
|
# '-Clink-arg=-Wl,-z,nodlopen',
|
||||||
# '-Clink-arg=-Wl,-z,nodelete',
|
# '-Clink-arg=-Wl,-z,nodelete',
|
||||||
#]
|
#]
|
||||||
[profile.dev.package.xtask-generate-commands]
|
|
||||||
inherits = "dev"
|
|
||||||
[profile.dev.package.conduwuit]
|
[profile.dev.package.conduwuit]
|
||||||
inherits = "dev"
|
inherits = "dev"
|
||||||
#rustflags = [
|
#rustflags = [
|
||||||
|
|
|
@ -17,10 +17,6 @@ DeviceAllow=char-tty
|
||||||
StandardInput=tty-force
|
StandardInput=tty-force
|
||||||
StandardOutput=tty
|
StandardOutput=tty
|
||||||
StandardError=journal+console
|
StandardError=journal+console
|
||||||
|
|
||||||
Environment="CONTINUWUITY_LOG_TO_JOURNALD=1"
|
|
||||||
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
|
|
||||||
|
|
||||||
TTYReset=yes
|
TTYReset=yes
|
||||||
# uncomment to allow buffer to be cleared every restart
|
# uncomment to allow buffer to be cleared every restart
|
||||||
TTYVTDisallocate=no
|
TTYVTDisallocate=no
|
||||||
|
|
|
@ -1,3 +1,2 @@
|
||||||
style = "conventional"
|
style = "conventional"
|
||||||
subject_length = 72
|
|
||||||
allowed_types = ["ci", "build", "fix", "feat", "chore", "docs", "style", "refactor", "perf", "test"]
|
allowed_types = ["ci", "build", "fix", "feat", "chore", "docs", "style", "refactor", "perf", "test"]
|
||||||
|
|
|
@ -398,22 +398,6 @@
|
||||||
#
|
#
|
||||||
#allow_registration = false
|
#allow_registration = false
|
||||||
|
|
||||||
# If registration is enabled, and this setting is true, new users
|
|
||||||
# registered after the first admin user will be automatically suspended
|
|
||||||
# and will require an admin to run `!admin users unsuspend <user_id>`.
|
|
||||||
#
|
|
||||||
# Suspended users are still able to read messages, make profile updates,
|
|
||||||
# leave rooms, and deactivate their account, however cannot send messages,
|
|
||||||
# invites, or create/join or otherwise modify rooms.
|
|
||||||
# They are effectively read-only.
|
|
||||||
#
|
|
||||||
# If you want to use this to screen people who register on your server,
|
|
||||||
# you should add a room to `auto_join_rooms` that is public, and contains
|
|
||||||
# information that new users can read (since they won't be able to DM
|
|
||||||
# anyone, or send a message, and may be confused).
|
|
||||||
#
|
|
||||||
#suspend_on_register = false
|
|
||||||
|
|
||||||
# Enabling this setting opens registration to anyone without restrictions.
|
# Enabling this setting opens registration to anyone without restrictions.
|
||||||
# This makes your server vulnerable to abuse
|
# This makes your server vulnerable to abuse
|
||||||
#
|
#
|
||||||
|
@ -441,26 +425,6 @@
|
||||||
#
|
#
|
||||||
#registration_token_file =
|
#registration_token_file =
|
||||||
|
|
||||||
# The public site key for reCaptcha. If this is provided, reCaptcha
|
|
||||||
# becomes required during registration. If both captcha *and*
|
|
||||||
# registration token are enabled, both will be required during
|
|
||||||
# registration.
|
|
||||||
#
|
|
||||||
# IMPORTANT: "Verify the origin of reCAPTCHA solutions" **MUST** BE
|
|
||||||
# DISABLED IF YOU WANT THE CAPTCHA TO WORK IN 3RD PARTY CLIENTS, OR
|
|
||||||
# CLIENTS HOSTED ON DOMAINS OTHER THAN YOUR OWN!
|
|
||||||
#
|
|
||||||
# Registration must be enabled (`allow_registration` must be true) for
|
|
||||||
# this to have any effect.
|
|
||||||
#
|
|
||||||
#recaptcha_site_key =
|
|
||||||
|
|
||||||
# The private site key for reCaptcha.
|
|
||||||
# If this is omitted, captcha registration will not work,
|
|
||||||
# even if `recaptcha_site_key` is set.
|
|
||||||
#
|
|
||||||
#recaptcha_private_site_key =
|
|
||||||
|
|
||||||
# Controls whether encrypted rooms and events are allowed.
|
# Controls whether encrypted rooms and events are allowed.
|
||||||
#
|
#
|
||||||
#allow_encryption = true
|
#allow_encryption = true
|
||||||
|
@ -696,21 +660,6 @@
|
||||||
#
|
#
|
||||||
#log_thread_ids = false
|
#log_thread_ids = false
|
||||||
|
|
||||||
# Enable journald logging on Unix platforms
|
|
||||||
#
|
|
||||||
# When enabled, log output will be sent to the systemd journal
|
|
||||||
# This is only supported on Unix platforms
|
|
||||||
#
|
|
||||||
#log_to_journald = false
|
|
||||||
|
|
||||||
# The syslog identifier to use with journald logging
|
|
||||||
#
|
|
||||||
# Only used when journald logging is enabled
|
|
||||||
#
|
|
||||||
# Defaults to the binary name
|
|
||||||
#
|
|
||||||
#journald_identifier =
|
|
||||||
|
|
||||||
# OpenID token expiration/TTL in seconds.
|
# OpenID token expiration/TTL in seconds.
|
||||||
#
|
#
|
||||||
# These are the OpenID tokens that are primarily used for Matrix account
|
# These are the OpenID tokens that are primarily used for Matrix account
|
||||||
|
@ -1104,13 +1053,6 @@
|
||||||
#
|
#
|
||||||
#presence_timeout_remote_users = true
|
#presence_timeout_remote_users = true
|
||||||
|
|
||||||
# Allow local read receipts.
|
|
||||||
#
|
|
||||||
# Disabling this will effectively also disable outgoing federated read
|
|
||||||
# receipts.
|
|
||||||
#
|
|
||||||
#allow_local_read_receipts = true
|
|
||||||
|
|
||||||
# Allow receiving incoming read receipts from remote servers.
|
# Allow receiving incoming read receipts from remote servers.
|
||||||
#
|
#
|
||||||
#allow_incoming_read_receipts = true
|
#allow_incoming_read_receipts = true
|
||||||
|
@ -1119,13 +1061,6 @@
|
||||||
#
|
#
|
||||||
#allow_outgoing_read_receipts = true
|
#allow_outgoing_read_receipts = true
|
||||||
|
|
||||||
# Allow local typing updates.
|
|
||||||
#
|
|
||||||
# Disabling this will effectively also disable outgoing federated typing
|
|
||||||
# updates.
|
|
||||||
#
|
|
||||||
#allow_local_typing = true
|
|
||||||
|
|
||||||
# Allow outgoing typing updates to federation.
|
# Allow outgoing typing updates to federation.
|
||||||
#
|
#
|
||||||
#allow_outgoing_typing = true
|
#allow_outgoing_typing = true
|
||||||
|
|
22
debian/README.md
vendored
22
debian/README.md
vendored
|
@ -1,23 +1,29 @@
|
||||||
# Continuwuity for Debian
|
# Continuwuity for Debian
|
||||||
|
|
||||||
This document provides information about downloading and deploying the Debian package. You can also use this guide for other `apt`-based distributions such as Ubuntu.
|
Information about downloading and deploying the Debian package. This may also be
|
||||||
|
referenced for other `apt`-based distros such as Ubuntu.
|
||||||
|
|
||||||
### Installation
|
### Installation
|
||||||
|
|
||||||
See the [generic deployment guide](../deploying/generic.md) for additional information about using the Debian package.
|
It is recommended to see the [generic deployment guide](../deploying/generic.md)
|
||||||
|
for further information if needed as usage of the Debian package is generally
|
||||||
|
related.
|
||||||
|
|
||||||
No `apt` repository is currently available. This feature is in development.
|
No `apt` repository is currently offered yet, it is in the works/development.
|
||||||
|
|
||||||
### Configuration
|
### Configuration
|
||||||
|
|
||||||
After installation, Continuwuity places the example configuration at `/etc/conduwuit/conduwuit.toml` as the default configuration file. The configuration file indicates which settings you must change before starting the service.
|
When installed, the example config is placed at `/etc/conduwuit/conduwuit.toml`
|
||||||
|
as the default config. The config mentions things required to be changed before
|
||||||
|
starting.
|
||||||
|
|
||||||
You can customize additional settings by uncommenting and modifying the configuration options in `/etc/conduwuit/conduwuit.toml`.
|
You can tweak more detailed settings by uncommenting and setting the config
|
||||||
|
options in `/etc/conduwuit/conduwuit.toml`.
|
||||||
|
|
||||||
### Running
|
### Running
|
||||||
|
|
||||||
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/sbin/conduwuit`.
|
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary is installed at `/usr/sbin/conduwuit`.
|
||||||
|
|
||||||
By default, this package assumes that Continuwuity runs behind a reverse proxy. The default configuration options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS. To federate properly, you must set up TLS certificates and certificate renewal.
|
This package assumes by default that conduwuit will be placed behind a reverse proxy. The default config options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS, so you will need to set up TLS certificates and renewal for it to work properly if you intend to federate.
|
||||||
|
|
||||||
For information about setting up a reverse proxy and TLS, consult online documentation and guides. The [generic deployment guide](../deploying/generic.md#setting-up-the-reverse-proxy) documents Caddy, which is the most user-friendly option for reverse proxy configuration.
|
Consult various online documentation and guides on setting up a reverse proxy and TLS. Caddy is documented at the [generic deployment guide](../deploying/generic.md#setting-up-the-reverse-proxy) as it's the easiest and most user friendly.
|
||||||
|
|
3
debian/conduwuit.service
vendored
3
debian/conduwuit.service
vendored
|
@ -14,9 +14,6 @@ Type=notify
|
||||||
|
|
||||||
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
|
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
|
||||||
|
|
||||||
Environment="CONTINUWUITY_LOG_TO_JOURNALD=1"
|
|
||||||
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
|
|
||||||
|
|
||||||
ExecStart=/usr/sbin/conduwuit
|
ExecStart=/usr/sbin/conduwuit
|
||||||
|
|
||||||
ReadWritePaths=/var/lib/conduwuit /etc/conduwuit
|
ReadWritePaths=/var/lib/conduwuit /etc/conduwuit
|
||||||
|
|
|
@ -15,7 +15,6 @@
|
||||||
- [Appservices](appservices.md)
|
- [Appservices](appservices.md)
|
||||||
- [Maintenance](maintenance.md)
|
- [Maintenance](maintenance.md)
|
||||||
- [Troubleshooting](troubleshooting.md)
|
- [Troubleshooting](troubleshooting.md)
|
||||||
- [Admin Command Reference](admin_reference.md)
|
|
||||||
- [Development](development.md)
|
- [Development](development.md)
|
||||||
- [Contributing](contributing.md)
|
- [Contributing](contributing.md)
|
||||||
- [Code Style Guide](development/code_style.md)
|
- [Code Style Guide](development/code_style.md)
|
||||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,5 +1,5 @@
|
||||||
# Continuwuity for Arch Linux
|
# Continuwuity for Arch Linux
|
||||||
|
|
||||||
Continuwuity is available in the `archlinuxcn` repository and AUR with the same package name `continuwuity`, which includes the latest tagged version. The development version is available on AUR as `continuwuity-git`.
|
Continuwuity is available on the `archlinuxcn` repository and AUR, with the same package name `continuwuity`, which includes latest taggged version. The development version is available on AUR as `continuwuity-git`
|
||||||
|
|
||||||
Simply install the `continuwuity` package. Configure the service in `/etc/conduwuit/conduwuit.toml`, then enable and start the continuwuity.service.
|
Simply install the `continuwuity` package. Configure the service in `/etc/conduwuit/conduwuit.toml`, then enable/start the continuwuity.service.
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
To run Continuwuity with Docker, you can either build the image yourself or pull it
|
To run Continuwuity with Docker you can either build the image yourself or pull it
|
||||||
from a registry.
|
from a registry.
|
||||||
|
|
||||||
### Use a registry
|
### Use a registry
|
||||||
|
@ -26,7 +26,7 @@ to pull it to your machine.
|
||||||
|
|
||||||
### Run
|
### Run
|
||||||
|
|
||||||
When you have the image, you can simply run it with
|
When you have the image you can simply run it with
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run -d -p 8448:6167 \
|
docker run -d -p 8448:6167 \
|
||||||
|
@ -36,7 +36,7 @@ docker run -d -p 8448:6167 \
|
||||||
--name continuwuity $LINK
|
--name continuwuity $LINK
|
||||||
```
|
```
|
||||||
|
|
||||||
or you can use [Docker Compose](#docker-compose).
|
or you can use [docker compose](#docker-compose).
|
||||||
|
|
||||||
The `-d` flag lets the container run in detached mode. You may supply an
|
The `-d` flag lets the container run in detached mode. You may supply an
|
||||||
optional `continuwuity.toml` config file, the example config can be found
|
optional `continuwuity.toml` config file, the example config can be found
|
||||||
|
@ -46,15 +46,15 @@ using env vars. For an overview of possible values, please take a look at the
|
||||||
[`docker-compose.yml`](docker-compose.yml) file.
|
[`docker-compose.yml`](docker-compose.yml) file.
|
||||||
|
|
||||||
If you just want to test Continuwuity for a short time, you can use the `--rm`
|
If you just want to test Continuwuity for a short time, you can use the `--rm`
|
||||||
flag, which cleans up everything related to your container after you stop
|
flag, which will clean up everything related to your container after you stop
|
||||||
it.
|
it.
|
||||||
|
|
||||||
### Docker-compose
|
### Docker-compose
|
||||||
|
|
||||||
If the `docker run` command is not suitable for you or your setup, you can also use one
|
If the `docker run` command is not for you or your setup, you can also use one
|
||||||
of the provided `docker-compose` files.
|
of the provided `docker-compose` files.
|
||||||
|
|
||||||
Depending on your proxy setup, you can use one of the following files:
|
Depending on your proxy setup, you can use one of the following files;
|
||||||
|
|
||||||
- If you already have a `traefik` instance set up, use
|
- If you already have a `traefik` instance set up, use
|
||||||
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)
|
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)
|
||||||
|
@ -65,7 +65,7 @@ Depending on your proxy setup, you can use one of the following files:
|
||||||
`example.com` placeholders with your own domain
|
`example.com` placeholders with your own domain
|
||||||
- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml)
|
- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml)
|
||||||
|
|
||||||
When picking the Traefik-related compose file, rename it to
|
When picking the traefik-related compose file, rename it so it matches
|
||||||
`docker-compose.yml`, and rename the override file to
|
`docker-compose.yml`, and rename the override file to
|
||||||
`docker-compose.override.yml`. Edit the latter with the values you want for your
|
`docker-compose.override.yml`. Edit the latter with the values you want for your
|
||||||
server.
|
server.
|
||||||
|
@ -77,18 +77,18 @@ create the `caddy` network before spinning up the containers:
|
||||||
docker network create caddy
|
docker network create caddy
|
||||||
```
|
```
|
||||||
|
|
||||||
After that, you can rename it to `docker-compose.yml` and spin up the
|
After that, you can rename it so it matches `docker-compose.yml` and spin up the
|
||||||
containers!
|
containers!
|
||||||
|
|
||||||
Additional info about deploying Continuwuity can be found [here](generic.md).
|
Additional info about deploying Continuwuity can be found [here](generic.md).
|
||||||
|
|
||||||
### Build
|
### Build
|
||||||
|
|
||||||
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables efficient multi-platform builds.
|
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables multi-platform builds efficiently.
|
||||||
|
|
||||||
The resulting images are widely compatible with Docker and other container runtimes like Podman or containerd.
|
The resulting images are broadly compatible with Docker and other container runtimes like Podman or containerd.
|
||||||
|
|
||||||
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates, and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition.
|
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition.
|
||||||
|
|
||||||
To build an image locally using Docker Buildx, you can typically run a command like:
|
To build an image locally using Docker Buildx, you can typically run a command like:
|
||||||
|
|
||||||
|
@ -109,8 +109,8 @@ Refer to the Docker Buildx documentation for more advanced build options.
|
||||||
|
|
||||||
### Run
|
### Run
|
||||||
|
|
||||||
If you have already built the image or want to use one from the registries, you
|
If you already have built the image or want to use one from the registries, you
|
||||||
can start the container and everything else in the compose file in detached
|
can just start the container and everything else in the compose file in detached
|
||||||
mode with:
|
mode with:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
@ -121,24 +121,22 @@ docker compose up -d
|
||||||
|
|
||||||
### Use Traefik as Proxy
|
### Use Traefik as Proxy
|
||||||
|
|
||||||
As a container user, you probably know about Traefik. It is an easy-to-use
|
As a container user, you probably know about Traefik. It is a easy to use
|
||||||
reverse proxy for making containerized apps and services available through the
|
reverse proxy for making containerized app and services available through the
|
||||||
web. With the two provided files,
|
web. With the two provided files,
|
||||||
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
||||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
|
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
|
||||||
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy
|
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy
|
||||||
to deploy and use Continuwuity, with a small caveat. If you have already looked at
|
to deploy and use Continuwuity, with a little caveat. If you already took a look at
|
||||||
the files, you should have seen the `well-known` service, which is the
|
the files, then you should have seen the `well-known` service, and that is the
|
||||||
small caveat. Traefik is simply a proxy and load balancer and cannot
|
little caveat. Traefik is simply a proxy and loadbalancer and is not able to
|
||||||
serve any kind of content. For Continuwuity to federate, we need to either
|
serve any kind of content, but for Continuwuity to federate, we need to either
|
||||||
expose ports `443` and `8448` or serve two endpoints: `.well-known/matrix/client`
|
expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client`
|
||||||
and `.well-known/matrix/server`.
|
and `.well-known/matrix/server`.
|
||||||
|
|
||||||
With the service `well-known`, we use a single `nginx` container that serves
|
With the service `well-known` we use a single `nginx` container that will serve
|
||||||
those two files.
|
those two files.
|
||||||
|
|
||||||
Alternatively, you can use Continuwuity's built-in delegation file capability. Set up the delegation files in the configuration file, and then proxy paths under `/.well-known/matrix` to continuwuity. For example, the label ``traefik.http.routers.continuwuity.rule=(Host(`matrix.ellis.link`) || (Host(`ellis.link`) && PathPrefix(`/.well-known/matrix`)))`` does this for the domain `ellis.link`.
|
|
||||||
|
|
||||||
## Voice communication
|
## Voice communication
|
||||||
|
|
||||||
See the [TURN](../turn.md) page.
|
See the [TURN](../turn.md) page.
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
# Continuwuity for FreeBSD
|
# Continuwuity for FreeBSD
|
||||||
|
|
||||||
Continuwuity currently does not provide FreeBSD builds or FreeBSD packaging. However, Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
|
Continuwuity at the moment does not provide FreeBSD builds or have FreeBSD packaging, however Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
|
||||||
|
|
||||||
Contributions to get Continuwuity packaged for FreeBSD are welcome.
|
Contributions for getting Continuwuity packaged are welcome.
|
||||||
|
|
|
@ -13,42 +13,31 @@
|
||||||
You may simply download the binary that fits your machine architecture (x86_64
|
You may simply download the binary that fits your machine architecture (x86_64
|
||||||
or aarch64). Run `uname -m` to see what you need.
|
or aarch64). Run `uname -m` to see what you need.
|
||||||
|
|
||||||
You can download prebuilt fully static musl binaries from the latest tagged
|
Prebuilt fully static musl binaries can be downloaded from the latest tagged
|
||||||
release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or
|
release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or
|
||||||
from the `main` CI branch workflow artifact output. These also include Debian/Ubuntu
|
`main` CI branch workflow artifact output. These also include Debian/Ubuntu
|
||||||
packages.
|
packages.
|
||||||
|
|
||||||
You can download these directly using curl. The `ci-bins` are CI workflow binaries organized by commit
|
These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit
|
||||||
hash/revision, and `releases` are tagged releases. Sort by descending last
|
hash/revision, and `releases` are tagged releases. Sort by descending last
|
||||||
modified date to find the latest.
|
modified for the latest.
|
||||||
|
|
||||||
These binaries have jemalloc and io_uring statically linked and included with
|
These binaries have jemalloc and io_uring statically linked and included with
|
||||||
them, so no additional dynamic dependencies need to be installed.
|
them, so no additional dynamic dependencies need to be installed.
|
||||||
|
|
||||||
For the **best** performance: if you are using an `x86_64` CPU made in the last ~15 years,
|
For the **best** performance; if using an `x86_64` CPU made in the last ~15 years,
|
||||||
we recommend using the `-haswell-` optimized binaries. These set
|
we recommend using the `-haswell-` optimised binaries. This sets
|
||||||
`-march=haswell`, which provides the most compatible and highest performance with
|
`-march=haswell` which is the most compatible and highest performance with
|
||||||
optimized binaries. The database backend, RocksDB, benefits most from this as it
|
optimised binaries. The database backend, RocksDB, most benefits from this as it
|
||||||
uses hardware-accelerated CRC32 hashing/checksumming, which is critical
|
will then use hardware accelerated CRC32 hashing/checksumming which is critical
|
||||||
for performance.
|
for performance.
|
||||||
|
|
||||||
### Compiling
|
### Compiling
|
||||||
|
|
||||||
Alternatively, you may compile the binary yourself.
|
Alternatively, you may compile the binary yourself. We recommend using
|
||||||
|
Nix (or [Lix](https://lix.systems)) to build Continuwuity as this has the most
|
||||||
### Building with the Rust toolchain
|
guaranteed reproducibiltiy and easiest to get a build environment and output
|
||||||
|
going. This also allows easy cross-compilation.
|
||||||
If wanting to build using standard Rust toolchains, make sure you install:
|
|
||||||
|
|
||||||
- (On linux) `liburing-dev` on the compiling machine, and `liburing` on the target host
|
|
||||||
- (On linux) `pkg-config` on the compiling machine to allow finding `liburing`
|
|
||||||
- A C++ compiler and (on linux) `libclang` for RocksDB
|
|
||||||
|
|
||||||
You can build Continuwuity using `cargo build --release --all-features`.
|
|
||||||
|
|
||||||
### Building with Nix
|
|
||||||
|
|
||||||
If you prefer, you can use Nix (or [Lix](https://lix.systems)) to build Continuwuity. This provides improved reproducibility and makes it easy to set up a build environment and generate output. This approach also allows for easy cross-compilation.
|
|
||||||
|
|
||||||
You can run the `nix build -L .#static-x86_64-linux-musl-all-features` or
|
You can run the `nix build -L .#static-x86_64-linux-musl-all-features` or
|
||||||
`nix build -L .#static-aarch64-linux-musl-all-features` commands based
|
`nix build -L .#static-aarch64-linux-musl-all-features` commands based
|
||||||
|
@ -56,11 +45,17 @@ on architecture to cross-compile the necessary static binary located at
|
||||||
`result/bin/conduwuit`. This is reproducible with the static binaries produced
|
`result/bin/conduwuit`. This is reproducible with the static binaries produced
|
||||||
in our CI.
|
in our CI.
|
||||||
|
|
||||||
|
If wanting to build using standard Rust toolchains, make sure you install:
|
||||||
|
- `liburing-dev` on the compiling machine, and `liburing` on the target host
|
||||||
|
- LLVM and libclang for RocksDB
|
||||||
|
|
||||||
|
You can build Continuwuity using `cargo build --release --all-features`
|
||||||
|
|
||||||
## Adding a Continuwuity user
|
## Adding a Continuwuity user
|
||||||
|
|
||||||
While Continuwuity can run as any user, it is better to use dedicated users for
|
While Continuwuity can run as any user it is better to use dedicated users for
|
||||||
different services. This also ensures that the file permissions
|
different services. This also allows you to make sure that the file permissions
|
||||||
are set up correctly.
|
are correctly set up.
|
||||||
|
|
||||||
In Debian, you can use this command to create a Continuwuity user:
|
In Debian, you can use this command to create a Continuwuity user:
|
||||||
|
|
||||||
|
@ -76,18 +71,18 @@ sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity
|
||||||
|
|
||||||
## Forwarding ports in the firewall or the router
|
## Forwarding ports in the firewall or the router
|
||||||
|
|
||||||
Matrix's default federation port is 8448, and clients must use port 443.
|
Matrix's default federation port is port 8448, and clients must be using port 443.
|
||||||
If you would like to use only port 443 or a different port, you will need to set up
|
If you would like to use only port 443, or a different port, you will need to setup
|
||||||
delegation. Continuwuity has configuration options for delegation, or you can configure
|
delegation. Continuwuity has config options for doing delegation, or you can configure
|
||||||
your reverse proxy to manually serve the necessary JSON files for delegation
|
your reverse proxy to manually serve the necessary JSON files to do delegation
|
||||||
(see the `[global.well_known]` config section).
|
(see the `[global.well_known]` config section).
|
||||||
|
|
||||||
If Continuwuity runs behind a router or in a container and has a different public
|
If Continuwuity runs behind a router or in a container and has a different public
|
||||||
IP address than the host system, you need to forward these public ports directly
|
IP address than the host system these public ports need to be forwarded directly
|
||||||
or indirectly to the port mentioned in the configuration.
|
or indirectly to the port mentioned in the config.
|
||||||
|
|
||||||
Note for NAT users: if you have trouble connecting to your server from inside
|
Note for NAT users; if you have trouble connecting to your server from the inside
|
||||||
your network, check if your router supports "NAT
|
of your network, you need to research your router and see if it supports "NAT
|
||||||
hairpinning" or "NAT loopback".
|
hairpinning" or "NAT loopback".
|
||||||
|
|
||||||
If your router does not support this feature, you need to research doing local
|
If your router does not support this feature, you need to research doing local
|
||||||
|
@ -97,19 +92,19 @@ on the network level, consider something like NextDNS or Pi-Hole.
|
||||||
|
|
||||||
## Setting up a systemd service
|
## Setting up a systemd service
|
||||||
|
|
||||||
You can find two example systemd units for Continuwuity
|
Two example systemd units for Continuwuity can be found
|
||||||
[on the configuration page](../configuration/examples.md#debian-systemd-unit-file).
|
[on the configuration page](../configuration/examples.md#debian-systemd-unit-file).
|
||||||
You may need to change the `ExecStart=` path to match where you placed the Continuwuity
|
You may need to change the `ExecStart=` path to where you placed the Continuwuity
|
||||||
binary if it is not in `/usr/bin/conduwuit`.
|
binary if it is not `/usr/bin/conduwuit`.
|
||||||
|
|
||||||
On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros
|
On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros
|
||||||
and OpenSUSE), put `$EscapeControlCharactersOnReceive off` inside
|
and OpenSUSE), put `$EscapeControlCharactersOnReceive off` inside
|
||||||
`/etc/rsyslog.conf` to allow color in logs.
|
`/etc/rsyslog.conf` to allow color in logs.
|
||||||
|
|
||||||
If you are using a different `database_path` than the systemd unit's
|
If you are using a different `database_path` other than the systemd unit
|
||||||
configured default `/var/lib/conduwuit`, you need to add your path to the
|
configured default `/var/lib/conduwuit`, you need to add your path to the
|
||||||
systemd unit's `ReadWritePaths=`. You can do this by either directly editing
|
systemd unit's `ReadWritePaths=`. This can be done by either directly editing
|
||||||
`conduwuit.service` and reloading systemd, or by running `systemctl edit conduwuit.service`
|
`conduwuit.service` and reloading systemd, or running `systemctl edit conduwuit.service`
|
||||||
and entering the following:
|
and entering the following:
|
||||||
|
|
||||||
```
|
```
|
||||||
|
@ -119,8 +114,8 @@ ReadWritePaths=/path/to/custom/database/path
|
||||||
|
|
||||||
## Creating the Continuwuity configuration file
|
## Creating the Continuwuity configuration file
|
||||||
|
|
||||||
Now you need to create the Continuwuity configuration file in
|
Now we need to create the Continuwuity's config file in
|
||||||
`/etc/continuwuity/continuwuity.toml`. You can find an example configuration at
|
`/etc/continuwuity/continuwuity.toml`. The example config can be found at
|
||||||
[conduwuit-example.toml](../configuration/examples.md).
|
[conduwuit-example.toml](../configuration/examples.md).
|
||||||
|
|
||||||
**Please take a moment to read the config. You need to change at least the
|
**Please take a moment to read the config. You need to change at least the
|
||||||
|
@ -130,8 +125,8 @@ RocksDB is the only supported database backend.
|
||||||
|
|
||||||
## Setting the correct file permissions
|
## Setting the correct file permissions
|
||||||
|
|
||||||
If you are using a dedicated user for Continuwuity, you need to allow it to
|
If you are using a dedicated user for Continuwuity, you will need to allow it to
|
||||||
read the configuration. To do this, run:
|
read the config. To do that you can run this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo chown -R root:root /etc/conduwuit
|
sudo chown -R root:root /etc/conduwuit
|
||||||
|
@ -148,13 +143,13 @@ sudo chmod 700 /var/lib/conduwuit/
|
||||||
|
|
||||||
## Setting up the Reverse Proxy
|
## Setting up the Reverse Proxy
|
||||||
|
|
||||||
We recommend Caddy as a reverse proxy because it is trivial to use and handles TLS certificates, reverse proxy headers, etc. transparently with proper defaults.
|
We recommend Caddy as a reverse proxy, as it is trivial to use, handling TLS certificates, reverse proxy headers, etc transparently with proper defaults.
|
||||||
For other software, please refer to their respective documentation or online guides.
|
For other software, please refer to their respective documentation or online guides.
|
||||||
|
|
||||||
### Caddy
|
### Caddy
|
||||||
|
|
||||||
After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile`
|
After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile`
|
||||||
and enter the following (substitute your actual server name):
|
and enter this (substitute for your server name).
|
||||||
|
|
||||||
```caddyfile
|
```caddyfile
|
||||||
your.server.name, your.server.name:8448 {
|
your.server.name, your.server.name:8448 {
|
||||||
|
@ -173,9 +168,9 @@ sudo systemctl enable --now caddy
|
||||||
|
|
||||||
### Other Reverse Proxies
|
### Other Reverse Proxies
|
||||||
|
|
||||||
As we prefer our users to use Caddy, we do not provide configuration files for other proxies.
|
As we would prefer our users to use Caddy, we will not provide configuration files for other proxys.
|
||||||
|
|
||||||
You will need to reverse proxy everything under the following routes:
|
You will need to reverse proxy everything under following routes:
|
||||||
- `/_matrix/` - core Matrix C-S and S-S APIs
|
- `/_matrix/` - core Matrix C-S and S-S APIs
|
||||||
- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and
|
- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and
|
||||||
`/server_version`
|
`/server_version`
|
||||||
|
@ -198,16 +193,16 @@ Examples of delegation:
|
||||||
|
|
||||||
For Apache and Nginx there are many examples available online.
|
For Apache and Nginx there are many examples available online.
|
||||||
|
|
||||||
Lighttpd is not supported as it appears to interfere with the `X-Matrix` Authorization
|
Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization
|
||||||
header, making federation non-functional. If you find a workaround, please share it so we can add it to this documentation.
|
header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here.
|
||||||
|
|
||||||
If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from interfering with the `X-Matrix` header (note that Apache is not ideal as a general reverse proxy, so we discourage using it if alternatives are available).
|
If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can).
|
||||||
|
|
||||||
If using Nginx, you need to pass the request URI to Continuwuity using `$request_uri`, like this:
|
If using Nginx, you need to give Continuwuity the request URI using `$request_uri`, or like so:
|
||||||
- `proxy_pass http://127.0.0.1:6167$request_uri;`
|
- `proxy_pass http://127.0.0.1:6167$request_uri;`
|
||||||
- `proxy_pass http://127.0.0.1:6167;`
|
- `proxy_pass http://127.0.0.1:6167;`
|
||||||
|
|
||||||
Nginx users need to increase the `client_max_body_size` setting (default is 1M) to match the
|
Nginx users need to increase `client_max_body_size` (default is 1M) to match
|
||||||
`max_request_size` defined in conduwuit.toml.
|
`max_request_size` defined in conduwuit.toml.
|
||||||
|
|
||||||
## You're done
|
## You're done
|
||||||
|
@ -227,7 +222,7 @@ sudo systemctl enable conduwuit
|
||||||
## How do I know it works?
|
## How do I know it works?
|
||||||
|
|
||||||
You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your
|
You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your
|
||||||
homeserver address, and try to register.
|
homeserver and try to register.
|
||||||
|
|
||||||
You can also use these commands as a quick health check (replace
|
You can also use these commands as a quick health check (replace
|
||||||
`your.server.name`).
|
`your.server.name`).
|
||||||
|
@ -242,10 +237,10 @@ curl https://your.server.name:8448/_conduwuit/server_version
|
||||||
curl https://your.server.name:8448/_matrix/federation/v1/version
|
curl https://your.server.name:8448/_matrix/federation/v1/version
|
||||||
```
|
```
|
||||||
|
|
||||||
- To check if your server can communicate with other homeservers, use the
|
- To check if your server can talk with other homeservers, you can use the
|
||||||
[Matrix Federation Tester](https://federationtester.matrix.org/). If you can
|
[Matrix Federation Tester](https://federationtester.matrix.org/). If you can
|
||||||
register but cannot join federated rooms, check your configuration and verify
|
register but cannot join federated rooms check your config again and also check
|
||||||
that port 8448 is open and forwarded correctly.
|
if the port 8448 is open and forwarded correctly.
|
||||||
|
|
||||||
# What's next?
|
# What's next?
|
||||||
|
|
||||||
|
|
|
@ -1,9 +1,9 @@
|
||||||
# Continuwuity for Kubernetes
|
# Continuwuity for Kubernetes
|
||||||
|
|
||||||
Continuwuity doesn't support horizontal scalability or distributed loading
|
Continuwuity doesn't support horizontal scalability or distributed loading
|
||||||
natively. However, a community-maintained Helm Chart is available here to run
|
natively, however a community maintained Helm Chart is available here to run
|
||||||
conduwuit on Kubernetes: <https://gitlab.cronce.io/charts/conduwuit>
|
conduwuit on Kubernetes: <https://gitlab.cronce.io/charts/conduwuit>
|
||||||
|
|
||||||
This should be compatible with Continuwuity, but you will need to change the image reference.
|
This should be compatible with continuwuity, but you will need to change the image reference.
|
||||||
|
|
||||||
If changes need to be made, please reach out to the maintainer, as this is not maintained or controlled by the Continuwuity maintainers.
|
Should changes need to be made, please reach out to the maintainer as this is not maintained/controlled by the Continuwuity maintainers.
|
||||||
|
|
|
@ -1,130 +1,75 @@
|
||||||
# Continuwuity for NixOS
|
# Continuwuity for NixOS
|
||||||
|
|
||||||
NixOS packages Continuwuity as `matrix-continuwuity`. This package includes both the Continuwuity software and a dedicated NixOS module for configuration and deployment.
|
Continuwuity can be acquired by Nix (or [Lix][lix]) from various places:
|
||||||
|
|
||||||
## Installation methods
|
* The `flake.nix` at the root of the repo
|
||||||
|
* The `default.nix` at the root of the repo
|
||||||
|
* From Continuwuity's binary cache
|
||||||
|
|
||||||
You can acquire Continuwuity with Nix (or [Lix][lix]) from these sources:
|
### NixOS module
|
||||||
|
|
||||||
* Directly from Nixpkgs using the official package (`pkgs.matrix-continuwuity`)
|
The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions
|
||||||
* The `flake.nix` at the root of the Continuwuity repo
|
welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure
|
||||||
* The `default.nix` at the root of the Continuwuity repo
|
Continuwuity.
|
||||||
|
|
||||||
## NixOS module
|
### Conduit NixOS Config Module and SQLite
|
||||||
|
|
||||||
Continuwuity now has an official NixOS module that simplifies configuration and deployment. The module is available in Nixpkgs as `services.matrix-continuwuity` from NixOS 25.05.
|
Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend.
|
||||||
|
Continuwuity dropped SQLite support in favor of exclusively supporting the much faster RocksDB.
|
||||||
|
Make sure that you are using the RocksDB backend before migrating!
|
||||||
|
|
||||||
Here's a basic example of how to use the module:
|
There is a [tool to migrate a Conduit SQLite database to
|
||||||
|
RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/).
|
||||||
|
|
||||||
```nix
|
If you want to run the latest code, you should get Continuwuity from the `flake.nix`
|
||||||
{ config, pkgs, ... }:
|
or `default.nix` and set [`services.matrix-conduit.package`][package]
|
||||||
|
appropriately to use Continuwuity instead of Conduit.
|
||||||
{
|
|
||||||
services.matrix-continuwuity = {
|
|
||||||
enable = true;
|
|
||||||
settings = {
|
|
||||||
global = {
|
|
||||||
server_name = "example.com";
|
|
||||||
# Listening on localhost by default
|
|
||||||
# address and port are handled automatically
|
|
||||||
allow_registration = false;
|
|
||||||
allow_encryption = true;
|
|
||||||
allow_federation = true;
|
|
||||||
trusted_servers = [ "matrix.org" ];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Available options
|
|
||||||
|
|
||||||
The NixOS module provides these configuration options:
|
|
||||||
|
|
||||||
- `enable`: Enable the Continuwuity service
|
|
||||||
- `user`: The user to run Continuwuity as (defaults to "continuwuity")
|
|
||||||
- `group`: The group to run Continuwuity as (defaults to "continuwuity")
|
|
||||||
- `extraEnvironment`: Extra environment variables to pass to the Continuwuity server
|
|
||||||
- `package`: The Continuwuity package to use
|
|
||||||
- `settings`: The Continuwuity configuration (in TOML format)
|
|
||||||
|
|
||||||
Use the `settings` option to configure Continuwuity itself. See the [example configuration file](../configuration/examples.md#example-configuration) for all available options.
|
|
||||||
|
|
||||||
### UNIX sockets
|
### UNIX sockets
|
||||||
|
|
||||||
The NixOS module natively supports UNIX sockets through the `global.unix_socket_path` option. When using UNIX sockets, set `global.address` to `null`:
|
Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module
|
||||||
|
a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX
|
||||||
|
socket option does not exist in Conduit, and the module forcibly sets the `address` and
|
||||||
|
`port` config options.
|
||||||
|
|
||||||
```nix
|
```nix
|
||||||
services.matrix-continuwuity = {
|
options.services.matrix-conduit.settings = lib.mkOption {
|
||||||
enable = true;
|
apply = old: old // (
|
||||||
settings = {
|
if (old.global ? "unix_socket_path")
|
||||||
global = {
|
then { global = builtins.removeAttrs old.global [ "address" "port" ]; }
|
||||||
server_name = "example.com";
|
else { }
|
||||||
address = null; # Must be null when using unix_socket_path
|
);
|
||||||
unix_socket_path = "/run/continuwuity/continuwuity.sock";
|
|
||||||
unix_socket_perms = 660; # Default permissions for the socket
|
|
||||||
# ...
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The module automatically sets the correct `RestrictAddressFamilies` in the systemd service configuration to allow access to UNIX sockets.
|
Additionally, the [`matrix-conduit` systemd unit][systemd-unit] in the module does not allow
|
||||||
|
the `AF_UNIX` socket address family in their systemd unit's `RestrictAddressFamilies=` which
|
||||||
|
disallows the namespace from accessing or creating UNIX sockets and has to be enabled like so:
|
||||||
|
|
||||||
### RocksDB database
|
```nix
|
||||||
|
systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ];
|
||||||
|
```
|
||||||
|
|
||||||
Continuwuity exclusively uses RocksDB as its database backend. The system configures the database path automatically to `/var/lib/continuwuity/` and you cannot change it due to the service's reliance on systemd's StateDir.
|
Even though those workarounds are feasible a Continuwuity NixOS configuration module, developed and
|
||||||
|
published by the community, would be appreciated.
|
||||||
If you're migrating from Conduit with SQLite, use this [tool to migrate a Conduit SQLite database to RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/).
|
|
||||||
|
|
||||||
### jemalloc and hardened profile
|
### jemalloc and hardened profile
|
||||||
|
|
||||||
Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] because it uses `scudo` by default. Either disable/hide `scudo` from Continuwuity or disable jemalloc like this:
|
Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix]
|
||||||
|
due to them using `scudo` by default. You must either disable/hide `scudo` from Continuwuity, or
|
||||||
|
disable jemalloc like so:
|
||||||
|
|
||||||
```nix
|
```nix
|
||||||
services.matrix-continuwuity = {
|
let
|
||||||
enable = true;
|
conduwuit = pkgs.unstable.conduwuit.override {
|
||||||
package = pkgs.matrix-continuwuity.override {
|
|
||||||
enableJemalloc = false;
|
enableJemalloc = false;
|
||||||
};
|
};
|
||||||
# ...
|
in
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
## Upgrading from Conduit
|
|
||||||
|
|
||||||
If you previously used Conduit with the `services.matrix-conduit` module:
|
|
||||||
|
|
||||||
1. Ensure your Conduit uses the RocksDB backend, or migrate from SQLite using the [migration tool](https://github.com/ShadowJonathan/conduit_toolbox/)
|
|
||||||
2. Switch to the new module by changing `services.matrix-conduit` to `services.matrix-continuwuity` in your configuration
|
|
||||||
3. Update any custom configuration to match the new module's structure
|
|
||||||
|
|
||||||
## Reverse proxy configuration
|
|
||||||
|
|
||||||
You'll need to set up a reverse proxy (like nginx or caddy) to expose Continuwuity to the internet. Configure your reverse proxy to forward requests to `/_matrix` on port 443 and 8448 to your Continuwuity instance.
|
|
||||||
|
|
||||||
Here's an example nginx configuration:
|
|
||||||
|
|
||||||
```nginx
|
|
||||||
server {
|
|
||||||
listen 443 ssl;
|
|
||||||
listen [::]:443 ssl;
|
|
||||||
listen 8448 ssl;
|
|
||||||
listen [::]:8448 ssl;
|
|
||||||
|
|
||||||
server_name example.com;
|
|
||||||
|
|
||||||
# SSL configuration here...
|
|
||||||
|
|
||||||
location /_matrix/ {
|
|
||||||
proxy_pass http://127.0.0.1:6167$request_uri;
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
|
||||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
|
||||||
proxy_set_header X-Forwarded-Proto $scheme;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
[lix]: https://lix.systems/
|
[lix]: https://lix.systems/
|
||||||
[hardened.nix]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/profiles/hardened.nix
|
[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
|
||||||
|
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
|
||||||
|
[hardened.nix]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/profiles/hardened.nix#L22
|
||||||
|
[systemd-unit]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/conduit.nix#L132
|
||||||
|
|
|
@ -1,21 +0,0 @@
|
||||||
# Command-Line Help for `continuwuity`
|
|
||||||
|
|
||||||
This document contains the help content for the `continuwuity` command-line program.
|
|
||||||
|
|
||||||
**Command Overview:**
|
|
||||||
|
|
||||||
* [`continuwuity`↴](#continuwuity)
|
|
||||||
|
|
||||||
## `continuwuity`
|
|
||||||
|
|
||||||
a very cool Matrix chat homeserver written in Rust
|
|
||||||
|
|
||||||
**Usage:** `continuwuity [OPTIONS]`
|
|
||||||
|
|
||||||
###### **Options:**
|
|
||||||
|
|
||||||
* `-c`, `--config <CONFIG>` — Path to the config TOML file (optional)
|
|
||||||
* `-O`, `--option <OPTION>` — Override a configuration variable using TOML 'key=value' syntax
|
|
||||||
* `--read-only` — Run in a stricter read-only --maintenance mode
|
|
||||||
* `--maintenance` — Run in maintenance mode while refusing connections
|
|
||||||
* `--execute <EXECUTE>` — Execute console command automatically after startup
|
|
153
flake.lock
generated
153
flake.lock
generated
|
@ -10,11 +10,11 @@
|
||||||
"nixpkgs-stable": "nixpkgs-stable"
|
"nixpkgs-stable": "nixpkgs-stable"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1751403276,
|
"lastModified": 1738524606,
|
||||||
"narHash": "sha256-V0EPQNsQko1a8OqIWc2lLviLnMpR1m08Ej00z5RVTfs=",
|
"narHash": "sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=",
|
||||||
"owner": "zhaofengli",
|
"owner": "zhaofengli",
|
||||||
"repo": "attic",
|
"repo": "attic",
|
||||||
"rev": "896ad88fa57ad5dbcd267c0ac51f1b71ccfcb4dd",
|
"rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -32,11 +32,11 @@
|
||||||
"nixpkgs": "nixpkgs_4"
|
"nixpkgs": "nixpkgs_4"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1748883665,
|
"lastModified": 1737621947,
|
||||||
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
|
"narHash": "sha256-8HFvG7fvIFbgtaYAY2628Tb89fA55nPm2jSiNs0/Cws=",
|
||||||
"owner": "cachix",
|
"owner": "cachix",
|
||||||
"repo": "cachix",
|
"repo": "cachix",
|
||||||
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
|
"rev": "f65a3cd5e339c223471e64c051434616e18cc4f5",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -63,11 +63,11 @@
|
||||||
"nixpkgs": "nixpkgs_2"
|
"nixpkgs": "nixpkgs_2"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1744206633,
|
"lastModified": 1728672398,
|
||||||
"narHash": "sha256-pb5aYkE8FOoa4n123slgHiOf1UbNSnKe5pEZC+xXD5g=",
|
"narHash": "sha256-KxuGSoVUFnQLB2ZcYODW7AVPAh9JqRlD5BrfsC/Q4qs=",
|
||||||
"owner": "cachix",
|
"owner": "cachix",
|
||||||
"repo": "cachix",
|
"repo": "cachix",
|
||||||
"rev": "8a60090640b96f9df95d1ab99e5763a586be1404",
|
"rev": "aac51f698309fd0f381149214b7eee213c66ef0a",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -77,6 +77,23 @@
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"complement": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1741891349,
|
||||||
|
"narHash": "sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=",
|
||||||
|
"owner": "girlbossceo",
|
||||||
|
"repo": "complement",
|
||||||
|
"rev": "e587b3df569cba411aeac7c20b6366d03c143745",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "girlbossceo",
|
||||||
|
"ref": "main",
|
||||||
|
"repo": "complement",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
"crane": {
|
"crane": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"nixpkgs": [
|
"nixpkgs": [
|
||||||
|
@ -100,11 +117,11 @@
|
||||||
},
|
},
|
||||||
"crane_2": {
|
"crane_2": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1750266157,
|
"lastModified": 1739936662,
|
||||||
"narHash": "sha256-tL42YoNg9y30u7zAqtoGDNdTyXTi8EALDeCB13FtbQA=",
|
"narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=",
|
||||||
"owner": "ipetkov",
|
"owner": "ipetkov",
|
||||||
"repo": "crane",
|
"repo": "crane",
|
||||||
"rev": "e37c943371b73ed87faf33f7583860f81f1d5a48",
|
"rev": "19de14aaeb869287647d9461cbd389187d8ecdb7",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -132,11 +149,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1748273445,
|
"lastModified": 1733323168,
|
||||||
"narHash": "sha256-5V0dzpNgQM0CHDsMzh+ludYeu1S+Y+IMjbaskSSdFh0=",
|
"narHash": "sha256-d5DwB4MZvlaQpN6OQ4SLYxb5jA4UH5EtV5t5WOtjLPU=",
|
||||||
"owner": "cachix",
|
"owner": "cachix",
|
||||||
"repo": "devenv",
|
"repo": "devenv",
|
||||||
"rev": "668a50d8b7bdb19a0131f53c9f6c25c9071e1ffb",
|
"rev": "efa9010b8b1cfd5dd3c7ed1e172a470c3b84a064",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -153,11 +170,11 @@
|
||||||
"rust-analyzer-src": "rust-analyzer-src"
|
"rust-analyzer-src": "rust-analyzer-src"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1751525020,
|
"lastModified": 1740724364,
|
||||||
"narHash": "sha256-oDO6lCYS5Bf4jUITChj9XV7k3TP38DE0Ckz5n5ORCME=",
|
"narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=",
|
||||||
"owner": "nix-community",
|
"owner": "nix-community",
|
||||||
"repo": "fenix",
|
"repo": "fenix",
|
||||||
"rev": "a1a5f92f47787e7df9f30e5e5ac13e679215aa1e",
|
"rev": "edf7d9e431cda8782e729253835f178a356d3aab",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -186,11 +203,11 @@
|
||||||
"flake-compat_2": {
|
"flake-compat_2": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1747046372,
|
"lastModified": 1733328505,
|
||||||
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
|
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
|
||||||
"owner": "edolstra",
|
"owner": "edolstra",
|
||||||
"repo": "flake-compat",
|
"repo": "flake-compat",
|
||||||
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
|
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -202,11 +219,11 @@
|
||||||
"flake-compat_3": {
|
"flake-compat_3": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1747046372,
|
"lastModified": 1733328505,
|
||||||
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
|
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
|
||||||
"owner": "edolstra",
|
"owner": "edolstra",
|
||||||
"repo": "flake-compat",
|
"repo": "flake-compat",
|
||||||
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
|
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -289,14 +306,15 @@
|
||||||
"nixpkgs": [
|
"nixpkgs": [
|
||||||
"cachix",
|
"cachix",
|
||||||
"nixpkgs"
|
"nixpkgs"
|
||||||
]
|
],
|
||||||
|
"nixpkgs-stable": "nixpkgs-stable_2"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1747372754,
|
"lastModified": 1733318908,
|
||||||
"narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=",
|
"narHash": "sha256-SVQVsbafSM1dJ4fpgyBqLZ+Lft+jcQuMtEL3lQWx2Sk=",
|
||||||
"owner": "cachix",
|
"owner": "cachix",
|
||||||
"repo": "git-hooks.nix",
|
"repo": "git-hooks.nix",
|
||||||
"rev": "80479b6ec16fefd9c1db3ea13aeb038c60530f46",
|
"rev": "6f4e2a2112050951a314d2733a994fbab94864c6",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -343,6 +361,23 @@
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"liburing": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1740613216,
|
||||||
|
"narHash": "sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=",
|
||||||
|
"owner": "axboe",
|
||||||
|
"repo": "liburing",
|
||||||
|
"rev": "e1003e496e66f9b0ae06674869795edf772d5500",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "axboe",
|
||||||
|
"ref": "master",
|
||||||
|
"repo": "liburing",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
"nix": {
|
"nix": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"flake-compat": [
|
"flake-compat": [
|
||||||
|
@ -366,11 +401,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1745930071,
|
"lastModified": 1727438425,
|
||||||
"narHash": "sha256-bYyjarS3qSNqxfgc89IoVz8cAFDkF9yPE63EJr+h50s=",
|
"narHash": "sha256-X8ES7I1cfNhR9oKp06F6ir4Np70WGZU5sfCOuNBEwMg=",
|
||||||
"owner": "domenkozar",
|
"owner": "domenkozar",
|
||||||
"repo": "nix",
|
"repo": "nix",
|
||||||
"rev": "b455edf3505f1bf0172b39a735caef94687d0d9c",
|
"rev": "f6c5ae4c1b2e411e6b1e6a8181cc84363d6a7546",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -449,13 +484,29 @@
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nixpkgs_2": {
|
"nixpkgs-stable_2": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1733212471,
|
"lastModified": 1730741070,
|
||||||
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
|
"narHash": "sha256-edm8WG19kWozJ/GqyYx2VjW99EdhjKwbY3ZwdlPAAlo=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
|
"rev": "d063c1dd113c91ab27959ba540c0d9753409edf3",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixos-24.05",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs_2": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1730531603,
|
||||||
|
"narHash": "sha256-Dqg6si5CqIzm87sp57j5nTaeBbWhHFaVyG7V6L8k3lY=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "7ffd9ae656aec493492b44d0ddfb28e79a1ea25d",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -483,11 +534,11 @@
|
||||||
},
|
},
|
||||||
"nixpkgs_4": {
|
"nixpkgs_4": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1748190013,
|
"lastModified": 1733212471,
|
||||||
"narHash": "sha256-R5HJFflOfsP5FBtk+zE8FpL8uqE7n62jqOsADvVshhE=",
|
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "62b852f6c6742134ade1abdd2a21685fd617a291",
|
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -499,11 +550,11 @@
|
||||||
},
|
},
|
||||||
"nixpkgs_5": {
|
"nixpkgs_5": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1751498133,
|
"lastModified": 1740547748,
|
||||||
"narHash": "sha256-QWJ+NQbMU+NcU2xiyo7SNox1fAuwksGlQhpzBl76g1I=",
|
"narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "d55716bb59b91ae9d1ced4b1ccdea7a442ecbfdb",
|
"rev": "3a05eebede89661660945da1f151959900903b6a",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -518,26 +569,28 @@
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1741308171,
|
"lastModified": 1741308171,
|
||||||
"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
|
"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
|
||||||
"ref": "v9.11.1",
|
"owner": "girlbossceo",
|
||||||
|
"repo": "rocksdb",
|
||||||
"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
|
"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
|
||||||
"revCount": 13177,
|
"type": "github"
|
||||||
"type": "git",
|
|
||||||
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
|
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
"owner": "girlbossceo",
|
||||||
"ref": "v9.11.1",
|
"ref": "v9.11.1",
|
||||||
"type": "git",
|
"repo": "rocksdb",
|
||||||
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"root": {
|
"root": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"attic": "attic",
|
"attic": "attic",
|
||||||
"cachix": "cachix",
|
"cachix": "cachix",
|
||||||
|
"complement": "complement",
|
||||||
"crane": "crane_2",
|
"crane": "crane_2",
|
||||||
"fenix": "fenix",
|
"fenix": "fenix",
|
||||||
"flake-compat": "flake-compat_3",
|
"flake-compat": "flake-compat_3",
|
||||||
"flake-utils": "flake-utils",
|
"flake-utils": "flake-utils",
|
||||||
|
"liburing": "liburing",
|
||||||
"nix-filter": "nix-filter",
|
"nix-filter": "nix-filter",
|
||||||
"nixpkgs": "nixpkgs_5",
|
"nixpkgs": "nixpkgs_5",
|
||||||
"rocksdb": "rocksdb"
|
"rocksdb": "rocksdb"
|
||||||
|
@ -546,11 +599,11 @@
|
||||||
"rust-analyzer-src": {
|
"rust-analyzer-src": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1751433876,
|
"lastModified": 1740691488,
|
||||||
"narHash": "sha256-IsdwOcvLLDDlkFNwhdD5BZy20okIQL01+UQ7Kxbqh8s=",
|
"narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=",
|
||||||
"owner": "rust-lang",
|
"owner": "rust-lang",
|
||||||
"repo": "rust-analyzer",
|
"repo": "rust-analyzer",
|
||||||
"rev": "11d45c881389dae90b0da5a94cde52c79d0fc7ef",
|
"rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|
425
flake.nix
425
flake.nix
|
@ -2,127 +2,113 @@
|
||||||
inputs = {
|
inputs = {
|
||||||
attic.url = "github:zhaofengli/attic?ref=main";
|
attic.url = "github:zhaofengli/attic?ref=main";
|
||||||
cachix.url = "github:cachix/cachix?ref=master";
|
cachix.url = "github:cachix/cachix?ref=master";
|
||||||
crane = {
|
complement = { url = "github:girlbossceo/complement?ref=main"; flake = false; };
|
||||||
url = "github:ipetkov/crane?ref=master";
|
crane = { url = "github:ipetkov/crane?ref=master"; };
|
||||||
};
|
fenix = { url = "github:nix-community/fenix?ref=main"; inputs.nixpkgs.follows = "nixpkgs"; };
|
||||||
fenix = {
|
flake-compat = { url = "github:edolstra/flake-compat?ref=master"; flake = false; };
|
||||||
url = "github:nix-community/fenix?ref=main";
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
flake-compat = {
|
|
||||||
url = "github:edolstra/flake-compat?ref=master";
|
|
||||||
flake = false;
|
|
||||||
};
|
|
||||||
flake-utils.url = "github:numtide/flake-utils?ref=main";
|
flake-utils.url = "github:numtide/flake-utils?ref=main";
|
||||||
nix-filter.url = "github:numtide/nix-filter?ref=main";
|
nix-filter.url = "github:numtide/nix-filter?ref=main";
|
||||||
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
|
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
|
||||||
rocksdb = {
|
rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; };
|
||||||
url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=v9.11.1";
|
liburing = { url = "github:axboe/liburing?ref=master"; flake = false; };
|
||||||
flake = false;
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
|
|
||||||
outputs =
|
outputs = inputs:
|
||||||
inputs:
|
inputs.flake-utils.lib.eachDefaultSystem (system:
|
||||||
inputs.flake-utils.lib.eachDefaultSystem (
|
|
||||||
system:
|
|
||||||
let
|
let
|
||||||
pkgsHost = import inputs.nixpkgs{
|
pkgsHost = import inputs.nixpkgs{
|
||||||
inherit system;
|
inherit system;
|
||||||
};
|
};
|
||||||
|
pkgsHostStatic = pkgsHost.pkgsStatic;
|
||||||
|
|
||||||
# The Rust toolchain to use
|
# The Rust toolchain to use
|
||||||
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
|
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
|
||||||
file = ./rust-toolchain.toml;
|
file = ./rust-toolchain.toml;
|
||||||
|
|
||||||
# See also `rust-toolchain.toml`
|
# See also `rust-toolchain.toml`
|
||||||
sha256 = "sha256-KUm16pHj+cRedf8vxs/Hd2YWxpOrWZ7UOrwhILdSJBU=";
|
sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI=";
|
||||||
};
|
};
|
||||||
|
|
||||||
mkScope =
|
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
|
||||||
pkgs:
|
inherit pkgs;
|
||||||
pkgs.lib.makeScope pkgs.newScope (self: {
|
book = self.callPackage ./nix/pkgs/book {};
|
||||||
inherit pkgs inputs;
|
complement = self.callPackage ./nix/pkgs/complement {};
|
||||||
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
|
craneLib = ((inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain));
|
||||||
|
inherit inputs;
|
||||||
main = self.callPackage ./nix/pkgs/main {};
|
main = self.callPackage ./nix/pkgs/main {};
|
||||||
|
oci-image = self.callPackage ./nix/pkgs/oci-image {};
|
||||||
|
tini = pkgs.tini.overrideAttrs {
|
||||||
|
# newer clang/gcc is unhappy with tini-static: <https://3.dog/~strawberry/pb/c8y4>
|
||||||
|
patches = [ (pkgs.fetchpatch {
|
||||||
|
url = "https://patch-diff.githubusercontent.com/raw/krallin/tini/pull/224.patch";
|
||||||
|
hash = "sha256-4bTfAhRyIT71VALhHY13hUgbjLEUyvgkIJMt3w9ag3k=";
|
||||||
|
})
|
||||||
|
];
|
||||||
|
};
|
||||||
liburing = pkgs.liburing.overrideAttrs {
|
liburing = pkgs.liburing.overrideAttrs {
|
||||||
# Tests weren't building
|
# Tests weren't building
|
||||||
outputs = [
|
outputs = [ "out" "dev" "man" ];
|
||||||
"out"
|
|
||||||
"dev"
|
|
||||||
"man"
|
|
||||||
];
|
|
||||||
buildFlags = [ "library" ];
|
buildFlags = [ "library" ];
|
||||||
|
src = inputs.liburing;
|
||||||
};
|
};
|
||||||
rocksdb =
|
rocksdb = (pkgs.rocksdb.override {
|
||||||
(pkgs.rocksdb_9_10.override {
|
liburing = self.liburing;
|
||||||
# Override the liburing input for the build with our own so
|
}).overrideAttrs (old: {
|
||||||
# we have it built with the library flag
|
|
||||||
inherit (self) liburing;
|
|
||||||
}).overrideAttrs
|
|
||||||
(old: {
|
|
||||||
src = inputs.rocksdb;
|
src = inputs.rocksdb;
|
||||||
version = "v9.11.1";
|
version = pkgs.lib.removePrefix
|
||||||
cmakeFlags =
|
"v"
|
||||||
pkgs.lib.subtractLists [
|
(builtins.fromJSON (builtins.readFile ./flake.lock))
|
||||||
# No real reason to have snappy or zlib, no one uses this
|
.nodes.rocksdb.original.ref;
|
||||||
|
# we have this already at https://github.com/girlbossceo/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
|
||||||
|
# unsetting this so i don't have to revert it and make this nix exclusive
|
||||||
|
patches = [];
|
||||||
|
cmakeFlags = pkgs.lib.subtractLists
|
||||||
|
[
|
||||||
|
# no real reason to have snappy or zlib, no one uses this
|
||||||
"-DWITH_SNAPPY=1"
|
"-DWITH_SNAPPY=1"
|
||||||
"-DZLIB=1"
|
"-DZLIB=1"
|
||||||
"-DWITH_ZLIB=1"
|
"-DWITH_ZLIB=1"
|
||||||
# We don't need to use ldb or sst_dump (core_tools)
|
# we dont need to use ldb or sst_dump (core_tools)
|
||||||
"-DWITH_CORE_TOOLS=1"
|
"-DWITH_CORE_TOOLS=1"
|
||||||
# We don't need to build rocksdb tests
|
# we dont need to build rocksdb tests
|
||||||
"-DWITH_TESTS=1"
|
"-DWITH_TESTS=1"
|
||||||
# We use rust-rocksdb via C interface and don't need C++ RTTI
|
# we use rust-rocksdb via C interface and dont need C++ RTTI
|
||||||
"-DUSE_RTTI=1"
|
"-DUSE_RTTI=1"
|
||||||
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
|
# this doesn't exist in RocksDB, and USE_SSE is deprecated for
|
||||||
# PORTABLE=$(march)
|
# PORTABLE=$(march)
|
||||||
"-DFORCE_SSE42=1"
|
"-DFORCE_SSE42=1"
|
||||||
# PORTABLE will get set in main/default.nix
|
# PORTABLE will get set in main/default.nix
|
||||||
"-DPORTABLE=1"
|
"-DPORTABLE=1"
|
||||||
] old.cmakeFlags
|
]
|
||||||
|
old.cmakeFlags
|
||||||
++ [
|
++ [
|
||||||
# No real reason to have snappy, no one uses this
|
# no real reason to have snappy, no one uses this
|
||||||
"-DWITH_SNAPPY=0"
|
"-DWITH_SNAPPY=0"
|
||||||
"-DZLIB=0"
|
"-DZLIB=0"
|
||||||
"-DWITH_ZLIB=0"
|
"-DWITH_ZLIB=0"
|
||||||
# We don't need to use ldb or sst_dump (core_tools)
|
# we dont need to use ldb or sst_dump (core_tools)
|
||||||
"-DWITH_CORE_TOOLS=0"
|
"-DWITH_CORE_TOOLS=0"
|
||||||
# We don't need trace tools
|
# we dont need trace tools
|
||||||
"-DWITH_TRACE_TOOLS=0"
|
"-DWITH_TRACE_TOOLS=0"
|
||||||
# We don't need to build rocksdb tests
|
# we dont need to build rocksdb tests
|
||||||
"-DWITH_TESTS=0"
|
"-DWITH_TESTS=0"
|
||||||
# We use rust-rocksdb via C interface and don't need C++ RTTI
|
# we use rust-rocksdb via C interface and dont need C++ RTTI
|
||||||
"-DUSE_RTTI=0"
|
"-DUSE_RTTI=0"
|
||||||
];
|
];
|
||||||
|
|
||||||
# outputs has "tools" which we don't need or use
|
# outputs has "tools" which we dont need or use
|
||||||
outputs = [ "out" ];
|
outputs = [ "out" ];
|
||||||
|
|
||||||
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
|
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
|
||||||
preInstall = "";
|
preInstall = "";
|
||||||
|
|
||||||
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
|
|
||||||
# Unsetting this so we don't have to revert it and make this nix exclusive
|
|
||||||
patches = [ ];
|
|
||||||
|
|
||||||
postPatch = ''
|
|
||||||
# Fix gcc-13 build failures due to missing <cstdint> and
|
|
||||||
# <system_error> includes, fixed upstream since 8.x
|
|
||||||
sed -e '1i #include <cstdint>' -i db/compaction/compaction_iteration_stats.h
|
|
||||||
sed -e '1i #include <cstdint>' -i table/block_based/data_block_hash_index.h
|
|
||||||
sed -e '1i #include <cstdint>' -i util/string_util.h
|
|
||||||
sed -e '1i #include <cstdint>' -i include/rocksdb/utilities/checkpoint.h
|
|
||||||
'';
|
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
scopeHost = mkScope pkgsHost;
|
scopeHost = mkScope pkgsHost;
|
||||||
mkCrossScope =
|
scopeHostStatic = mkScope pkgsHostStatic;
|
||||||
crossSystem:
|
scopeCrossLinux = mkScope pkgsHost.pkgsLinux.pkgsStatic;
|
||||||
let
|
mkCrossScope = crossSystem:
|
||||||
pkgsCrossStatic =
|
let pkgsCrossStatic = (import inputs.nixpkgs {
|
||||||
(import inputs.nixpkgs {
|
|
||||||
inherit system;
|
inherit system;
|
||||||
crossSystem = {
|
crossSystem = {
|
||||||
config = crossSystem;
|
config = crossSystem;
|
||||||
|
@ -131,19 +117,85 @@
|
||||||
in
|
in
|
||||||
mkScope pkgsCrossStatic;
|
mkScope pkgsCrossStatic;
|
||||||
|
|
||||||
|
mkDevShell = scope: scope.pkgs.mkShell {
|
||||||
|
env = scope.main.env // {
|
||||||
|
# Rust Analyzer needs to be able to find the path to default crate
|
||||||
|
# sources, and it can read this environment variable to do so. The
|
||||||
|
# `rust-src` component is required in order for this to work.
|
||||||
|
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
|
||||||
|
|
||||||
|
# Convenient way to access a pinned version of Complement's source
|
||||||
|
# code.
|
||||||
|
COMPLEMENT_SRC = inputs.complement.outPath;
|
||||||
|
|
||||||
|
# Needed for Complement: <https://github.com/golang/go/issues/52690>
|
||||||
|
CGO_CFLAGS = "-Wl,--no-gc-sections";
|
||||||
|
CGO_LDFLAGS = "-Wl,--no-gc-sections";
|
||||||
|
};
|
||||||
|
|
||||||
|
# Development tools
|
||||||
|
packages = [
|
||||||
|
# Always use nightly rustfmt because most of its options are unstable
|
||||||
|
#
|
||||||
|
# This needs to come before `toolchain` in this list, otherwise
|
||||||
|
# `$PATH` will have stable rustfmt instead.
|
||||||
|
inputs.fenix.packages.${system}.latest.rustfmt
|
||||||
|
|
||||||
|
toolchain
|
||||||
|
]
|
||||||
|
++ (with pkgsHost.pkgs; [
|
||||||
|
# Required by hardened-malloc.rs dep
|
||||||
|
binutils
|
||||||
|
|
||||||
|
cargo-audit
|
||||||
|
cargo-auditable
|
||||||
|
|
||||||
|
# Needed for producing Debian packages
|
||||||
|
cargo-deb
|
||||||
|
|
||||||
|
# Needed for CI to check validity of produced Debian packages (dpkg-deb)
|
||||||
|
dpkg
|
||||||
|
|
||||||
|
engage
|
||||||
|
|
||||||
|
# Needed for Complement
|
||||||
|
go
|
||||||
|
|
||||||
|
# Needed for our script for Complement
|
||||||
|
jq
|
||||||
|
gotestfmt
|
||||||
|
|
||||||
|
# Needed for finding broken markdown links
|
||||||
|
lychee
|
||||||
|
|
||||||
|
# Needed for linting markdown files
|
||||||
|
markdownlint-cli
|
||||||
|
|
||||||
|
# Useful for editing the book locally
|
||||||
|
mdbook
|
||||||
|
|
||||||
|
# used for rust caching in CI to speed it up
|
||||||
|
sccache
|
||||||
|
]
|
||||||
|
# liburing is Linux-exclusive
|
||||||
|
++ lib.optional stdenv.hostPlatform.isLinux liburing
|
||||||
|
++ lib.optional stdenv.hostPlatform.isLinux numactl)
|
||||||
|
++ scope.main.buildInputs
|
||||||
|
++ scope.main.propagatedBuildInputs
|
||||||
|
++ scope.main.nativeBuildInputs;
|
||||||
|
};
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
packages =
|
packages = {
|
||||||
{
|
|
||||||
default = scopeHost.main.override {
|
default = scopeHost.main.override {
|
||||||
disable_features = [
|
disable_features = [
|
||||||
# Don't include experimental features
|
# dont include experimental features
|
||||||
"experimental"
|
"experimental"
|
||||||
# jemalloc profiling/stats features are expensive and shouldn't
|
# jemalloc profiling/stats features are expensive and shouldn't
|
||||||
# be expected on non-debug builds.
|
# be expected on non-debug builds.
|
||||||
"jemalloc_prof"
|
"jemalloc_prof"
|
||||||
"jemalloc_stats"
|
"jemalloc_stats"
|
||||||
# This is non-functional on nix for some reason
|
# this is non-functional on nix for some reason
|
||||||
"hardened_malloc"
|
"hardened_malloc"
|
||||||
# conduwuit_mods is a development-only hot reload feature
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
"conduwuit_mods"
|
"conduwuit_mods"
|
||||||
|
@ -151,23 +203,23 @@
|
||||||
};
|
};
|
||||||
default-debug = scopeHost.main.override {
|
default-debug = scopeHost.main.override {
|
||||||
profile = "dev";
|
profile = "dev";
|
||||||
# Debug build users expect full logs
|
# debug build users expect full logs
|
||||||
disable_release_max_log_level = true;
|
disable_release_max_log_level = true;
|
||||||
disable_features = [
|
disable_features = [
|
||||||
# Don't include experimental features
|
# dont include experimental features
|
||||||
"experimental"
|
"experimental"
|
||||||
# This is non-functional on nix for some reason
|
# this is non-functional on nix for some reason
|
||||||
"hardened_malloc"
|
"hardened_malloc"
|
||||||
# conduwuit_mods is a development-only hot reload feature
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
"conduwuit_mods"
|
"conduwuit_mods"
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
# Just a test profile used for things like CI and complement
|
# just a test profile used for things like CI and complement
|
||||||
default-test = scopeHost.main.override {
|
default-test = scopeHost.main.override {
|
||||||
profile = "test";
|
profile = "test";
|
||||||
disable_release_max_log_level = true;
|
disable_release_max_log_level = true;
|
||||||
disable_features = [
|
disable_features = [
|
||||||
# Don't include experimental features
|
# dont include experimental features
|
||||||
"experimental"
|
"experimental"
|
||||||
# this is non-functional on nix for some reason
|
# this is non-functional on nix for some reason
|
||||||
"hardened_malloc"
|
"hardened_malloc"
|
||||||
|
@ -178,13 +230,13 @@
|
||||||
all-features = scopeHost.main.override {
|
all-features = scopeHost.main.override {
|
||||||
all_features = true;
|
all_features = true;
|
||||||
disable_features = [
|
disable_features = [
|
||||||
# Don't include experimental features
|
# dont include experimental features
|
||||||
"experimental"
|
"experimental"
|
||||||
# jemalloc profiling/stats features are expensive and shouldn't
|
# jemalloc profiling/stats features are expensive and shouldn't
|
||||||
# be expected on non-debug builds.
|
# be expected on non-debug builds.
|
||||||
"jemalloc_prof"
|
"jemalloc_prof"
|
||||||
"jemalloc_stats"
|
"jemalloc_stats"
|
||||||
# This is non-functional on nix for some reason
|
# this is non-functional on nix for some reason
|
||||||
"hardened_malloc"
|
"hardened_malloc"
|
||||||
# conduwuit_mods is a development-only hot reload feature
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
"conduwuit_mods"
|
"conduwuit_mods"
|
||||||
|
@ -193,24 +245,71 @@
|
||||||
all-features-debug = scopeHost.main.override {
|
all-features-debug = scopeHost.main.override {
|
||||||
profile = "dev";
|
profile = "dev";
|
||||||
all_features = true;
|
all_features = true;
|
||||||
# Debug build users expect full logs
|
# debug build users expect full logs
|
||||||
disable_release_max_log_level = true;
|
disable_release_max_log_level = true;
|
||||||
disable_features = [
|
disable_features = [
|
||||||
# Don't include experimental features
|
# dont include experimental features
|
||||||
"experimental"
|
"experimental"
|
||||||
# This is non-functional on nix for some reason
|
# this is non-functional on nix for some reason
|
||||||
"hardened_malloc"
|
"hardened_malloc"
|
||||||
# conduwuit_mods is a development-only hot reload feature
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
"conduwuit_mods"
|
"conduwuit_mods"
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; };
|
hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; };
|
||||||
|
|
||||||
|
oci-image = scopeHost.oci-image;
|
||||||
|
oci-image-all-features = scopeHost.oci-image.override {
|
||||||
|
main = scopeHost.main.override {
|
||||||
|
all_features = true;
|
||||||
|
disable_features = [
|
||||||
|
# dont include experimental features
|
||||||
|
"experimental"
|
||||||
|
# jemalloc profiling/stats features are expensive and shouldn't
|
||||||
|
# be expected on non-debug builds.
|
||||||
|
"jemalloc_prof"
|
||||||
|
"jemalloc_stats"
|
||||||
|
# this is non-functional on nix for some reason
|
||||||
|
"hardened_malloc"
|
||||||
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
|
"conduwuit_mods"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
oci-image-all-features-debug = scopeHost.oci-image.override {
|
||||||
|
main = scopeHost.main.override {
|
||||||
|
profile = "dev";
|
||||||
|
all_features = true;
|
||||||
|
# debug build users expect full logs
|
||||||
|
disable_release_max_log_level = true;
|
||||||
|
disable_features = [
|
||||||
|
# dont include experimental features
|
||||||
|
"experimental"
|
||||||
|
# this is non-functional on nix for some reason
|
||||||
|
"hardened_malloc"
|
||||||
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
|
"conduwuit_mods"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
oci-image-hmalloc = scopeHost.oci-image.override {
|
||||||
|
main = scopeHost.main.override {
|
||||||
|
features = ["hardened_malloc"];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
book = scopeHost.book;
|
||||||
|
|
||||||
|
complement = scopeHost.complement;
|
||||||
|
static-complement = scopeHostStatic.complement;
|
||||||
|
# macOS containers don't exist, so the complement images must be forced to linux
|
||||||
|
linux-complement = (mkCrossScope "${pkgsHost.hostPlatform.qemuArch}-linux-musl").complement;
|
||||||
}
|
}
|
||||||
// builtins.listToAttrs (
|
//
|
||||||
builtins.concatLists (
|
builtins.listToAttrs
|
||||||
builtins.map
|
(builtins.concatLists
|
||||||
(
|
(builtins.map
|
||||||
crossSystem:
|
(crossSystem:
|
||||||
let
|
let
|
||||||
binaryName = "static-${crossSystem}";
|
binaryName = "static-${crossSystem}";
|
||||||
scopeCrossStatic = mkCrossScope crossSystem;
|
scopeCrossStatic = mkCrossScope crossSystem;
|
||||||
|
@ -227,8 +326,7 @@
|
||||||
{
|
{
|
||||||
name = "${binaryName}-x86_64-haswell-optimised";
|
name = "${binaryName}-x86_64-haswell-optimised";
|
||||||
value = scopeCrossStatic.main.override {
|
value = scopeCrossStatic.main.override {
|
||||||
x86_64_haswell_target_optimised =
|
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
|
||||||
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -298,8 +396,7 @@
|
||||||
# conduwuit_mods is a development-only hot reload feature
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
"conduwuit_mods"
|
"conduwuit_mods"
|
||||||
];
|
];
|
||||||
x86_64_haswell_target_optimised =
|
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
|
||||||
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -329,6 +426,118 @@
|
||||||
features = ["hardened_malloc"];
|
features = ["hardened_malloc"];
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# An output for an OCI image based on that binary
|
||||||
|
{
|
||||||
|
name = "oci-image-${crossSystem}";
|
||||||
|
value = scopeCrossStatic.oci-image;
|
||||||
|
}
|
||||||
|
|
||||||
|
# An output for an OCI image based on that binary with x86_64 haswell
|
||||||
|
# target optimisations
|
||||||
|
{
|
||||||
|
name = "oci-image-${crossSystem}-x86_64-haswell-optimised";
|
||||||
|
value = scopeCrossStatic.oci-image.override {
|
||||||
|
main = scopeCrossStatic.main.override {
|
||||||
|
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
# An output for an OCI image based on that unstripped debug ("dev") binary
|
||||||
|
{
|
||||||
|
name = "oci-image-${crossSystem}-debug";
|
||||||
|
value = scopeCrossStatic.oci-image.override {
|
||||||
|
main = scopeCrossStatic.main.override {
|
||||||
|
profile = "dev";
|
||||||
|
# debug build users expect full logs
|
||||||
|
disable_release_max_log_level = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
# An output for an OCI image based on that binary with `--all-features`
|
||||||
|
{
|
||||||
|
name = "oci-image-${crossSystem}-all-features";
|
||||||
|
value = scopeCrossStatic.oci-image.override {
|
||||||
|
main = scopeCrossStatic.main.override {
|
||||||
|
all_features = true;
|
||||||
|
disable_features = [
|
||||||
|
# dont include experimental features
|
||||||
|
"experimental"
|
||||||
|
# jemalloc profiling/stats features are expensive and shouldn't
|
||||||
|
# be expected on non-debug builds.
|
||||||
|
"jemalloc_prof"
|
||||||
|
"jemalloc_stats"
|
||||||
|
# this is non-functional on nix for some reason
|
||||||
|
"hardened_malloc"
|
||||||
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
|
"conduwuit_mods"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
# An output for an OCI image based on that binary with `--all-features` and with x86_64 haswell
|
||||||
|
# target optimisations
|
||||||
|
{
|
||||||
|
name = "oci-image-${crossSystem}-all-features-x86_64-haswell-optimised";
|
||||||
|
value = scopeCrossStatic.oci-image.override {
|
||||||
|
main = scopeCrossStatic.main.override {
|
||||||
|
all_features = true;
|
||||||
|
disable_features = [
|
||||||
|
# dont include experimental features
|
||||||
|
"experimental"
|
||||||
|
# jemalloc profiling/stats features are expensive and shouldn't
|
||||||
|
# be expected on non-debug builds.
|
||||||
|
"jemalloc_prof"
|
||||||
|
"jemalloc_stats"
|
||||||
|
# this is non-functional on nix for some reason
|
||||||
|
"hardened_malloc"
|
||||||
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
|
"conduwuit_mods"
|
||||||
|
];
|
||||||
|
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
# An output for an OCI image based on that unstripped debug ("dev") binary with `--all-features`
|
||||||
|
{
|
||||||
|
name = "oci-image-${crossSystem}-all-features-debug";
|
||||||
|
value = scopeCrossStatic.oci-image.override {
|
||||||
|
main = scopeCrossStatic.main.override {
|
||||||
|
profile = "dev";
|
||||||
|
all_features = true;
|
||||||
|
# debug build users expect full logs
|
||||||
|
disable_release_max_log_level = true;
|
||||||
|
disable_features = [
|
||||||
|
# dont include experimental features
|
||||||
|
"experimental"
|
||||||
|
# this is non-functional on nix for some reason
|
||||||
|
"hardened_malloc"
|
||||||
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
|
"conduwuit_mods"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
# An output for an OCI image based on that binary with hardened_malloc
|
||||||
|
{
|
||||||
|
name = "oci-image-${crossSystem}-hmalloc";
|
||||||
|
value = scopeCrossStatic.oci-image.override {
|
||||||
|
main = scopeCrossStatic.main.override {
|
||||||
|
features = ["hardened_malloc"];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
# An output for a complement OCI image for the specified platform
|
||||||
|
{
|
||||||
|
name = "complement-${crossSystem}";
|
||||||
|
value = scopeCrossStatic.complement;
|
||||||
|
}
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
[
|
[
|
||||||
|
@ -340,6 +549,30 @@
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
);
|
);
|
||||||
}
|
|
||||||
);
|
devShells.default = mkDevShell scopeHostStatic;
|
||||||
|
devShells.all-features = mkDevShell
|
||||||
|
(scopeHostStatic.overrideScope (final: prev: {
|
||||||
|
main = prev.main.override {
|
||||||
|
all_features = true;
|
||||||
|
disable_features = [
|
||||||
|
# dont include experimental features
|
||||||
|
"experimental"
|
||||||
|
# jemalloc profiling/stats features are expensive and shouldn't
|
||||||
|
# be expected on non-debug builds.
|
||||||
|
"jemalloc_prof"
|
||||||
|
"jemalloc_stats"
|
||||||
|
# this is non-functional on nix for some reason
|
||||||
|
"hardened_malloc"
|
||||||
|
# conduwuit_mods is a development-only hot reload feature
|
||||||
|
"conduwuit_mods"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
}));
|
||||||
|
devShells.no-features = mkDevShell
|
||||||
|
(scopeHostStatic.overrideScope (final: prev: {
|
||||||
|
main = prev.main.override { default_features = false; };
|
||||||
|
}));
|
||||||
|
devShells.dynamic = mkDevShell scopeHost;
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
36
nix/pkgs/book/default.nix
Normal file
36
nix/pkgs/book/default.nix
Normal file
|
@ -0,0 +1,36 @@
|
||||||
|
{ inputs
|
||||||
|
|
||||||
|
# Dependencies
|
||||||
|
, main
|
||||||
|
, mdbook
|
||||||
|
, stdenv
|
||||||
|
}:
|
||||||
|
|
||||||
|
stdenv.mkDerivation {
|
||||||
|
inherit (main) pname version;
|
||||||
|
|
||||||
|
src = inputs.nix-filter {
|
||||||
|
root = inputs.self;
|
||||||
|
include = [
|
||||||
|
"book.toml"
|
||||||
|
"conduwuit-example.toml"
|
||||||
|
"CODE_OF_CONDUCT.md"
|
||||||
|
"CONTRIBUTING.md"
|
||||||
|
"README.md"
|
||||||
|
"development.md"
|
||||||
|
"debian/conduwuit.service"
|
||||||
|
"debian/README.md"
|
||||||
|
"arch/conduwuit.service"
|
||||||
|
"docs"
|
||||||
|
"theme"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
nativeBuildInputs = [
|
||||||
|
mdbook
|
||||||
|
];
|
||||||
|
|
||||||
|
buildPhase = ''
|
||||||
|
mdbook build -d $out
|
||||||
|
'';
|
||||||
|
}
|
21
nix/pkgs/complement/certificate.crt
Normal file
21
nix/pkgs/complement/certificate.crt
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL
|
||||||
|
BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz
|
||||||
|
IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy
|
||||||
|
NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m
|
||||||
|
ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
|
||||||
|
AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt
|
||||||
|
/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88
|
||||||
|
awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp
|
||||||
|
L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK
|
||||||
|
K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl
|
||||||
|
8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV
|
||||||
|
HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy
|
||||||
|
ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw
|
||||||
|
DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ
|
||||||
|
irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+
|
||||||
|
HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e
|
||||||
|
VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3
|
||||||
|
y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d
|
||||||
|
jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4=
|
||||||
|
-----END CERTIFICATE-----
|
50
nix/pkgs/complement/config.toml
Normal file
50
nix/pkgs/complement/config.toml
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
[global]
|
||||||
|
address = "0.0.0.0"
|
||||||
|
allow_device_name_federation = true
|
||||||
|
allow_guest_registration = true
|
||||||
|
allow_public_room_directory_over_federation = true
|
||||||
|
allow_public_room_directory_without_auth = true
|
||||||
|
allow_registration = true
|
||||||
|
database_path = "/database"
|
||||||
|
log = "trace,h2=debug,hyper=debug"
|
||||||
|
port = [8008, 8448]
|
||||||
|
trusted_servers = []
|
||||||
|
only_query_trusted_key_servers = false
|
||||||
|
query_trusted_key_servers_first = false
|
||||||
|
query_trusted_key_servers_first_on_join = false
|
||||||
|
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true
|
||||||
|
ip_range_denylist = []
|
||||||
|
url_preview_domain_contains_allowlist = ["*"]
|
||||||
|
url_preview_domain_explicit_denylist = ["*"]
|
||||||
|
media_compat_file_link = false
|
||||||
|
media_startup_check = true
|
||||||
|
prune_missing_media = true
|
||||||
|
log_colors = true
|
||||||
|
admin_room_notices = false
|
||||||
|
allow_check_for_updates = false
|
||||||
|
intentionally_unknown_config_option_for_testing = true
|
||||||
|
rocksdb_log_level = "info"
|
||||||
|
rocksdb_max_log_files = 1
|
||||||
|
rocksdb_recovery_mode = 0
|
||||||
|
rocksdb_paranoid_file_checks = true
|
||||||
|
log_guest_registrations = false
|
||||||
|
allow_legacy_media = true
|
||||||
|
startup_netburst = true
|
||||||
|
startup_netburst_keep = -1
|
||||||
|
|
||||||
|
allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true
|
||||||
|
|
||||||
|
# valgrind makes things so slow
|
||||||
|
dns_timeout = 60
|
||||||
|
dns_attempts = 20
|
||||||
|
request_conn_timeout = 60
|
||||||
|
request_timeout = 120
|
||||||
|
well_known_conn_timeout = 60
|
||||||
|
well_known_timeout = 60
|
||||||
|
federation_idle_timeout = 300
|
||||||
|
sender_timeout = 300
|
||||||
|
sender_idle_timeout = 300
|
||||||
|
sender_retry_backoff_limit = 300
|
||||||
|
|
||||||
|
[global.tls]
|
||||||
|
dual_protocol = true
|
89
nix/pkgs/complement/default.nix
Normal file
89
nix/pkgs/complement/default.nix
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
# Dependencies
|
||||||
|
{ bashInteractive
|
||||||
|
, buildEnv
|
||||||
|
, coreutils
|
||||||
|
, dockerTools
|
||||||
|
, lib
|
||||||
|
, main
|
||||||
|
, stdenv
|
||||||
|
, tini
|
||||||
|
, writeShellScriptBin
|
||||||
|
}:
|
||||||
|
|
||||||
|
let
|
||||||
|
main' = main.override {
|
||||||
|
profile = "test";
|
||||||
|
all_features = true;
|
||||||
|
disable_release_max_log_level = true;
|
||||||
|
disable_features = [
|
||||||
|
# console/CLI stuff isn't used or relevant for complement
|
||||||
|
"console"
|
||||||
|
"tokio_console"
|
||||||
|
# sentry telemetry isn't useful for complement, disabled by default anyways
|
||||||
|
"sentry_telemetry"
|
||||||
|
"perf_measurements"
|
||||||
|
# this is non-functional on nix for some reason
|
||||||
|
"hardened_malloc"
|
||||||
|
# dont include experimental features
|
||||||
|
"experimental"
|
||||||
|
# compression isn't needed for complement
|
||||||
|
"brotli_compression"
|
||||||
|
"gzip_compression"
|
||||||
|
"zstd_compression"
|
||||||
|
# complement doesn't need hot reloading
|
||||||
|
"conduwuit_mods"
|
||||||
|
# complement doesn't have URL preview media tests
|
||||||
|
"url_preview"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
start = writeShellScriptBin "start" ''
|
||||||
|
set -euxo pipefail
|
||||||
|
|
||||||
|
${lib.getExe' coreutils "env"} \
|
||||||
|
CONDUWUIT_SERVER_NAME="$SERVER_NAME" \
|
||||||
|
${lib.getExe main'}
|
||||||
|
'';
|
||||||
|
in
|
||||||
|
|
||||||
|
dockerTools.buildImage {
|
||||||
|
name = "complement-conduwuit";
|
||||||
|
tag = "main";
|
||||||
|
|
||||||
|
copyToRoot = buildEnv {
|
||||||
|
name = "root";
|
||||||
|
pathsToLink = [
|
||||||
|
"/bin"
|
||||||
|
];
|
||||||
|
paths = [
|
||||||
|
bashInteractive
|
||||||
|
coreutils
|
||||||
|
main'
|
||||||
|
start
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
config = {
|
||||||
|
Cmd = [
|
||||||
|
"${lib.getExe start}"
|
||||||
|
];
|
||||||
|
|
||||||
|
Entrypoint = if !stdenv.hostPlatform.isDarwin
|
||||||
|
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
|
||||||
|
# are handled as expected
|
||||||
|
then [ "${lib.getExe' tini "tini"}" "--" ]
|
||||||
|
else [];
|
||||||
|
|
||||||
|
Env = [
|
||||||
|
"CONTINUWUITY_TLS__KEY=${./private_key.key}"
|
||||||
|
"CONTINUWUITY_TLS__CERTS=${./certificate.crt}"
|
||||||
|
"CONTINUWUITY_CONFIG=${./config.toml}"
|
||||||
|
"RUST_BACKTRACE=full"
|
||||||
|
];
|
||||||
|
|
||||||
|
ExposedPorts = {
|
||||||
|
"8008/tcp" = {};
|
||||||
|
"8448/tcp" = {};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
28
nix/pkgs/complement/private_key.key
Normal file
28
nix/pkgs/complement/private_key.key
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
-----BEGIN PRIVATE KEY-----
|
||||||
|
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb
|
||||||
|
iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT
|
||||||
|
LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a
|
||||||
|
09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc
|
||||||
|
ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga
|
||||||
|
Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO
|
||||||
|
/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu
|
||||||
|
WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB
|
||||||
|
DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb
|
||||||
|
piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN
|
||||||
|
D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ
|
||||||
|
8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+
|
||||||
|
3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq
|
||||||
|
/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90
|
||||||
|
FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q
|
||||||
|
td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M
|
||||||
|
Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A
|
||||||
|
91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV
|
||||||
|
8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh
|
||||||
|
VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW
|
||||||
|
UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K
|
||||||
|
kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz
|
||||||
|
KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7
|
||||||
|
IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh
|
||||||
|
tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM
|
||||||
|
9MVtdgSkuh2gwkD/mMoAJXM=
|
||||||
|
-----END PRIVATE KEY-----
|
16
nix/pkgs/complement/signing_request.csr
Normal file
16
nix/pkgs/complement/signing_request.csr
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
-----BEGIN CERTIFICATE REQUEST-----
|
||||||
|
MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK
|
||||||
|
DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||||
|
ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH
|
||||||
|
uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR
|
||||||
|
xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb
|
||||||
|
o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B
|
||||||
|
hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe
|
||||||
|
vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB
|
||||||
|
CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79
|
||||||
|
ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3
|
||||||
|
r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb
|
||||||
|
XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK
|
||||||
|
MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76
|
||||||
|
U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/
|
||||||
|
-----END CERTIFICATE REQUEST-----
|
12
nix/pkgs/complement/v3.ext
Normal file
12
nix/pkgs/complement/v3.ext
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
authorityKeyIdentifier=keyid,issuer
|
||||||
|
basicConstraints=CA:FALSE
|
||||||
|
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
|
||||||
|
subjectAltName = @alt_names
|
||||||
|
|
||||||
|
[alt_names]
|
||||||
|
DNS.1 = *.docker.internal
|
||||||
|
DNS.2 = hs1
|
||||||
|
DNS.3 = hs2
|
||||||
|
DNS.4 = hs3
|
||||||
|
DNS.5 = hs4
|
||||||
|
IP.1 = 127.0.0.1
|
|
@ -4,8 +4,7 @@
|
||||||
, stdenv
|
, stdenv
|
||||||
}:
|
}:
|
||||||
|
|
||||||
lib.optionalAttrs stdenv.hostPlatform.isStatic
|
lib.optionalAttrs stdenv.hostPlatform.isStatic {
|
||||||
{
|
|
||||||
ROCKSDB_STATIC = "";
|
ROCKSDB_STATIC = "";
|
||||||
}
|
}
|
||||||
//
|
//
|
||||||
|
@ -13,7 +12,12 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic
|
||||||
CARGO_BUILD_RUSTFLAGS =
|
CARGO_BUILD_RUSTFLAGS =
|
||||||
lib.concatStringsSep
|
lib.concatStringsSep
|
||||||
" "
|
" "
|
||||||
(lib.optionals
|
([]
|
||||||
|
# This disables PIE for static builds, which isn't great in terms
|
||||||
|
# of security. Unfortunately, my hand is forced because nixpkgs'
|
||||||
|
# `libstdc++.a` is built without `-fPIE`, which precludes us from
|
||||||
|
# leaving PIE enabled.
|
||||||
|
++ lib.optionals
|
||||||
stdenv.hostPlatform.isStatic
|
stdenv.hostPlatform.isStatic
|
||||||
[ "-C" "relocation-model=static" ]
|
[ "-C" "relocation-model=static" ]
|
||||||
++ lib.optionals
|
++ lib.optionals
|
||||||
|
|
|
@ -93,16 +93,14 @@ let
|
||||||
|
|
||||||
# for some reason enableLiburing in nixpkgs rocksdb is default true
|
# for some reason enableLiburing in nixpkgs rocksdb is default true
|
||||||
# which breaks Darwin entirely
|
# which breaks Darwin entirely
|
||||||
inherit enableLiburing;
|
enableLiburing = enableLiburing;
|
||||||
}).overrideAttrs (old: {
|
}).overrideAttrs (old: {
|
||||||
inherit enableLiburing;
|
enableLiburing = enableLiburing;
|
||||||
cmakeFlags = (if x86_64_haswell_target_optimised then
|
cmakeFlags = (if x86_64_haswell_target_optimised then (lib.subtractLists [
|
||||||
(lib.subtractLists [
|
|
||||||
# dont make a portable build if x86_64_haswell_target_optimised is enabled
|
# dont make a portable build if x86_64_haswell_target_optimised is enabled
|
||||||
"-DPORTABLE=1"
|
"-DPORTABLE=1"
|
||||||
]
|
] old.cmakeFlags
|
||||||
old.cmakeFlags
|
++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ])
|
||||||
++ [ "-DPORTABLE=haswell" ]) else [ "-DPORTABLE=1" ]
|
|
||||||
)
|
)
|
||||||
++ old.cmakeFlags;
|
++ old.cmakeFlags;
|
||||||
|
|
||||||
|
@ -162,7 +160,6 @@ let
|
||||||
"Cargo.lock"
|
"Cargo.lock"
|
||||||
"Cargo.toml"
|
"Cargo.toml"
|
||||||
"src"
|
"src"
|
||||||
"xtask"
|
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
46
nix/pkgs/oci-image/default.nix
Normal file
46
nix/pkgs/oci-image/default.nix
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
{ inputs
|
||||||
|
|
||||||
|
# Dependencies
|
||||||
|
, dockerTools
|
||||||
|
, lib
|
||||||
|
, main
|
||||||
|
, stdenv
|
||||||
|
, tini
|
||||||
|
}:
|
||||||
|
|
||||||
|
dockerTools.buildLayeredImage {
|
||||||
|
name = main.pname;
|
||||||
|
tag = "main";
|
||||||
|
created = "@${toString inputs.self.lastModified}";
|
||||||
|
contents = [
|
||||||
|
dockerTools.caCertificates
|
||||||
|
main
|
||||||
|
];
|
||||||
|
config = {
|
||||||
|
Entrypoint = if !stdenv.hostPlatform.isDarwin
|
||||||
|
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
|
||||||
|
# are handled as expected
|
||||||
|
then [ "${lib.getExe' tini "tini"}" "--" ]
|
||||||
|
else [];
|
||||||
|
Cmd = [
|
||||||
|
"${lib.getExe main}"
|
||||||
|
];
|
||||||
|
Env = [
|
||||||
|
"RUST_BACKTRACE=full"
|
||||||
|
];
|
||||||
|
Labels = {
|
||||||
|
"org.opencontainers.image.authors" = "June Clementine Strawberry <june@girlboss.ceo> and Jason Volk
|
||||||
|
<jason@zemos.net>";
|
||||||
|
"org.opencontainers.image.created" ="@${toString inputs.self.lastModified}";
|
||||||
|
"org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust";
|
||||||
|
"org.opencontainers.image.documentation" = "https://continuwuity.org/";
|
||||||
|
"org.opencontainers.image.licenses" = "Apache-2.0";
|
||||||
|
"org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or "";
|
||||||
|
"org.opencontainers.image.source" = "https://forgejo.ellis.link/continuwuation/continuwuity";
|
||||||
|
"org.opencontainers.image.title" = main.pname;
|
||||||
|
"org.opencontainers.image.url" = "https://continuwuity.org/";
|
||||||
|
"org.opencontainers.image.vendor" = "continuwuation";
|
||||||
|
"org.opencontainers.image.version" = main.version;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
|
@ -10,7 +10,7 @@ use crate::{
|
||||||
|
|
||||||
#[derive(Debug, Parser)]
|
#[derive(Debug, Parser)]
|
||||||
#[command(name = conduwuit_core::name(), version = conduwuit_core::version())]
|
#[command(name = conduwuit_core::name(), version = conduwuit_core::version())]
|
||||||
pub enum AdminCommand {
|
pub(super) enum AdminCommand {
|
||||||
#[command(subcommand)]
|
#[command(subcommand)]
|
||||||
/// - Commands for managing appservices
|
/// - Commands for managing appservices
|
||||||
Appservices(AppserviceCommand),
|
Appservices(AppserviceCommand),
|
||||||
|
|
|
@ -7,7 +7,7 @@ use crate::admin_command_dispatch;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
pub enum AppserviceCommand {
|
pub(super) enum AppserviceCommand {
|
||||||
/// - Register an appservice using its registration YAML
|
/// - Register an appservice using its registration YAML
|
||||||
///
|
///
|
||||||
/// This command needs a YAML generated by an appservice (such as a bridge),
|
/// This command needs a YAML generated by an appservice (such as a bridge),
|
||||||
|
|
|
@ -7,6 +7,6 @@ use crate::admin_command_dispatch;
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum CheckCommand {
|
pub(super) enum CheckCommand {
|
||||||
CheckAllUsers,
|
CheckAllUsers,
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,10 +7,7 @@ use std::{
|
||||||
|
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result, debug_error, err, info,
|
Err, Result, debug_error, err, info,
|
||||||
matrix::{
|
matrix::pdu::{PduEvent, PduId, RawPduId},
|
||||||
Event,
|
|
||||||
pdu::{PduEvent, PduId, RawPduId},
|
|
||||||
},
|
|
||||||
trace, utils,
|
trace, utils,
|
||||||
utils::{
|
utils::{
|
||||||
stream::{IterStream, ReadyExt},
|
stream::{IterStream, ReadyExt},
|
||||||
|
@ -22,7 +19,7 @@ use futures::{FutureExt, StreamExt, TryStreamExt};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
|
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
|
||||||
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
|
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
|
||||||
api::federation::event::get_room_state, events::AnyStateEvent, serde::Raw,
|
api::federation::event::get_room_state,
|
||||||
};
|
};
|
||||||
use service::rooms::{
|
use service::rooms::{
|
||||||
short::{ShortEventId, ShortRoomId},
|
short::{ShortEventId, ShortRoomId},
|
||||||
|
@ -299,12 +296,12 @@ pub(super) async fn get_remote_pdu(
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
|
pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
let room_id = self.services.rooms.alias.resolve(&room).await?;
|
let room_id = self.services.rooms.alias.resolve(&room).await?;
|
||||||
let room_state: Vec<Raw<AnyStateEvent>> = self
|
let room_state: Vec<_> = self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_full_pdus(&room_id)
|
.room_state_full_pdus(&room_id)
|
||||||
.map_ok(Event::into_format)
|
.map_ok(PduEvent::into_state_event)
|
||||||
.try_collect()
|
.try_collect()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
@ -412,9 +409,7 @@ pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool)
|
||||||
.reload
|
.reload
|
||||||
.reload(&new_filter_layer, Some(handles))
|
.reload(&new_filter_layer, Some(handles))
|
||||||
{
|
{
|
||||||
| Ok(()) => {
|
| Ok(()) => return self.write_str("Successfully changed log level").await,
|
||||||
return self.write_str("Successfully changed log level").await;
|
|
||||||
},
|
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
return Err!("Failed to modify and reload the global tracing log level: {e}");
|
return Err!("Failed to modify and reload the global tracing log level: {e}");
|
||||||
},
|
},
|
||||||
|
@ -558,8 +553,8 @@ pub(super) async fn force_set_room_state_from_server(
|
||||||
.latest_pdu_in_room(&room_id)
|
.latest_pdu_in_room(&room_id)
|
||||||
.await
|
.await
|
||||||
.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?
|
.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?
|
||||||
.event_id()
|
.event_id
|
||||||
.to_owned(),
|
.clone(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
|
let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
|
||||||
|
|
|
@ -11,7 +11,7 @@ use crate::admin_command_dispatch;
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum DebugCommand {
|
pub(super) enum DebugCommand {
|
||||||
/// - Echo input of admin command
|
/// - Echo input of admin command
|
||||||
Echo {
|
Echo {
|
||||||
message: Vec<String>,
|
message: Vec<String>,
|
||||||
|
@ -32,13 +32,13 @@ pub enum DebugCommand {
|
||||||
/// the command.
|
/// the command.
|
||||||
ParsePdu,
|
ParsePdu,
|
||||||
|
|
||||||
/// - Retrieve and print a PDU by EventID from the Continuwuity database
|
/// - Retrieve and print a PDU by EventID from the conduwuit database
|
||||||
GetPdu {
|
GetPdu {
|
||||||
/// An event ID (a $ followed by the base64 reference hash)
|
/// An event ID (a $ followed by the base64 reference hash)
|
||||||
event_id: OwnedEventId,
|
event_id: OwnedEventId,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Retrieve and print a PDU by PduId from the Continuwuity database
|
/// - Retrieve and print a PDU by PduId from the conduwuit database
|
||||||
GetShortPdu {
|
GetShortPdu {
|
||||||
/// Shortroomid integer
|
/// Shortroomid integer
|
||||||
shortroomid: ShortRoomId,
|
shortroomid: ShortRoomId,
|
||||||
|
@ -182,7 +182,7 @@ pub enum DebugCommand {
|
||||||
event_id: Option<OwnedEventId>,
|
event_id: Option<OwnedEventId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Runs a server name through Continuwuity's true destination resolution
|
/// - Runs a server name through conduwuit's true destination resolution
|
||||||
/// process
|
/// process
|
||||||
///
|
///
|
||||||
/// Useful for debugging well-known issues
|
/// Useful for debugging well-known issues
|
||||||
|
|
|
@ -4,7 +4,7 @@ use crate::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, clap::Subcommand)]
|
#[derive(Debug, clap::Subcommand)]
|
||||||
pub enum TesterCommand {
|
pub(crate) enum TesterCommand {
|
||||||
Panic,
|
Panic,
|
||||||
Failure,
|
Failure,
|
||||||
Tester,
|
Tester,
|
||||||
|
|
|
@ -8,7 +8,7 @@ use crate::admin_command_dispatch;
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum FederationCommand {
|
pub(super) enum FederationCommand {
|
||||||
/// - List all rooms we are currently handling an incoming pdu from
|
/// - List all rooms we are currently handling an incoming pdu from
|
||||||
IncomingFederation,
|
IncomingFederation,
|
||||||
|
|
||||||
|
|
|
@ -9,7 +9,7 @@ use crate::admin_command_dispatch;
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum MediaCommand {
|
pub(super) enum MediaCommand {
|
||||||
/// - Deletes a single media file from our database and on the filesystem
|
/// - Deletes a single media file from our database and on the filesystem
|
||||||
/// via a single MXC URL or event ID (not redacted)
|
/// via a single MXC URL or event ID (not redacted)
|
||||||
Delete {
|
Delete {
|
||||||
|
@ -90,10 +90,10 @@ pub enum MediaCommand {
|
||||||
#[arg(short, long, default_value("10000"))]
|
#[arg(short, long, default_value("10000"))]
|
||||||
timeout: u32,
|
timeout: u32,
|
||||||
|
|
||||||
#[arg(long, default_value("800"))]
|
#[arg(short, long, default_value("800"))]
|
||||||
width: u32,
|
width: u32,
|
||||||
|
|
||||||
#[arg(long, default_value("800"))]
|
#[arg(short, long, default_value("800"))]
|
||||||
height: u32,
|
height: u32,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
|
@ -33,8 +33,6 @@ conduwuit::mod_ctor! {}
|
||||||
conduwuit::mod_dtor! {}
|
conduwuit::mod_dtor! {}
|
||||||
conduwuit::rustc_flags_capture! {}
|
conduwuit::rustc_flags_capture! {}
|
||||||
|
|
||||||
pub use crate::admin::AdminCommand;
|
|
||||||
|
|
||||||
/// Install the admin command processor
|
/// Install the admin command processor
|
||||||
pub async fn init(admin_service: &service::admin::Service) {
|
pub async fn init(admin_service: &service::admin::Service) {
|
||||||
_ = admin_service
|
_ = admin_service
|
||||||
|
|
|
@ -94,7 +94,8 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
|
||||||
|
|
||||||
#[allow(clippy::result_large_err)]
|
#[allow(clippy::result_large_err)]
|
||||||
fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
|
fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
|
||||||
let link = "Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 🥺";
|
let link =
|
||||||
|
"Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 🥺";
|
||||||
let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}");
|
let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}");
|
||||||
let content = RoomMessageEventContent::notice_markdown(msg);
|
let content = RoomMessageEventContent::notice_markdown(msg);
|
||||||
error!("Panic while processing command: {error:?}");
|
error!("Panic while processing command: {error:?}");
|
||||||
|
|
|
@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch};
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/account_data.rs
|
/// All the getters and iterators from src/database/key_value/account_data.rs
|
||||||
pub enum AccountDataCommand {
|
pub(crate) enum AccountDataCommand {
|
||||||
/// - Returns all changes to the account data that happened after `since`.
|
/// - Returns all changes to the account data that happened after `since`.
|
||||||
ChangesSince {
|
ChangesSince {
|
||||||
/// Full user ID
|
/// Full user ID
|
||||||
|
|
|
@ -6,7 +6,7 @@ use crate::Context;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/appservice.rs
|
/// All the getters and iterators from src/database/key_value/appservice.rs
|
||||||
pub enum AppserviceCommand {
|
pub(crate) enum AppserviceCommand {
|
||||||
/// - Gets the appservice registration info/details from the ID as a string
|
/// - Gets the appservice registration info/details from the ID as a string
|
||||||
GetRegistration {
|
GetRegistration {
|
||||||
/// Appservice registration ID
|
/// Appservice registration ID
|
||||||
|
|
|
@ -6,7 +6,7 @@ use crate::Context;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/globals.rs
|
/// All the getters and iterators from src/database/key_value/globals.rs
|
||||||
pub enum GlobalsCommand {
|
pub(crate) enum GlobalsCommand {
|
||||||
DatabaseVersion,
|
DatabaseVersion,
|
||||||
|
|
||||||
CurrentCount,
|
CurrentCount,
|
||||||
|
|
|
@ -27,7 +27,7 @@ use crate::admin_command_dispatch;
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// Query tables from database
|
/// Query tables from database
|
||||||
pub enum QueryCommand {
|
pub(super) enum QueryCommand {
|
||||||
/// - account_data.rs iterators and getters
|
/// - account_data.rs iterators and getters
|
||||||
#[command(subcommand)]
|
#[command(subcommand)]
|
||||||
AccountData(AccountDataCommand),
|
AccountData(AccountDataCommand),
|
||||||
|
|
|
@ -7,7 +7,7 @@ use crate::Context;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/presence.rs
|
/// All the getters and iterators from src/database/key_value/presence.rs
|
||||||
pub enum PresenceCommand {
|
pub(crate) enum PresenceCommand {
|
||||||
/// - Returns the latest presence event for the given user.
|
/// - Returns the latest presence event for the given user.
|
||||||
GetPresence {
|
GetPresence {
|
||||||
/// Full user ID
|
/// Full user ID
|
||||||
|
|
|
@ -5,7 +5,7 @@ use ruma::OwnedUserId;
|
||||||
use crate::Context;
|
use crate::Context;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum PusherCommand {
|
pub(crate) enum PusherCommand {
|
||||||
/// - Returns all the pushers for the user.
|
/// - Returns all the pushers for the user.
|
||||||
GetPushers {
|
GetPushers {
|
||||||
/// Full user ID
|
/// Full user ID
|
||||||
|
|
|
@ -19,7 +19,7 @@ use crate::{admin_command, admin_command_dispatch};
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
#[allow(clippy::enum_variant_names)]
|
#[allow(clippy::enum_variant_names)]
|
||||||
/// Query tables from database
|
/// Query tables from database
|
||||||
pub enum RawCommand {
|
pub(crate) enum RawCommand {
|
||||||
/// - List database maps
|
/// - List database maps
|
||||||
RawMaps,
|
RawMaps,
|
||||||
|
|
||||||
|
|
|
@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch};
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// Resolver service and caches
|
/// Resolver service and caches
|
||||||
pub enum ResolverCommand {
|
pub(crate) enum ResolverCommand {
|
||||||
/// Query the destinations cache
|
/// Query the destinations cache
|
||||||
DestinationsCache {
|
DestinationsCache {
|
||||||
server_name: Option<OwnedServerName>,
|
server_name: Option<OwnedServerName>,
|
||||||
|
|
|
@ -7,7 +7,7 @@ use crate::Context;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/rooms/alias.rs
|
/// All the getters and iterators from src/database/key_value/rooms/alias.rs
|
||||||
pub enum RoomAliasCommand {
|
pub(crate) enum RoomAliasCommand {
|
||||||
ResolveLocalAlias {
|
ResolveLocalAlias {
|
||||||
/// Full room alias
|
/// Full room alias
|
||||||
alias: OwnedRoomAliasId,
|
alias: OwnedRoomAliasId,
|
||||||
|
|
|
@ -6,7 +6,7 @@ use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
|
||||||
use crate::Context;
|
use crate::Context;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum RoomStateCacheCommand {
|
pub(crate) enum RoomStateCacheCommand {
|
||||||
ServerInRoom {
|
ServerInRoom {
|
||||||
server: OwnedServerName,
|
server: OwnedServerName,
|
||||||
room_id: OwnedRoomId,
|
room_id: OwnedRoomId,
|
||||||
|
|
|
@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch};
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// Query tables from database
|
/// Query tables from database
|
||||||
pub enum RoomTimelineCommand {
|
pub(crate) enum RoomTimelineCommand {
|
||||||
Pdus {
|
Pdus {
|
||||||
room_id: OwnedRoomOrAliasId,
|
room_id: OwnedRoomOrAliasId,
|
||||||
|
|
||||||
|
|
|
@ -8,7 +8,7 @@ use crate::Context;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/sending.rs
|
/// All the getters and iterators from src/database/key_value/sending.rs
|
||||||
pub enum SendingCommand {
|
pub(crate) enum SendingCommand {
|
||||||
/// - Queries database for all `servercurrentevent_data`
|
/// - Queries database for all `servercurrentevent_data`
|
||||||
ActiveRequests,
|
ActiveRequests,
|
||||||
|
|
||||||
|
|
|
@ -7,7 +7,7 @@ use crate::{admin_command, admin_command_dispatch};
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// Query tables from database
|
/// Query tables from database
|
||||||
pub enum ShortCommand {
|
pub(crate) enum ShortCommand {
|
||||||
ShortEventId {
|
ShortEventId {
|
||||||
event_id: OwnedEventId,
|
event_id: OwnedEventId,
|
||||||
},
|
},
|
||||||
|
|
|
@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch};
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/users.rs
|
/// All the getters and iterators from src/database/key_value/users.rs
|
||||||
pub enum UsersCommand {
|
pub(crate) enum UsersCommand {
|
||||||
CountUsers,
|
CountUsers,
|
||||||
|
|
||||||
IterUsers,
|
IterUsers,
|
||||||
|
|
|
@ -8,7 +8,7 @@ use ruma::{OwnedRoomAliasId, OwnedRoomId};
|
||||||
use crate::Context;
|
use crate::Context;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum RoomAliasCommand {
|
pub(crate) enum RoomAliasCommand {
|
||||||
/// - Make an alias point to a room.
|
/// - Make an alias point to a room.
|
||||||
Set {
|
Set {
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
|
|
|
@ -6,7 +6,7 @@ use ruma::OwnedRoomId;
|
||||||
use crate::{Context, PAGE_SIZE, get_room_info};
|
use crate::{Context, PAGE_SIZE, get_room_info};
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum RoomDirectoryCommand {
|
pub(crate) enum RoomDirectoryCommand {
|
||||||
/// - Publish a room to the room directory
|
/// - Publish a room to the room directory
|
||||||
Publish {
|
Publish {
|
||||||
/// The room id of the room to publish
|
/// The room id of the room to publish
|
||||||
|
|
|
@ -7,7 +7,7 @@ use crate::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum RoomInfoCommand {
|
pub(crate) enum RoomInfoCommand {
|
||||||
/// - List joined members in a room
|
/// - List joined members in a room
|
||||||
ListJoinedMembers {
|
ListJoinedMembers {
|
||||||
room_id: OwnedRoomId,
|
room_id: OwnedRoomId,
|
||||||
|
|
|
@ -16,7 +16,7 @@ use crate::admin_command_dispatch;
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum RoomCommand {
|
pub(super) enum RoomCommand {
|
||||||
/// - List all rooms the server knows about
|
/// - List all rooms the server knows about
|
||||||
#[clap(alias = "list")]
|
#[clap(alias = "list")]
|
||||||
ListRooms {
|
ListRooms {
|
||||||
|
|
|
@ -1,18 +1,18 @@
|
||||||
use api::client::leave_room;
|
use api::client::leave_room;
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result, debug, info,
|
Err, Result, debug,
|
||||||
utils::{IterStream, ReadyExt},
|
utils::{IterStream, ReadyExt},
|
||||||
warn,
|
warn,
|
||||||
};
|
};
|
||||||
use futures::{FutureExt, StreamExt};
|
use futures::StreamExt;
|
||||||
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId};
|
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId};
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch, get_room_info};
|
use crate::{admin_command, admin_command_dispatch, get_room_info};
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum RoomModerationCommand {
|
pub(crate) enum RoomModerationCommand {
|
||||||
/// - Bans a room from local users joining and evicts all our local users
|
/// - Bans a room from local users joining and evicts all our local users
|
||||||
/// (including server
|
/// (including server
|
||||||
/// admins)
|
/// admins)
|
||||||
|
@ -70,6 +70,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
};
|
};
|
||||||
|
|
||||||
debug!("Room specified is a room ID, banning room ID");
|
debug!("Room specified is a room ID, banning room ID");
|
||||||
|
self.services.rooms.metadata.ban_room(room_id, true);
|
||||||
|
|
||||||
room_id.to_owned()
|
room_id.to_owned()
|
||||||
} else if room.is_room_alias_id() {
|
} else if room.is_room_alias_id() {
|
||||||
|
@ -89,6 +90,20 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
locally, if not using get_alias_helper to fetch room ID remotely"
|
locally, if not using get_alias_helper to fetch room ID remotely"
|
||||||
);
|
);
|
||||||
|
|
||||||
|
let room_id = match self
|
||||||
|
.services
|
||||||
|
.rooms
|
||||||
|
.alias
|
||||||
|
.resolve_local_alias(room_alias)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
| Ok(room_id) => room_id,
|
||||||
|
| _ => {
|
||||||
|
debug!(
|
||||||
|
"We don't have this room alias to a room ID locally, attempting to fetch \
|
||||||
|
room ID over federation"
|
||||||
|
);
|
||||||
|
|
||||||
match self
|
match self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -100,14 +115,22 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
debug!(
|
debug!(
|
||||||
?room_id,
|
?room_id,
|
||||||
?servers,
|
?servers,
|
||||||
"Got federation response fetching room ID for room {room}"
|
"Got federation response fetching room ID for {room_id}"
|
||||||
);
|
);
|
||||||
room_id
|
room_id
|
||||||
},
|
},
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
return Err!("Failed to resolve room alias {room} to a room ID: {e}");
|
return Err!(
|
||||||
|
"Failed to resolve room alias {room_alias} to a room ID: {e}"
|
||||||
|
);
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
self.services.rooms.metadata.ban_room(&room_id, true);
|
||||||
|
|
||||||
|
room_id
|
||||||
} else {
|
} else {
|
||||||
return Err!(
|
return Err!(
|
||||||
"Room specified is not a room ID or room alias. Please note that this requires a \
|
"Room specified is not a room ID or room alias. Please note that this requires a \
|
||||||
|
@ -116,7 +139,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
|
||||||
info!("Making all users leave the room {room_id} and forgetting it");
|
debug!("Making all users leave the room {room_id} and forgetting it");
|
||||||
let mut users = self
|
let mut users = self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -127,15 +150,12 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
.boxed();
|
.boxed();
|
||||||
|
|
||||||
while let Some(ref user_id) = users.next().await {
|
while let Some(ref user_id) = users.next().await {
|
||||||
info!(
|
debug!(
|
||||||
"Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \
|
"Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \
|
||||||
evicting admins too)",
|
evicting admins too)",
|
||||||
);
|
);
|
||||||
|
|
||||||
if let Err(e) = leave_room(self.services, user_id, &room_id, None)
|
if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
|
||||||
.boxed()
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
warn!("Failed to leave room: {e}");
|
warn!("Failed to leave room: {e}");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -157,9 +177,10 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
self.services.rooms.directory.set_not_public(&room_id); // remove from the room directory
|
// unpublish from room directory
|
||||||
self.services.rooms.metadata.ban_room(&room_id, true); // prevent further joins
|
self.services.rooms.directory.set_not_public(&room_id);
|
||||||
self.services.rooms.metadata.disable_room(&room_id, true); // disable federation
|
|
||||||
|
self.services.rooms.metadata.disable_room(&room_id, true);
|
||||||
|
|
||||||
self.write_str(
|
self.write_str(
|
||||||
"Room banned, removed all our local users, and disabled incoming federation with room.",
|
"Room banned, removed all our local users, and disabled incoming federation with room.",
|
||||||
|
@ -281,6 +302,8 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||||
}
|
}
|
||||||
|
|
||||||
for room_id in room_ids {
|
for room_id in room_ids {
|
||||||
|
self.services.rooms.metadata.ban_room(&room_id, true);
|
||||||
|
|
||||||
debug!("Banned {room_id} successfully");
|
debug!("Banned {room_id} successfully");
|
||||||
room_ban_count = room_ban_count.saturating_add(1);
|
room_ban_count = room_ban_count.saturating_add(1);
|
||||||
|
|
||||||
|
@ -300,10 +323,7 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||||
evicting admins too)",
|
evicting admins too)",
|
||||||
);
|
);
|
||||||
|
|
||||||
if let Err(e) = leave_room(self.services, user_id, &room_id, None)
|
if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
|
||||||
.boxed()
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
warn!("Failed to leave room: {e}");
|
warn!("Failed to leave room: {e}");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -326,9 +346,9 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
self.services.rooms.metadata.ban_room(&room_id, true);
|
|
||||||
// unpublish from room directory, ignore errors
|
// unpublish from room directory, ignore errors
|
||||||
self.services.rooms.directory.set_not_public(&room_id);
|
self.services.rooms.directory.set_not_public(&room_id);
|
||||||
|
|
||||||
self.services.rooms.metadata.disable_room(&room_id, true);
|
self.services.rooms.metadata.disable_room(&room_id, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -9,7 +9,7 @@ use crate::admin_command_dispatch;
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum ServerCommand {
|
pub(super) enum ServerCommand {
|
||||||
/// - Time elapsed since startup
|
/// - Time elapsed since startup
|
||||||
Uptime,
|
Uptime,
|
||||||
|
|
||||||
|
|
|
@ -1,16 +1,14 @@
|
||||||
use std::{collections::BTreeMap, fmt::Write as _};
|
use std::{collections::BTreeMap, fmt::Write as _};
|
||||||
|
|
||||||
use api::client::{
|
use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room};
|
||||||
full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, update_avatar_url,
|
|
||||||
update_displayname,
|
|
||||||
};
|
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result, debug, debug_warn, error, info, is_equal_to,
|
Err, Result, debug, debug_warn, error, info, is_equal_to,
|
||||||
matrix::{Event, pdu::PduBuilder},
|
matrix::pdu::PduBuilder,
|
||||||
utils::{self, ReadyExt},
|
utils::{self, ReadyExt},
|
||||||
warn,
|
warn,
|
||||||
};
|
};
|
||||||
use futures::{FutureExt, StreamExt};
|
use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
|
||||||
|
use futures::StreamExt;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId,
|
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId,
|
||||||
events::{
|
events::{
|
||||||
|
@ -286,9 +284,8 @@ pub(super) async fn reset_password(&self, username: String, password: Option<Str
|
||||||
.set_password(&user_id, Some(new_password.as_str()))
|
.set_password(&user_id, Some(new_password.as_str()))
|
||||||
{
|
{
|
||||||
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
|
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
|
||||||
| Ok(()) => {
|
| Ok(()) =>
|
||||||
write!(self, "Successfully reset the password for user {user_id}: `{new_password}`")
|
write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"),
|
||||||
},
|
|
||||||
}
|
}
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
@ -699,9 +696,7 @@ pub(super) async fn force_leave_room(
|
||||||
return Err!("{user_id} is not joined in the room");
|
return Err!("{user_id} is not joined in the room");
|
||||||
}
|
}
|
||||||
|
|
||||||
leave_room(self.services, &user_id, &room_id, None)
|
leave_room(self.services, &user_id, &room_id, None).await?;
|
||||||
.boxed()
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
self.write_str(&format!("{user_id} has left {room_id}.",))
|
self.write_str(&format!("{user_id} has left {room_id}.",))
|
||||||
.await
|
.await
|
||||||
|
@ -738,7 +733,7 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(&room_id, &StateEventType::RoomCreate, "")
|
.room_state_get(&room_id, &StateEventType::RoomCreate, "")
|
||||||
.await
|
.await
|
||||||
.is_ok_and(|event| event.sender() == user_id);
|
.is_ok_and(|event| event.sender == user_id);
|
||||||
|
|
||||||
if !user_can_demote_self {
|
if !user_can_demote_self {
|
||||||
return Err!("User is not allowed to modify their own power levels in the room.",);
|
return Err!("User is not allowed to modify their own power levels in the room.",);
|
||||||
|
@ -889,7 +884,10 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
|
||||||
return Err!("Event is already redacted.");
|
return Err!("Event is already redacted.");
|
||||||
}
|
}
|
||||||
|
|
||||||
if !self.services.globals.user_is_local(event.sender()) {
|
let room_id = event.room_id;
|
||||||
|
let sender_user = event.sender;
|
||||||
|
|
||||||
|
if !self.services.globals.user_is_local(&sender_user) {
|
||||||
return Err!("This command only works on local users.");
|
return Err!("This command only works on local users.");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -899,21 +897,21 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
|
||||||
);
|
);
|
||||||
|
|
||||||
let redaction_event_id = {
|
let redaction_event_id = {
|
||||||
let state_lock = self.services.rooms.state.mutex.lock(event.room_id()).await;
|
let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;
|
||||||
|
|
||||||
self.services
|
self.services
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.timeline
|
||||||
.build_and_append_pdu(
|
.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
redacts: Some(event.event_id().to_owned()),
|
redacts: Some(event.event_id.clone()),
|
||||||
..PduBuilder::timeline(&RoomRedactionEventContent {
|
..PduBuilder::timeline(&RoomRedactionEventContent {
|
||||||
redacts: Some(event.event_id().to_owned()),
|
redacts: Some(event.event_id.clone()),
|
||||||
reason: Some(reason),
|
reason: Some(reason),
|
||||||
})
|
})
|
||||||
},
|
},
|
||||||
event.sender(),
|
&sender_user,
|
||||||
event.room_id(),
|
&room_id,
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)
|
)
|
||||||
.await?
|
.await?
|
||||||
|
|
|
@ -8,7 +8,7 @@ use crate::admin_command_dispatch;
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub enum UserCommand {
|
pub(super) enum UserCommand {
|
||||||
/// - Create a new user
|
/// - Create a new user
|
||||||
#[clap(alias = "create")]
|
#[clap(alias = "create")]
|
||||||
CreateUser {
|
CreateUser {
|
||||||
|
|
|
@ -3,9 +3,10 @@ use std::fmt::Write;
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use axum_client_ip::InsecureClientIp;
|
use axum_client_ip::InsecureClientIp;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Error, Event, Result, debug_info, err, error, info, is_equal_to,
|
Err, Error, Result, debug_info, err, error, info, is_equal_to,
|
||||||
matrix::pdu::PduBuilder,
|
matrix::pdu::PduBuilder,
|
||||||
utils::{self, ReadyExt, stream::BroadbandExt},
|
utils,
|
||||||
|
utils::{ReadyExt, stream::BroadbandExt},
|
||||||
warn,
|
warn,
|
||||||
};
|
};
|
||||||
use conduwuit_service::Services;
|
use conduwuit_service::Services;
|
||||||
|
@ -150,32 +151,16 @@ pub(crate) async fn register_route(
|
||||||
if !services.config.allow_registration && body.appservice_info.is_none() {
|
if !services.config.allow_registration && body.appservice_info.is_none() {
|
||||||
match (body.username.as_ref(), body.initial_device_display_name.as_ref()) {
|
match (body.username.as_ref(), body.initial_device_display_name.as_ref()) {
|
||||||
| (Some(username), Some(device_display_name)) => {
|
| (Some(username), Some(device_display_name)) => {
|
||||||
info!(
|
info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled");
|
||||||
%is_guest,
|
|
||||||
user = %username,
|
|
||||||
device_name = %device_display_name,
|
|
||||||
"Rejecting registration attempt as registration is disabled"
|
|
||||||
);
|
|
||||||
},
|
},
|
||||||
| (Some(username), _) => {
|
| (Some(username), _) => {
|
||||||
info!(
|
info!(%is_guest, user = %username, "Rejecting registration attempt as registration is disabled");
|
||||||
%is_guest,
|
|
||||||
user = %username,
|
|
||||||
"Rejecting registration attempt as registration is disabled"
|
|
||||||
);
|
|
||||||
},
|
},
|
||||||
| (_, Some(device_display_name)) => {
|
| (_, Some(device_display_name)) => {
|
||||||
info!(
|
info!(%is_guest, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled");
|
||||||
%is_guest,
|
|
||||||
device_name = %device_display_name,
|
|
||||||
"Rejecting registration attempt as registration is disabled"
|
|
||||||
);
|
|
||||||
},
|
},
|
||||||
| (None, _) => {
|
| (None, _) => {
|
||||||
info!(
|
info!(%is_guest, "Rejecting registration attempt as registration is disabled");
|
||||||
%is_guest,
|
|
||||||
"Rejecting registration attempt as registration is disabled"
|
|
||||||
);
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -291,38 +276,20 @@ pub(crate) async fn register_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// UIAA
|
// UIAA
|
||||||
let mut uiaainfo = UiaaInfo {
|
let mut uiaainfo;
|
||||||
flows: Vec::new(),
|
let skip_auth = if services.globals.registration_token.is_some() {
|
||||||
|
// Registration token required
|
||||||
|
uiaainfo = UiaaInfo {
|
||||||
|
flows: vec![AuthFlow {
|
||||||
|
stages: vec![AuthType::RegistrationToken],
|
||||||
|
}],
|
||||||
completed: Vec::new(),
|
completed: Vec::new(),
|
||||||
params: Box::default(),
|
params: Box::default(),
|
||||||
session: None,
|
session: None,
|
||||||
auth_error: None,
|
auth_error: None,
|
||||||
};
|
};
|
||||||
let skip_auth = body.appservice_info.is_some() || is_guest;
|
body.appservice_info.is_some()
|
||||||
|
} else {
|
||||||
// Populate required UIAA flows
|
|
||||||
if services.globals.registration_token.is_some() {
|
|
||||||
// Registration token required
|
|
||||||
uiaainfo.flows.push(AuthFlow {
|
|
||||||
stages: vec![AuthType::RegistrationToken],
|
|
||||||
});
|
|
||||||
}
|
|
||||||
if services.config.recaptcha_private_site_key.is_some() {
|
|
||||||
if let Some(pubkey) = &services.config.recaptcha_site_key {
|
|
||||||
// ReCaptcha required
|
|
||||||
uiaainfo
|
|
||||||
.flows
|
|
||||||
.push(AuthFlow { stages: vec![AuthType::ReCaptcha] });
|
|
||||||
uiaainfo.params = serde_json::value::to_raw_value(&serde_json::json!({
|
|
||||||
"m.login.recaptcha": {
|
|
||||||
"public_key": pubkey,
|
|
||||||
},
|
|
||||||
}))
|
|
||||||
.expect("Failed to serialize recaptcha params");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if uiaainfo.flows.is_empty() && !skip_auth {
|
|
||||||
// No registration token necessary, but clients must still go through the flow
|
// No registration token necessary, but clients must still go through the flow
|
||||||
uiaainfo = UiaaInfo {
|
uiaainfo = UiaaInfo {
|
||||||
flows: vec![AuthFlow { stages: vec![AuthType::Dummy] }],
|
flows: vec![AuthFlow { stages: vec![AuthType::Dummy] }],
|
||||||
|
@ -331,7 +298,8 @@ pub(crate) async fn register_route(
|
||||||
session: None,
|
session: None,
|
||||||
auth_error: None,
|
auth_error: None,
|
||||||
};
|
};
|
||||||
}
|
body.appservice_info.is_some() || is_guest
|
||||||
|
};
|
||||||
|
|
||||||
if !skip_auth {
|
if !skip_auth {
|
||||||
match &body.auth {
|
match &body.auth {
|
||||||
|
@ -383,7 +351,8 @@ pub(crate) async fn register_route(
|
||||||
if !services.globals.new_user_displayname_suffix().is_empty()
|
if !services.globals.new_user_displayname_suffix().is_empty()
|
||||||
&& body.appservice_info.is_none()
|
&& body.appservice_info.is_none()
|
||||||
{
|
{
|
||||||
write!(displayname, " {}", services.server.config.new_user_displayname_suffix)?;
|
write!(displayname, " {}", services.server.config.new_user_displayname_suffix)
|
||||||
|
.expect("should be able to write to string buffer");
|
||||||
}
|
}
|
||||||
|
|
||||||
services
|
services
|
||||||
|
@ -401,7 +370,8 @@ pub(crate) async fn register_route(
|
||||||
content: ruma::events::push_rules::PushRulesEventContent {
|
content: ruma::events::push_rules::PushRulesEventContent {
|
||||||
global: push::Ruleset::server_default(&user_id),
|
global: push::Ruleset::server_default(&user_id),
|
||||||
},
|
},
|
||||||
})?,
|
})
|
||||||
|
.expect("to json always works"),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
@ -446,21 +416,32 @@ pub(crate) async fn register_route(
|
||||||
// log in conduit admin channel if a non-guest user registered
|
// log in conduit admin channel if a non-guest user registered
|
||||||
if body.appservice_info.is_none() && !is_guest {
|
if body.appservice_info.is_none() && !is_guest {
|
||||||
if !device_display_name.is_empty() {
|
if !device_display_name.is_empty() {
|
||||||
let notice = format!(
|
info!(
|
||||||
"New user \"{user_id}\" registered on this server from IP {client} and device \
|
"New user \"{user_id}\" registered on this server with device display name: \
|
||||||
display name \"{device_display_name}\""
|
\"{device_display_name}\""
|
||||||
);
|
);
|
||||||
|
|
||||||
info!("{notice}");
|
|
||||||
if services.server.config.admin_room_notices {
|
if services.server.config.admin_room_notices {
|
||||||
services.admin.notice(¬ice).await;
|
services
|
||||||
|
.admin
|
||||||
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
|
"New user \"{user_id}\" registered on this server from IP {client} and \
|
||||||
|
device display name \"{device_display_name}\""
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
let notice = format!("New user \"{user_id}\" registered on this server.");
|
info!("New user \"{user_id}\" registered on this server.");
|
||||||
|
|
||||||
info!("{notice}");
|
|
||||||
if services.server.config.admin_room_notices {
|
if services.server.config.admin_room_notices {
|
||||||
services.admin.notice(¬ice).await;
|
services
|
||||||
|
.admin
|
||||||
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
|
"New user \"{user_id}\" registered on this server from IP {client}"
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -473,22 +454,24 @@ pub(crate) async fn register_route(
|
||||||
if services.server.config.admin_room_notices {
|
if services.server.config.admin_room_notices {
|
||||||
services
|
services
|
||||||
.admin
|
.admin
|
||||||
.notice(&format!(
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
"Guest user \"{user_id}\" with device display name \
|
"Guest user \"{user_id}\" with device display name \
|
||||||
\"{device_display_name}\" registered on this server from IP {client}"
|
\"{device_display_name}\" registered on this server from IP {client}"
|
||||||
))
|
)))
|
||||||
.await;
|
.await
|
||||||
|
.ok();
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
#[allow(clippy::collapsible_else_if)]
|
#[allow(clippy::collapsible_else_if)]
|
||||||
if services.server.config.admin_room_notices {
|
if services.server.config.admin_room_notices {
|
||||||
services
|
services
|
||||||
.admin
|
.admin
|
||||||
.notice(&format!(
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
"Guest user \"{user_id}\" with no device display name registered on \
|
"Guest user \"{user_id}\" with no device display name registered on \
|
||||||
this server from IP {client}",
|
this server from IP {client}",
|
||||||
))
|
)))
|
||||||
.await;
|
.await
|
||||||
|
.ok();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -507,25 +490,6 @@ pub(crate) async fn register_route(
|
||||||
{
|
{
|
||||||
services.admin.make_user_admin(&user_id).await?;
|
services.admin.make_user_admin(&user_id).await?;
|
||||||
warn!("Granting {user_id} admin privileges as the first user");
|
warn!("Granting {user_id} admin privileges as the first user");
|
||||||
} else if services.config.suspend_on_register {
|
|
||||||
// This is not an admin, suspend them.
|
|
||||||
// Note that we can still do auto joins for suspended users
|
|
||||||
services
|
|
||||||
.users
|
|
||||||
.suspend_account(&user_id, &services.globals.server_user)
|
|
||||||
.await;
|
|
||||||
// And send an @room notice to the admin room, to prompt admins to review the
|
|
||||||
// new user and ideally unsuspend them if deemed appropriate.
|
|
||||||
if services.server.config.admin_room_notices {
|
|
||||||
services
|
|
||||||
.admin
|
|
||||||
.send_loud_message(RoomMessageEventContent::text_plain(format!(
|
|
||||||
"User {user_id} has been suspended as they are not the first user \
|
|
||||||
on this server. Please review and unsuspend them if appropriate."
|
|
||||||
)))
|
|
||||||
.await
|
|
||||||
.ok();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -620,6 +584,7 @@ pub(crate) async fn change_password_route(
|
||||||
.sender_user
|
.sender_user
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?;
|
.ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?;
|
||||||
|
let sender_device = body.sender_device();
|
||||||
|
|
||||||
let mut uiaainfo = UiaaInfo {
|
let mut uiaainfo = UiaaInfo {
|
||||||
flows: vec![AuthFlow { stages: vec![AuthType::Password] }],
|
flows: vec![AuthFlow { stages: vec![AuthType::Password] }],
|
||||||
|
@ -633,7 +598,7 @@ pub(crate) async fn change_password_route(
|
||||||
| Some(auth) => {
|
| Some(auth) => {
|
||||||
let (worked, uiaainfo) = services
|
let (worked, uiaainfo) = services
|
||||||
.uiaa
|
.uiaa
|
||||||
.try_auth(sender_user, body.sender_device(), auth, &uiaainfo)
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
if !worked {
|
if !worked {
|
||||||
|
@ -647,7 +612,7 @@ pub(crate) async fn change_password_route(
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
services
|
services
|
||||||
.uiaa
|
.uiaa
|
||||||
.create(sender_user, body.sender_device(), &uiaainfo, json);
|
.create(sender_user, sender_device, &uiaainfo, json);
|
||||||
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
},
|
},
|
||||||
|
@ -666,7 +631,7 @@ pub(crate) async fn change_password_route(
|
||||||
services
|
services
|
||||||
.users
|
.users
|
||||||
.all_device_ids(sender_user)
|
.all_device_ids(sender_user)
|
||||||
.ready_filter(|id| *id != body.sender_device())
|
.ready_filter(|id| *id != sender_device)
|
||||||
.for_each(|id| services.users.remove_device(sender_user, id))
|
.for_each(|id| services.users.remove_device(sender_user, id))
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
|
@ -675,17 +640,17 @@ pub(crate) async fn change_password_route(
|
||||||
.pusher
|
.pusher
|
||||||
.get_pushkeys(sender_user)
|
.get_pushkeys(sender_user)
|
||||||
.map(ToOwned::to_owned)
|
.map(ToOwned::to_owned)
|
||||||
.broad_filter_map(async |pushkey| {
|
.broad_filter_map(|pushkey| async move {
|
||||||
services
|
services
|
||||||
.pusher
|
.pusher
|
||||||
.get_pusher_device(&pushkey)
|
.get_pusher_device(&pushkey)
|
||||||
.await
|
.await
|
||||||
.ok()
|
.ok()
|
||||||
.filter(|pusher_device| pusher_device != body.sender_device())
|
.filter(|pusher_device| pusher_device != sender_device)
|
||||||
.is_some()
|
.is_some()
|
||||||
.then_some(pushkey)
|
.then_some(pushkey)
|
||||||
})
|
})
|
||||||
.for_each(async |pushkey| {
|
.for_each(|pushkey| async move {
|
||||||
services.pusher.delete_pusher(sender_user, &pushkey).await;
|
services.pusher.delete_pusher(sender_user, &pushkey).await;
|
||||||
})
|
})
|
||||||
.await;
|
.await;
|
||||||
|
@ -696,8 +661,11 @@ pub(crate) async fn change_password_route(
|
||||||
if services.server.config.admin_room_notices {
|
if services.server.config.admin_room_notices {
|
||||||
services
|
services
|
||||||
.admin
|
.admin
|
||||||
.notice(&format!("User {sender_user} changed their password."))
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
.await;
|
"User {sender_user} changed their password."
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(change_password::v3::Response {})
|
Ok(change_password::v3::Response {})
|
||||||
|
@ -712,10 +680,13 @@ pub(crate) async fn whoami_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<whoami::v3::Request>,
|
body: Ruma<whoami::v3::Request>,
|
||||||
) -> Result<whoami::v3::Response> {
|
) -> Result<whoami::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let device_id = body.sender_device.clone();
|
||||||
|
|
||||||
Ok(whoami::v3::Response {
|
Ok(whoami::v3::Response {
|
||||||
user_id: body.sender_user().to_owned(),
|
user_id: sender_user.clone(),
|
||||||
device_id: body.sender_device.clone(),
|
device_id,
|
||||||
is_guest: services.users.is_deactivated(body.sender_user()).await?
|
is_guest: services.users.is_deactivated(sender_user).await?
|
||||||
&& body.appservice_info.is_none(),
|
&& body.appservice_info.is_none(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -743,6 +714,7 @@ pub(crate) async fn deactivate_route(
|
||||||
.sender_user
|
.sender_user
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?;
|
.ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?;
|
||||||
|
let sender_device = body.sender_device();
|
||||||
|
|
||||||
let mut uiaainfo = UiaaInfo {
|
let mut uiaainfo = UiaaInfo {
|
||||||
flows: vec![AuthFlow { stages: vec![AuthType::Password] }],
|
flows: vec![AuthFlow { stages: vec![AuthType::Password] }],
|
||||||
|
@ -756,7 +728,7 @@ pub(crate) async fn deactivate_route(
|
||||||
| Some(auth) => {
|
| Some(auth) => {
|
||||||
let (worked, uiaainfo) = services
|
let (worked, uiaainfo) = services
|
||||||
.uiaa
|
.uiaa
|
||||||
.try_auth(sender_user, body.sender_device(), auth, &uiaainfo)
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
if !worked {
|
if !worked {
|
||||||
|
@ -769,7 +741,7 @@ pub(crate) async fn deactivate_route(
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
services
|
services
|
||||||
.uiaa
|
.uiaa
|
||||||
.create(sender_user, body.sender_device(), &uiaainfo, json);
|
.create(sender_user, sender_device, &uiaainfo, json);
|
||||||
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
},
|
},
|
||||||
|
@ -791,17 +763,18 @@ pub(crate) async fn deactivate_route(
|
||||||
super::update_displayname(&services, sender_user, None, &all_joined_rooms).await;
|
super::update_displayname(&services, sender_user, None, &all_joined_rooms).await;
|
||||||
super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await;
|
super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await;
|
||||||
|
|
||||||
full_user_deactivate(&services, sender_user, &all_joined_rooms)
|
full_user_deactivate(&services, sender_user, &all_joined_rooms).await?;
|
||||||
.boxed()
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("User {sender_user} deactivated their account.");
|
info!("User {sender_user} deactivated their account.");
|
||||||
|
|
||||||
if services.server.config.admin_room_notices {
|
if services.server.config.admin_room_notices {
|
||||||
services
|
services
|
||||||
.admin
|
.admin
|
||||||
.notice(&format!("User {sender_user} deactivated their account."))
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
.await;
|
"User {sender_user} deactivated their account."
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(deactivate::v3::Response {
|
Ok(deactivate::v3::Response {
|
||||||
|
@ -878,7 +851,6 @@ pub async fn full_user_deactivate(
|
||||||
all_joined_rooms: &[OwnedRoomId],
|
all_joined_rooms: &[OwnedRoomId],
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
services.users.deactivate_account(user_id).await.ok();
|
services.users.deactivate_account(user_id).await.ok();
|
||||||
|
|
||||||
super::update_displayname(services, user_id, None, all_joined_rooms).await;
|
super::update_displayname(services, user_id, None, all_joined_rooms).await;
|
||||||
super::update_avatar_url(services, user_id, None, None, all_joined_rooms).await;
|
super::update_avatar_url(services, user_id, None, None, all_joined_rooms).await;
|
||||||
|
|
||||||
|
@ -915,7 +887,7 @@ pub async fn full_user_deactivate(
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(room_id, &StateEventType::RoomCreate, "")
|
.room_state_get(room_id, &StateEventType::RoomCreate, "")
|
||||||
.await
|
.await
|
||||||
.is_ok_and(|event| event.sender() == user_id);
|
.is_ok_and(|event| event.sender == user_id);
|
||||||
|
|
||||||
if user_can_demote_self {
|
if user_can_demote_self {
|
||||||
let mut power_levels_content = room_power_levels.unwrap_or_default();
|
let mut power_levels_content = room_power_levels.unwrap_or_default();
|
||||||
|
@ -943,7 +915,7 @@ pub async fn full_user_deactivate(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
super::leave_all_rooms(services, user_id).boxed().await;
|
super::leave_all_rooms(services, user_id).await;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,7 +17,7 @@ pub(crate) async fn create_alias_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<create_alias::v3::Request>,
|
body: Ruma<create_alias::v3::Request>,
|
||||||
) -> Result<create_alias::v3::Response> {
|
) -> Result<create_alias::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
if services.users.is_suspended(sender_user).await? {
|
if services.users.is_suspended(sender_user).await? {
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||||
}
|
}
|
||||||
|
@ -65,7 +65,7 @@ pub(crate) async fn delete_alias_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<delete_alias::v3::Request>,
|
body: Ruma<delete_alias::v3::Request>,
|
||||||
) -> Result<delete_alias::v3::Response> {
|
) -> Result<delete_alias::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
if services.users.is_suspended(sender_user).await? {
|
if services.users.is_suspended(sender_user).await? {
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,10 +2,8 @@ use std::cmp::Ordering;
|
||||||
|
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{Err, Result, err};
|
use conduwuit::{Err, Result, err};
|
||||||
use conduwuit_service::Services;
|
|
||||||
use futures::{FutureExt, future::try_join};
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
UInt, UserId,
|
UInt,
|
||||||
api::client::backup::{
|
api::client::backup::{
|
||||||
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
|
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
|
||||||
create_backup_version, delete_backup_keys, delete_backup_keys_for_room,
|
create_backup_version, delete_backup_keys, delete_backup_keys_for_room,
|
||||||
|
@ -60,9 +58,21 @@ pub(crate) async fn get_latest_backup_info_route(
|
||||||
.await
|
.await
|
||||||
.map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?;
|
.map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?;
|
||||||
|
|
||||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &version).await?;
|
Ok(get_latest_backup_info::v3::Response {
|
||||||
|
algorithm,
|
||||||
Ok(get_latest_backup_info::v3::Response { algorithm, count, etag, version })
|
count: (UInt::try_from(
|
||||||
|
services
|
||||||
|
.key_backups
|
||||||
|
.count_keys(body.sender_user(), &version)
|
||||||
|
.await,
|
||||||
|
)
|
||||||
|
.expect("user backup keys count should not be that high")),
|
||||||
|
etag: services
|
||||||
|
.key_backups
|
||||||
|
.get_etag(body.sender_user(), &version)
|
||||||
|
.await,
|
||||||
|
version,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/v3/room_keys/version/{version}`
|
/// # `GET /_matrix/client/v3/room_keys/version/{version}`
|
||||||
|
@ -80,12 +90,17 @@ pub(crate) async fn get_backup_info_route(
|
||||||
err!(Request(NotFound("Key backup does not exist at version {:?}", body.version)))
|
err!(Request(NotFound("Key backup does not exist at version {:?}", body.version)))
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
|
||||||
|
|
||||||
Ok(get_backup_info::v3::Response {
|
Ok(get_backup_info::v3::Response {
|
||||||
algorithm,
|
algorithm,
|
||||||
count,
|
count: services
|
||||||
etag,
|
.key_backups
|
||||||
|
.count_keys(body.sender_user(), &body.version)
|
||||||
|
.await
|
||||||
|
.try_into()?,
|
||||||
|
etag: services
|
||||||
|
.key_backups
|
||||||
|
.get_etag(body.sender_user(), &body.version)
|
||||||
|
.await,
|
||||||
version: body.version.clone(),
|
version: body.version.clone(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -140,9 +155,17 @@ pub(crate) async fn add_backup_keys_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
Ok(add_backup_keys::v3::Response {
|
||||||
|
count: services
|
||||||
Ok(add_backup_keys::v3::Response { count, etag })
|
.key_backups
|
||||||
|
.count_keys(body.sender_user(), &body.version)
|
||||||
|
.await
|
||||||
|
.try_into()?,
|
||||||
|
etag: services
|
||||||
|
.key_backups
|
||||||
|
.get_etag(body.sender_user(), &body.version)
|
||||||
|
.await,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
|
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||||
|
@ -175,9 +198,17 @@ pub(crate) async fn add_backup_keys_for_room_route(
|
||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
Ok(add_backup_keys_for_room::v3::Response {
|
||||||
|
count: services
|
||||||
Ok(add_backup_keys_for_room::v3::Response { count, etag })
|
.key_backups
|
||||||
|
.count_keys(body.sender_user(), &body.version)
|
||||||
|
.await
|
||||||
|
.try_into()?,
|
||||||
|
etag: services
|
||||||
|
.key_backups
|
||||||
|
.get_etag(body.sender_user(), &body.version)
|
||||||
|
.await,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||||
|
@ -275,9 +306,17 @@ pub(crate) async fn add_backup_keys_for_session_route(
|
||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
Ok(add_backup_keys_for_session::v3::Response {
|
||||||
|
count: services
|
||||||
Ok(add_backup_keys_for_session::v3::Response { count, etag })
|
.key_backups
|
||||||
|
.count_keys(body.sender_user(), &body.version)
|
||||||
|
.await
|
||||||
|
.try_into()?,
|
||||||
|
etag: services
|
||||||
|
.key_backups
|
||||||
|
.get_etag(body.sender_user(), &body.version)
|
||||||
|
.await,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys`
|
/// # `GET /_matrix/client/r0/room_keys/keys`
|
||||||
|
@ -340,9 +379,17 @@ pub(crate) async fn delete_backup_keys_route(
|
||||||
.delete_all_keys(body.sender_user(), &body.version)
|
.delete_all_keys(body.sender_user(), &body.version)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
Ok(delete_backup_keys::v3::Response {
|
||||||
|
count: services
|
||||||
Ok(delete_backup_keys::v3::Response { count, etag })
|
.key_backups
|
||||||
|
.count_keys(body.sender_user(), &body.version)
|
||||||
|
.await
|
||||||
|
.try_into()?,
|
||||||
|
etag: services
|
||||||
|
.key_backups
|
||||||
|
.get_etag(body.sender_user(), &body.version)
|
||||||
|
.await,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
|
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||||
|
@ -357,9 +404,17 @@ pub(crate) async fn delete_backup_keys_for_room_route(
|
||||||
.delete_room_keys(body.sender_user(), &body.version, &body.room_id)
|
.delete_room_keys(body.sender_user(), &body.version, &body.room_id)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
Ok(delete_backup_keys_for_room::v3::Response {
|
||||||
|
count: services
|
||||||
Ok(delete_backup_keys_for_room::v3::Response { count, etag })
|
.key_backups
|
||||||
|
.count_keys(body.sender_user(), &body.version)
|
||||||
|
.await
|
||||||
|
.try_into()?,
|
||||||
|
etag: services
|
||||||
|
.key_backups
|
||||||
|
.get_etag(body.sender_user(), &body.version)
|
||||||
|
.await,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||||
|
@ -374,22 +429,15 @@ pub(crate) async fn delete_backup_keys_for_session_route(
|
||||||
.delete_room_key(body.sender_user(), &body.version, &body.room_id, &body.session_id)
|
.delete_room_key(body.sender_user(), &body.version, &body.room_id, &body.session_id)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?;
|
Ok(delete_backup_keys_for_session::v3::Response {
|
||||||
|
count: services
|
||||||
Ok(delete_backup_keys_for_session::v3::Response { count, etag })
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_count_etag(
|
|
||||||
services: &Services,
|
|
||||||
sender_user: &UserId,
|
|
||||||
version: &str,
|
|
||||||
) -> Result<(UInt, String)> {
|
|
||||||
let count = services
|
|
||||||
.key_backups
|
.key_backups
|
||||||
.count_keys(sender_user, version)
|
.count_keys(body.sender_user(), &body.version)
|
||||||
.map(TryInto::try_into);
|
.await
|
||||||
|
.try_into()?,
|
||||||
let etag = services.key_backups.get_etag(sender_user, version).map(Ok);
|
etag: services
|
||||||
|
.key_backups
|
||||||
Ok(try_join(count, etag).await?)
|
.get_etag(body.sender_user(), &body.version)
|
||||||
|
.await,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -26,8 +26,8 @@ pub(crate) async fn get_capabilities_route(
|
||||||
|
|
||||||
let mut capabilities = Capabilities::default();
|
let mut capabilities = Capabilities::default();
|
||||||
capabilities.room_versions = RoomVersionsCapability {
|
capabilities.room_versions = RoomVersionsCapability {
|
||||||
available,
|
|
||||||
default: services.server.config.default_room_version.clone(),
|
default: services.server.config.default_room_version.clone(),
|
||||||
|
available,
|
||||||
};
|
};
|
||||||
|
|
||||||
// we do not implement 3PID stuff
|
// we do not implement 3PID stuff
|
||||||
|
@ -38,12 +38,16 @@ pub(crate) async fn get_capabilities_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
// MSC4133 capability
|
// MSC4133 capability
|
||||||
capabilities.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))?;
|
capabilities
|
||||||
|
.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))
|
||||||
|
.expect("this is valid JSON we created");
|
||||||
|
|
||||||
capabilities.set(
|
capabilities
|
||||||
|
.set(
|
||||||
"org.matrix.msc4267.forget_forced_upon_leave",
|
"org.matrix.msc4267.forget_forced_upon_leave",
|
||||||
json!({"enabled": services.config.forget_forced_upon_leave}),
|
json!({"enabled": services.config.forget_forced_upon_leave}),
|
||||||
)?;
|
)
|
||||||
|
.expect("valid JSON we created");
|
||||||
|
|
||||||
Ok(get_capabilities::v3::Response { capabilities })
|
Ok(get_capabilities::v3::Response { capabilities })
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,8 @@
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Event, Result, at, debug_warn, err, ref_at,
|
Err, Result, at, debug_warn, err,
|
||||||
|
matrix::pdu::PduEvent,
|
||||||
|
ref_at,
|
||||||
utils::{
|
utils::{
|
||||||
IterStream,
|
IterStream,
|
||||||
future::TryExtExt,
|
future::TryExtExt,
|
||||||
|
@ -109,7 +111,7 @@ pub(crate) async fn get_context_route(
|
||||||
|
|
||||||
let lazy_loading_context = lazy_loading::Context {
|
let lazy_loading_context = lazy_loading::Context {
|
||||||
user_id: sender_user,
|
user_id: sender_user,
|
||||||
device_id: Some(sender_device),
|
device_id: sender_device,
|
||||||
room_id,
|
room_id,
|
||||||
token: Some(base_count.into_unsigned()),
|
token: Some(base_count.into_unsigned()),
|
||||||
options: Some(&filter.lazy_load_options),
|
options: Some(&filter.lazy_load_options),
|
||||||
|
@ -177,12 +179,12 @@ pub(crate) async fn get_context_route(
|
||||||
.broad_filter_map(|event_id: &OwnedEventId| {
|
.broad_filter_map(|event_id: &OwnedEventId| {
|
||||||
services.rooms.timeline.get_pdu(event_id.as_ref()).ok()
|
services.rooms.timeline.get_pdu(event_id.as_ref()).ok()
|
||||||
})
|
})
|
||||||
.map(Event::into_format)
|
.map(PduEvent::into_state_event)
|
||||||
.collect()
|
.collect()
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
Ok(get_context::v3::Response {
|
Ok(get_context::v3::Response {
|
||||||
event: base_event.map(at!(1)).map(Event::into_format),
|
event: base_event.map(at!(1)).map(PduEvent::into_room_event),
|
||||||
|
|
||||||
start: events_before
|
start: events_before
|
||||||
.last()
|
.last()
|
||||||
|
@ -201,13 +203,13 @@ pub(crate) async fn get_context_route(
|
||||||
events_before: events_before
|
events_before: events_before
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(at!(1))
|
.map(at!(1))
|
||||||
.map(Event::into_format)
|
.map(PduEvent::into_room_event)
|
||||||
.collect(),
|
.collect(),
|
||||||
|
|
||||||
events_after: events_after
|
events_after: events_after
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(at!(1))
|
.map(at!(1))
|
||||||
.map(Event::into_format)
|
.map(PduEvent::into_room_event)
|
||||||
.collect(),
|
.collect(),
|
||||||
|
|
||||||
state,
|
state,
|
||||||
|
|
|
@ -21,9 +21,11 @@ pub(crate) async fn get_devices_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<get_devices::v3::Request>,
|
body: Ruma<get_devices::v3::Request>,
|
||||||
) -> Result<get_devices::v3::Response> {
|
) -> Result<get_devices::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let devices: Vec<device::Device> = services
|
let devices: Vec<device::Device> = services
|
||||||
.users
|
.users
|
||||||
.all_devices_metadata(body.sender_user())
|
.all_devices_metadata(sender_user)
|
||||||
.collect()
|
.collect()
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
|
@ -37,9 +39,11 @@ pub(crate) async fn get_device_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<get_device::v3::Request>,
|
body: Ruma<get_device::v3::Request>,
|
||||||
) -> Result<get_device::v3::Response> {
|
) -> Result<get_device::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let device = services
|
let device = services
|
||||||
.users
|
.users
|
||||||
.get_device_metadata(body.sender_user(), &body.body.device_id)
|
.get_device_metadata(sender_user, &body.body.device_id)
|
||||||
.await
|
.await
|
||||||
.map_err(|_| err!(Request(NotFound("Device not found."))))?;
|
.map_err(|_| err!(Request(NotFound("Device not found."))))?;
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use axum_client_ip::InsecureClientIp;
|
use axum_client_ip::InsecureClientIp;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Event, Result, err, info,
|
Err, Result, err, info,
|
||||||
utils::{
|
utils::{
|
||||||
TryFutureExtExt,
|
TryFutureExtExt,
|
||||||
math::Expected,
|
math::Expected,
|
||||||
|
@ -352,7 +352,7 @@ async fn user_can_publish_room(
|
||||||
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")
|
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
| Ok(event) => serde_json::from_str(event.content().get())
|
| Ok(event) => serde_json::from_str(event.content.get())
|
||||||
.map_err(|_| err!(Database("Invalid event content for m.room.power_levels")))
|
.map_err(|_| err!(Database("Invalid event content for m.room.power_levels")))
|
||||||
.map(|content: RoomPowerLevelsEventContent| {
|
.map(|content: RoomPowerLevelsEventContent| {
|
||||||
RoomPowerLevels::from(content)
|
RoomPowerLevels::from(content)
|
||||||
|
@ -365,7 +365,7 @@ async fn user_can_publish_room(
|
||||||
.room_state_get(room_id, &StateEventType::RoomCreate, "")
|
.room_state_get(room_id, &StateEventType::RoomCreate, "")
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
| Ok(event) => Ok(event.sender() == user_id),
|
| Ok(event) => Ok(event.sender == user_id),
|
||||||
| _ => Err!(Request(Forbidden("User is not allowed to publish this room"))),
|
| _ => Err!(Request(Forbidden("User is not allowed to publish this room"))),
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
|
@ -13,9 +13,11 @@ pub(crate) async fn get_filter_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<get_filter::v3::Request>,
|
body: Ruma<get_filter::v3::Request>,
|
||||||
) -> Result<get_filter::v3::Response> {
|
) -> Result<get_filter::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
services
|
services
|
||||||
.users
|
.users
|
||||||
.get_filter(body.sender_user(), &body.filter_id)
|
.get_filter(sender_user, &body.filter_id)
|
||||||
.await
|
.await
|
||||||
.map(get_filter::v3::Response::new)
|
.map(get_filter::v3::Response::new)
|
||||||
.map_err(|_| err!(Request(NotFound("Filter not found."))))
|
.map_err(|_| err!(Request(NotFound("Filter not found."))))
|
||||||
|
@ -28,9 +30,9 @@ pub(crate) async fn create_filter_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<create_filter::v3::Request>,
|
body: Ruma<create_filter::v3::Request>,
|
||||||
) -> Result<create_filter::v3::Response> {
|
) -> Result<create_filter::v3::Response> {
|
||||||
let filter_id = services
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
.users
|
|
||||||
.create_filter(body.sender_user(), &body.filter);
|
let filter_id = services.users.create_filter(sender_user, &body.filter);
|
||||||
|
|
||||||
Ok(create_filter::v3::Response::new(filter_id))
|
Ok(create_filter::v3::Response::new(filter_id))
|
||||||
}
|
}
|
||||||
|
|
|
@ -126,7 +126,7 @@ pub(crate) async fn get_keys_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<get_keys::v3::Request>,
|
body: Ruma<get_keys::v3::Request>,
|
||||||
) -> Result<get_keys::v3::Response> {
|
) -> Result<get_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
get_keys_helper(
|
get_keys_helper(
|
||||||
&services,
|
&services,
|
||||||
|
@ -157,7 +157,8 @@ pub(crate) async fn upload_signing_keys_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<upload_signing_keys::v3::Request>,
|
body: Ruma<upload_signing_keys::v3::Request>,
|
||||||
) -> Result<upload_signing_keys::v3::Response> {
|
) -> Result<upload_signing_keys::v3::Response> {
|
||||||
let (sender_user, sender_device) = body.sender();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
// UIAA
|
// UIAA
|
||||||
let mut uiaainfo = UiaaInfo {
|
let mut uiaainfo = UiaaInfo {
|
||||||
|
@ -202,12 +203,12 @@ pub(crate) async fn upload_signing_keys_route(
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
},
|
},
|
||||||
| _ => match body.json_body.as_ref() {
|
| _ => match body.json_body {
|
||||||
| Some(json) => {
|
| Some(json) => {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
services
|
services
|
||||||
.uiaa
|
.uiaa
|
||||||
.create(sender_user, sender_device, &uiaainfo, json);
|
.create(sender_user, sender_device, &uiaainfo, &json);
|
||||||
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
},
|
},
|
||||||
|
@ -372,7 +373,7 @@ pub(crate) async fn get_key_changes_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<get_key_changes::v3::Request>,
|
body: Ruma<get_key_changes::v3::Request>,
|
||||||
) -> Result<get_key_changes::v3::Response> {
|
) -> Result<get_key_changes::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut device_list_updates = HashSet::new();
|
let mut device_list_updates = HashSet::new();
|
||||||
|
|
||||||
|
|
|
@ -51,7 +51,7 @@ pub(crate) async fn create_content_route(
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
body: Ruma<create_content::v3::Request>,
|
body: Ruma<create_content::v3::Request>,
|
||||||
) -> Result<create_content::v3::Response> {
|
) -> Result<create_content::v3::Response> {
|
||||||
let user = body.sender_user();
|
let user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
if services.users.is_suspended(user).await? {
|
if services.users.is_suspended(user).await? {
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||||
}
|
}
|
||||||
|
@ -97,7 +97,7 @@ pub(crate) async fn get_content_thumbnail_route(
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
body: Ruma<get_content_thumbnail::v1::Request>,
|
body: Ruma<get_content_thumbnail::v1::Request>,
|
||||||
) -> Result<get_content_thumbnail::v1::Response> {
|
) -> Result<get_content_thumbnail::v1::Response> {
|
||||||
let user = body.sender_user();
|
let user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?;
|
let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?;
|
||||||
let mxc = Mxc {
|
let mxc = Mxc {
|
||||||
|
@ -134,7 +134,7 @@ pub(crate) async fn get_content_route(
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
body: Ruma<get_content::v1::Request>,
|
body: Ruma<get_content::v1::Request>,
|
||||||
) -> Result<get_content::v1::Response> {
|
) -> Result<get_content::v1::Response> {
|
||||||
let user = body.sender_user();
|
let user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mxc = Mxc {
|
let mxc = Mxc {
|
||||||
server_name: &body.server_name,
|
server_name: &body.server_name,
|
||||||
|
@ -170,7 +170,7 @@ pub(crate) async fn get_content_as_filename_route(
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
body: Ruma<get_content_as_filename::v1::Request>,
|
body: Ruma<get_content_as_filename::v1::Request>,
|
||||||
) -> Result<get_content_as_filename::v1::Response> {
|
) -> Result<get_content_as_filename::v1::Response> {
|
||||||
let user = body.sender_user();
|
let user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mxc = Mxc {
|
let mxc = Mxc {
|
||||||
server_name: &body.server_name,
|
server_name: &body.server_name,
|
||||||
|
@ -206,7 +206,7 @@ pub(crate) async fn get_media_preview_route(
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
body: Ruma<get_media_preview::v1::Request>,
|
body: Ruma<get_media_preview::v1::Request>,
|
||||||
) -> Result<get_media_preview::v1::Response> {
|
) -> Result<get_media_preview::v1::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let url = &body.url;
|
let url = &body.url;
|
||||||
let url = Url::parse(&body.url).map_err(|e| {
|
let url = Url::parse(&body.url).map_err(|e| {
|
||||||
|
|
|
@ -55,7 +55,7 @@ pub(crate) async fn get_media_preview_legacy_route(
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
body: Ruma<get_media_preview::v3::Request>,
|
body: Ruma<get_media_preview::v3::Request>,
|
||||||
) -> Result<get_media_preview::v3::Response> {
|
) -> Result<get_media_preview::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let url = &body.url;
|
let url = &body.url;
|
||||||
let url = Url::parse(&body.url).map_err(|e| {
|
let url = Url::parse(&body.url).map_err(|e| {
|
||||||
|
|
2764
src/api/client/membership.rs
Normal file
2764
src/api/client/membership.rs
Normal file
File diff suppressed because it is too large
Load diff
|
@ -1,60 +0,0 @@
|
||||||
use axum::extract::State;
|
|
||||||
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
|
|
||||||
use ruma::{
|
|
||||||
api::client::membership::ban_user,
|
|
||||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::Ruma;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/ban`
|
|
||||||
///
|
|
||||||
/// Tries to send a ban event into the room.
|
|
||||||
pub(crate) async fn ban_user_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
body: Ruma<ban_user::v3::Request>,
|
|
||||||
) -> Result<ban_user::v3::Response> {
|
|
||||||
let sender_user = body.sender_user();
|
|
||||||
|
|
||||||
if sender_user == body.user_id {
|
|
||||||
return Err!(Request(Forbidden("You cannot ban yourself.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
if services.users.is_suspended(sender_user).await? {
|
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
|
|
||||||
|
|
||||||
let current_member_content = services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.get_member(&body.room_id, &body.user_id)
|
|
||||||
.await
|
|
||||||
.unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Ban));
|
|
||||||
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Ban,
|
|
||||||
reason: body.reason.clone(),
|
|
||||||
displayname: None, // display name may be offensive
|
|
||||||
avatar_url: None, // avatar may be offensive
|
|
||||||
is_direct: None,
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
redact_events: body.redact_events,
|
|
||||||
..current_member_content
|
|
||||||
}),
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
drop(state_lock);
|
|
||||||
|
|
||||||
Ok(ban_user::v3::Response::new())
|
|
||||||
}
|
|
|
@ -1,52 +0,0 @@
|
||||||
use axum::extract::State;
|
|
||||||
use conduwuit::{Err, Result, is_matching, result::NotFound, utils::FutureBoolExt};
|
|
||||||
use futures::pin_mut;
|
|
||||||
use ruma::{api::client::membership::forget_room, events::room::member::MembershipState};
|
|
||||||
|
|
||||||
use crate::Ruma;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/v3/rooms/{roomId}/forget`
|
|
||||||
///
|
|
||||||
/// Forgets about a room.
|
|
||||||
///
|
|
||||||
/// - If the sender user currently left the room: Stops sender user from
|
|
||||||
/// receiving information about the room
|
|
||||||
///
|
|
||||||
/// Note: Other devices of the user have no way of knowing the room was
|
|
||||||
/// forgotten, so this has to be called from every device
|
|
||||||
pub(crate) async fn forget_room_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
body: Ruma<forget_room::v3::Request>,
|
|
||||||
) -> Result<forget_room::v3::Response> {
|
|
||||||
let user_id = body.sender_user();
|
|
||||||
let room_id = &body.room_id;
|
|
||||||
|
|
||||||
let joined = services.rooms.state_cache.is_joined(user_id, room_id);
|
|
||||||
let knocked = services.rooms.state_cache.is_knocked(user_id, room_id);
|
|
||||||
let invited = services.rooms.state_cache.is_invited(user_id, room_id);
|
|
||||||
|
|
||||||
pin_mut!(joined, knocked, invited);
|
|
||||||
if joined.or(knocked).or(invited).await {
|
|
||||||
return Err!(Request(Unknown("You must leave the room before forgetting it")));
|
|
||||||
}
|
|
||||||
|
|
||||||
let membership = services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.get_member(room_id, user_id)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
if membership.is_not_found() {
|
|
||||||
return Err!(Request(Unknown("No membership event was found, room was never joined")));
|
|
||||||
}
|
|
||||||
|
|
||||||
let non_membership = membership
|
|
||||||
.map(|member| member.membership)
|
|
||||||
.is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban));
|
|
||||||
|
|
||||||
if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await {
|
|
||||||
services.rooms.state_cache.forget(room_id, user_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(forget_room::v3::Response::new())
|
|
||||||
}
|
|
|
@ -1,238 +0,0 @@
|
||||||
use axum::extract::State;
|
|
||||||
use axum_client_ip::InsecureClientIp;
|
|
||||||
use conduwuit::{
|
|
||||||
Err, Result, debug_error, err, info,
|
|
||||||
matrix::{event::gen_event_id_canonical_json, pdu::PduBuilder},
|
|
||||||
};
|
|
||||||
use futures::{FutureExt, join};
|
|
||||||
use ruma::{
|
|
||||||
OwnedServerName, RoomId, UserId,
|
|
||||||
api::{client::membership::invite_user, federation::membership::create_invite},
|
|
||||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
|
||||||
};
|
|
||||||
use service::Services;
|
|
||||||
|
|
||||||
use super::banned_room_check;
|
|
||||||
use crate::Ruma;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/invite`
|
|
||||||
///
|
|
||||||
/// Tries to send an invite event into the room.
|
|
||||||
#[tracing::instrument(skip_all, fields(%client), name = "invite")]
|
|
||||||
pub(crate) async fn invite_user_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
|
||||||
body: Ruma<invite_user::v3::Request>,
|
|
||||||
) -> Result<invite_user::v3::Response> {
|
|
||||||
let sender_user = body.sender_user();
|
|
||||||
if services.users.is_suspended(sender_user).await? {
|
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites {
|
|
||||||
debug_error!(
|
|
||||||
"User {sender_user} is not an admin and attempted to send an invite to room {}",
|
|
||||||
&body.room_id
|
|
||||||
);
|
|
||||||
return Err!(Request(Forbidden("Invites are not allowed on this server.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
banned_room_check(
|
|
||||||
&services,
|
|
||||||
sender_user,
|
|
||||||
Some(&body.room_id),
|
|
||||||
body.room_id.server_name(),
|
|
||||||
client,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
match &body.recipient {
|
|
||||||
| invite_user::v3::InvitationRecipient::UserId { user_id } => {
|
|
||||||
let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id);
|
|
||||||
let recipient_ignored_by_sender =
|
|
||||||
services.users.user_is_ignored(user_id, sender_user);
|
|
||||||
|
|
||||||
let (sender_ignored_recipient, recipient_ignored_by_sender) =
|
|
||||||
join!(sender_ignored_recipient, recipient_ignored_by_sender);
|
|
||||||
|
|
||||||
if sender_ignored_recipient {
|
|
||||||
return Ok(invite_user::v3::Response {});
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Ok(target_user_membership) = services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.get_member(&body.room_id, user_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
if target_user_membership.membership == MembershipState::Ban {
|
|
||||||
return Err!(Request(Forbidden("User is banned from this room.")));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if recipient_ignored_by_sender {
|
|
||||||
// silently drop the invite to the recipient if they've been ignored by the
|
|
||||||
// sender, pretend it worked
|
|
||||||
return Ok(invite_user::v3::Response {});
|
|
||||||
}
|
|
||||||
|
|
||||||
invite_helper(
|
|
||||||
&services,
|
|
||||||
sender_user,
|
|
||||||
user_id,
|
|
||||||
&body.room_id,
|
|
||||||
body.reason.clone(),
|
|
||||||
false,
|
|
||||||
)
|
|
||||||
.boxed()
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(invite_user::v3::Response {})
|
|
||||||
},
|
|
||||||
| _ => {
|
|
||||||
Err!(Request(NotFound("User not found.")))
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) async fn invite_helper(
|
|
||||||
services: &Services,
|
|
||||||
sender_user: &UserId,
|
|
||||||
user_id: &UserId,
|
|
||||||
room_id: &RoomId,
|
|
||||||
reason: Option<String>,
|
|
||||||
is_direct: bool,
|
|
||||||
) -> Result {
|
|
||||||
if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites {
|
|
||||||
info!(
|
|
||||||
"User {sender_user} is not an admin and attempted to send an invite to room \
|
|
||||||
{room_id}"
|
|
||||||
);
|
|
||||||
return Err!(Request(Forbidden("Invites are not allowed on this server.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !services.globals.user_is_local(user_id) {
|
|
||||||
let (pdu, pdu_json, invite_room_state) = {
|
|
||||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
|
||||||
|
|
||||||
let content = RoomMemberEventContent {
|
|
||||||
avatar_url: services.users.avatar_url(user_id).await.ok(),
|
|
||||||
is_direct: Some(is_direct),
|
|
||||||
reason,
|
|
||||||
..RoomMemberEventContent::new(MembershipState::Invite)
|
|
||||||
};
|
|
||||||
|
|
||||||
let (pdu, pdu_json) = services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.create_hash_and_sign_event(
|
|
||||||
PduBuilder::state(user_id.to_string(), &content),
|
|
||||||
sender_user,
|
|
||||||
room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let invite_room_state = services.rooms.state.summary_stripped(&pdu).await;
|
|
||||||
|
|
||||||
drop(state_lock);
|
|
||||||
|
|
||||||
(pdu, pdu_json, invite_room_state)
|
|
||||||
};
|
|
||||||
|
|
||||||
let room_version_id = services.rooms.state.get_room_version(room_id).await?;
|
|
||||||
|
|
||||||
let response = services
|
|
||||||
.sending
|
|
||||||
.send_federation_request(user_id.server_name(), create_invite::v2::Request {
|
|
||||||
room_id: room_id.to_owned(),
|
|
||||||
event_id: (*pdu.event_id).to_owned(),
|
|
||||||
room_version: room_version_id.clone(),
|
|
||||||
event: services
|
|
||||||
.sending
|
|
||||||
.convert_to_outgoing_federation_event(pdu_json.clone())
|
|
||||||
.await,
|
|
||||||
invite_room_state,
|
|
||||||
via: services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.servers_route_via(room_id)
|
|
||||||
.await
|
|
||||||
.ok(),
|
|
||||||
})
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// We do not add the event_id field to the pdu here because of signature and
|
|
||||||
// hashes checks
|
|
||||||
let (event_id, value) = gen_event_id_canonical_json(&response.event, &room_version_id)
|
|
||||||
.map_err(|e| {
|
|
||||||
err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}"))))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if pdu.event_id != event_id {
|
|
||||||
return Err!(Request(BadJson(warn!(
|
|
||||||
%pdu.event_id, %event_id,
|
|
||||||
"Server {} sent event with wrong event ID",
|
|
||||||
user_id.server_name()
|
|
||||||
))));
|
|
||||||
}
|
|
||||||
|
|
||||||
let origin: OwnedServerName = serde_json::from_value(serde_json::to_value(
|
|
||||||
value
|
|
||||||
.get("origin")
|
|
||||||
.ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?,
|
|
||||||
)?)
|
|
||||||
.map_err(|e| {
|
|
||||||
err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}"))))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let pdu_id = services
|
|
||||||
.rooms
|
|
||||||
.event_handler
|
|
||||||
.handle_incoming_pdu(&origin, room_id, &event_id, value, true)
|
|
||||||
.boxed()
|
|
||||||
.await?
|
|
||||||
.ok_or_else(|| {
|
|
||||||
err!(Request(InvalidParam("Could not accept incoming PDU as timeline event.")))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
return services.sending.send_pdu_room(room_id, &pdu_id).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
if !services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_joined(sender_user, room_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
return Err!(Request(Forbidden(
|
|
||||||
"You must be joined in the room you are trying to invite from."
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
|
||||||
|
|
||||||
let content = RoomMemberEventContent {
|
|
||||||
displayname: services.users.displayname(user_id).await.ok(),
|
|
||||||
avatar_url: services.users.avatar_url(user_id).await.ok(),
|
|
||||||
blurhash: services.users.blurhash(user_id).await.ok(),
|
|
||||||
is_direct: Some(is_direct),
|
|
||||||
reason,
|
|
||||||
..RoomMemberEventContent::new(MembershipState::Invite)
|
|
||||||
};
|
|
||||||
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder::state(user_id.to_string(), &content),
|
|
||||||
sender_user,
|
|
||||||
room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
drop(state_lock);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
|
@ -1,989 +0,0 @@
|
||||||
use std::{borrow::Borrow, collections::HashMap, iter::once, sync::Arc};
|
|
||||||
|
|
||||||
use axum::extract::State;
|
|
||||||
use axum_client_ip::InsecureClientIp;
|
|
||||||
use conduwuit::{
|
|
||||||
Err, Result, debug, debug_info, debug_warn, err, error, info,
|
|
||||||
matrix::{
|
|
||||||
StateKey,
|
|
||||||
event::{gen_event_id, gen_event_id_canonical_json},
|
|
||||||
pdu::{PduBuilder, PduEvent},
|
|
||||||
state_res,
|
|
||||||
},
|
|
||||||
result::FlatOk,
|
|
||||||
trace,
|
|
||||||
utils::{
|
|
||||||
self, shuffle,
|
|
||||||
stream::{IterStream, ReadyExt},
|
|
||||||
},
|
|
||||||
warn,
|
|
||||||
};
|
|
||||||
use futures::{FutureExt, StreamExt};
|
|
||||||
use ruma::{
|
|
||||||
CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
|
|
||||||
RoomVersionId, UserId,
|
|
||||||
api::{
|
|
||||||
client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
membership::{ThirdPartySigned, join_room_by_id, join_room_by_id_or_alias},
|
|
||||||
},
|
|
||||||
federation::{self},
|
|
||||||
},
|
|
||||||
canonical_json::to_canonical_value,
|
|
||||||
events::{
|
|
||||||
StateEventType,
|
|
||||||
room::{
|
|
||||||
join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent},
|
|
||||||
member::{MembershipState, RoomMemberEventContent},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use service::{
|
|
||||||
Services,
|
|
||||||
appservice::RegistrationInfo,
|
|
||||||
rooms::{
|
|
||||||
state::RoomMutexGuard,
|
|
||||||
state_compressor::{CompressedState, HashSetCompressStateEvent},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::banned_room_check;
|
|
||||||
use crate::Ruma;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/join`
|
|
||||||
///
|
|
||||||
/// Tries to join the sender user into a room.
|
|
||||||
///
|
|
||||||
/// - If the server knowns about this room: creates the join event and does auth
|
|
||||||
/// rules locally
|
|
||||||
/// - If the server does not know about the room: asks other servers over
|
|
||||||
/// federation
|
|
||||||
#[tracing::instrument(skip_all, fields(%client), name = "join")]
|
|
||||||
pub(crate) async fn join_room_by_id_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
|
||||||
body: Ruma<join_room_by_id::v3::Request>,
|
|
||||||
) -> Result<join_room_by_id::v3::Response> {
|
|
||||||
let sender_user = body.sender_user();
|
|
||||||
if services.users.is_suspended(sender_user).await? {
|
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
banned_room_check(
|
|
||||||
&services,
|
|
||||||
sender_user,
|
|
||||||
Some(&body.room_id),
|
|
||||||
body.room_id.server_name(),
|
|
||||||
client,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// There is no body.server_name for /roomId/join
|
|
||||||
let mut servers: Vec<_> = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.servers_invite_via(&body.room_id)
|
|
||||||
.map(ToOwned::to_owned)
|
|
||||||
.collect()
|
|
||||||
.await;
|
|
||||||
|
|
||||||
servers.extend(
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.invite_state(sender_user, &body.room_id)
|
|
||||||
.await
|
|
||||||
.unwrap_or_default()
|
|
||||||
.iter()
|
|
||||||
.filter_map(|event| event.get_field("sender").ok().flatten())
|
|
||||||
.filter_map(|sender: &str| UserId::parse(sender).ok())
|
|
||||||
.map(|user| user.server_name().to_owned()),
|
|
||||||
);
|
|
||||||
|
|
||||||
if let Some(server) = body.room_id.server_name() {
|
|
||||||
servers.push(server.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
servers.sort_unstable();
|
|
||||||
servers.dedup();
|
|
||||||
shuffle(&mut servers);
|
|
||||||
|
|
||||||
join_room_by_id_helper(
|
|
||||||
&services,
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
body.reason.clone(),
|
|
||||||
&servers,
|
|
||||||
body.third_party_signed.as_ref(),
|
|
||||||
&body.appservice_info,
|
|
||||||
)
|
|
||||||
.boxed()
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}`
///
/// Tries to join the sender user into a room.
///
/// - If the server knowns about this room: creates the join event and does auth
///   rules locally
/// - If the server does not know about the room: use the server name query
///   param if specified. if not specified, asks other servers over federation
///   via room alias server name and room ID server name
#[tracing::instrument(skip_all, fields(%client), name = "join")]
pub(crate) async fn join_room_by_id_or_alias_route(
	State(services): State<crate::State>,
	InsecureClientIp(client): InsecureClientIp,
	body: Ruma<join_room_by_id_or_alias::v3::Request>,
) -> Result<join_room_by_id_or_alias::v3::Response> {
	let sender_user = body.sender_user();
	let appservice_info = &body.appservice_info;
	let body = &body.body;

	// Suspended accounts may not initiate joins.
	if services.users.is_suspended(sender_user).await? {
		return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
	}

	// The path parameter may be either a room ID or a room alias; the two
	// cases build their candidate server list differently.
	let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) {
		| Ok(room_id) => {
			banned_room_check(
				&services,
				sender_user,
				Some(&room_id),
				room_id.server_name(),
				client,
			)
			.boxed()
			.await?;

			// Start from the caller-supplied ?via= servers, then add servers
			// that invited us via federation.
			let mut servers = body.via.clone();
			servers.extend(
				services
					.rooms
					.state_cache
					.servers_invite_via(&room_id)
					.map(ToOwned::to_owned)
					.collect::<Vec<_>>()
					.await,
			);

			// Add the home servers of users whose invites we hold.
			servers.extend(
				services
					.rooms
					.state_cache
					.invite_state(sender_user, &room_id)
					.await
					.unwrap_or_default()
					.iter()
					.filter_map(|event| event.get_field("sender").ok().flatten())
					.filter_map(|sender: &str| UserId::parse(sender).ok())
					.map(|user| user.server_name().to_owned()),
			);

			// The server implied by the room ID is also a candidate.
			if let Some(server) = room_id.server_name() {
				servers.push(server.to_owned());
			}

			// Deduplicate and randomize the try order.
			servers.sort_unstable();
			servers.dedup();
			shuffle(&mut servers);

			(servers, room_id)
		},
		| Err(room_alias) => {
			// Alias case: resolving the alias also yields an initial server
			// list (the ?via= servers are passed through to resolution).
			let (room_id, mut servers) = services
				.rooms
				.alias
				.resolve_alias(&room_alias, Some(body.via.clone()))
				.await?;

			banned_room_check(
				&services,
				sender_user,
				Some(&room_id),
				Some(room_alias.server_name()),
				client,
			)
			.await?;

			// Additional candidates: servers that invited us via federation...
			let addl_via_servers = services
				.rooms
				.state_cache
				.servers_invite_via(&room_id)
				.map(ToOwned::to_owned);

			// ...and the home servers of users whose invites we hold.
			let addl_state_servers = services
				.rooms
				.state_cache
				.invite_state(sender_user, &room_id)
				.await
				.unwrap_or_default();

			let mut addl_servers: Vec<_> = addl_state_servers
				.iter()
				.map(|event| event.get_field("sender"))
				.filter_map(FlatOk::flat_ok)
				.map(|user: &UserId| user.server_name().to_owned())
				.stream()
				.chain(addl_via_servers)
				.collect()
				.await;

			// Only the additional servers are deduped/shuffled here; the
			// resolver-provided list keeps its original order at the front.
			addl_servers.sort_unstable();
			addl_servers.dedup();
			shuffle(&mut addl_servers);
			servers.append(&mut addl_servers);

			(servers, room_id)
		},
	};

	let join_room_response = join_room_by_id_helper(
		&services,
		sender_user,
		&room_id,
		body.reason.clone(),
		&servers,
		body.third_party_signed.as_ref(),
		appservice_info,
	)
	.boxed()
	.await?;

	Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id })
}
|
|
||||||
|
|
||||||
/// Shared join implementation used by the join routes.
///
/// Performs guest-access and ban checks, short-circuits if the user is
/// already joined, then dispatches to either the local or the remote
/// (federated) join path depending on whether this server participates in
/// the room and which candidate `servers` were supplied.
pub async fn join_room_by_id_helper(
	services: &Services,
	sender_user: &UserId,
	room_id: &RoomId,
	reason: Option<String>,
	servers: &[OwnedServerName],
	third_party_signed: Option<&ThirdPartySigned>,
	appservice_info: &Option<RegistrationInfo>,
) -> Result<join_room_by_id::v3::Response> {
	// Serialize state mutations for this room; the guard is handed down to
	// whichever helper performs the join.
	let state_lock = services.rooms.state.mutex.lock(room_id).await;

	// NOTE(review): "guest" is detected via is_deactivated here — presumably
	// guest accounts are stored as deactivated users; confirm against the
	// account-creation code.
	let user_is_guest = services
		.users
		.is_deactivated(sender_user)
		.await
		.unwrap_or(false)
		&& appservice_info.is_none();

	if user_is_guest && !services.rooms.state_accessor.guest_can_join(room_id).await {
		return Err!(Request(Forbidden("Guests are not allowed to join this room")));
	}

	// Joining again is a no-op success.
	if services
		.rooms
		.state_cache
		.is_joined(sender_user, room_id)
		.await
	{
		debug_warn!("{sender_user} is already joined in {room_id}");
		return Ok(join_room_by_id::v3::Response { room_id: room_id.into() });
	}

	let server_in_room = services
		.rooms
		.state_cache
		.server_in_room(services.globals.server_name(), room_id)
		.await;

	// Only check our known membership if we're already in the room.
	// See: https://forgejo.ellis.link/continuwuation/continuwuity/issues/855
	let membership = if server_in_room {
		services
			.rooms
			.state_accessor
			.get_member(room_id, sender_user)
			.await
	} else {
		debug!("Ignoring local state for join {room_id}, we aren't in the room yet.");
		Ok(RoomMemberEventContent::new(MembershipState::Leave))
	};
	// Banned users are rejected before any join attempt is made.
	if let Ok(m) = membership {
		if m.membership == MembershipState::Ban {
			debug_warn!("{sender_user} is banned from {room_id} but attempted to join");
			// TODO: return reason
			return Err!(Request(Forbidden("You are banned from the room.")));
		}
	}

	// A join is "local" when we already participate, or when no usable
	// remote server was supplied (empty list, or only ourselves).
	let local_join = server_in_room
		|| servers.is_empty()
		|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));

	if local_join {
		join_room_by_id_helper_local(
			services,
			sender_user,
			room_id,
			reason,
			servers,
			third_party_signed,
			state_lock,
		)
		.boxed()
		.await?;
	} else {
		// Ask a remote server if we are not participating in this room
		join_room_by_id_helper_remote(
			services,
			sender_user,
			room_id,
			reason,
			servers,
			third_party_signed,
			state_lock,
		)
		.boxed()
		.await?;
	}

	Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
}
|
|
||||||
|
|
||||||
/// Federated join: asks one of `servers` for a make_join template, signs and
/// sends the resulting join event via send_join, then ingests the returned
/// room state and auth chain before appending the join PDU locally.
///
/// The `state_lock` guard must be held for the room and is consumed by the
/// final state operations.
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")]
async fn join_room_by_id_helper_remote(
	services: &Services,
	sender_user: &UserId,
	room_id: &RoomId,
	reason: Option<String>,
	servers: &[OwnedServerName],
	_third_party_signed: Option<&ThirdPartySigned>,
	state_lock: RoomMutexGuard,
) -> Result {
	info!("Joining {room_id} over federation.");

	let (make_join_response, remote_server) =
		make_join_request(services, sender_user, room_id, servers).await?;

	info!("make_join finished");

	let Some(room_version_id) = make_join_response.room_version else {
		return Err!(BadServerResponse("Remote room version is not supported by conduwuit"));
	};

	if !services.server.supported_room_version(&room_version_id) {
		return Err!(BadServerResponse(
			"Remote room version {room_version_id} is not supported by conduwuit"
		));
	}

	// The make_join response contains an event template ("stub") we must
	// fill in, hash and sign before sending it back via send_join.
	let mut join_event_stub: CanonicalJsonObject =
		serde_json::from_str(make_join_response.event.get()).map_err(|e| {
			err!(BadServerResponse(warn!(
				"Invalid make_join event json received from server: {e:?}"
			)))
		})?;

	// Restricted joins (join_authorised_via_users_server) only exist in
	// room versions >= 8; note the spec spells the JSON key "authorised".
	let join_authorized_via_users_server = {
		use RoomVersionId::*;
		if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
			join_event_stub
				.get("content")
				.map(|s| {
					s.as_object()?
						.get("join_authorised_via_users_server")?
						.as_str()
				})
				.and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok())
		} else {
			None
		}
	};

	join_event_stub.insert(
		"origin".to_owned(),
		CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()),
	);
	join_event_stub.insert(
		"origin_server_ts".to_owned(),
		CanonicalJsonValue::Integer(
			utils::millis_since_unix_epoch()
				.try_into()
				.expect("Timestamp is valid js_int value"),
		),
	);
	join_event_stub.insert(
		"content".to_owned(),
		to_canonical_value(RoomMemberEventContent {
			displayname: services.users.displayname(sender_user).await.ok(),
			avatar_url: services.users.avatar_url(sender_user).await.ok(),
			blurhash: services.users.blurhash(sender_user).await.ok(),
			reason,
			join_authorized_via_users_server: join_authorized_via_users_server.clone(),
			..RoomMemberEventContent::new(MembershipState::Join)
		})
		.expect("event is valid, we just created it"),
	);

	// We keep the "event_id" in the pdu only in v1 or
	// v2 rooms
	match room_version_id {
		| RoomVersionId::V1 | RoomVersionId::V2 => {},
		| _ => {
			join_event_stub.remove("event_id");
		},
	}

	// In order to create a compatible ref hash (EventID) the `hashes` field needs
	// to be present
	services
		.server_keys
		.hash_and_sign_event(&mut join_event_stub, &room_version_id)?;

	// Generate event id
	let event_id = gen_event_id(&join_event_stub, &room_version_id)?;

	// Add event_id back
	join_event_stub
		.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));

	// It has enough fields to be called a proper event now
	let mut join_event = join_event_stub;

	info!("Asking {remote_server} for send_join in room {room_id}");
	let send_join_request = federation::membership::create_join_event::v2::Request {
		room_id: room_id.to_owned(),
		event_id: event_id.clone(),
		omit_members: false,
		pdu: services
			.sending
			.convert_to_outgoing_federation_event(join_event.clone())
			.await,
	};

	let send_join_response = match services
		.sending
		.send_synapse_request(&remote_server, send_join_request)
		.await
	{
		| Ok(response) => response,
		| Err(e) => {
			error!("send_join failed: {e}");
			return Err(e);
		},
	};

	info!("send_join finished");

	// For restricted joins the resident server returns our event back,
	// re-signed; we must verify it and graft its signature onto ours.
	if join_authorized_via_users_server.is_some() {
		if let Some(signed_raw) = &send_join_response.room_state.event {
			debug_info!(
				"There is a signed event with join_authorized_via_users_server. This room is \
				 probably using restricted joins. Adding signature to our event"
			);

			let (signed_event_id, signed_value) =
				gen_event_id_canonical_json(signed_raw, &room_version_id).map_err(|e| {
					err!(Request(BadJson(warn!(
						"Could not convert event to canonical JSON: {e}"
					))))
				})?;

			// The returned event must be the very event we sent.
			if signed_event_id != event_id {
				return Err!(Request(BadJson(warn!(
					%signed_event_id, %event_id,
					"Server {remote_server} sent event with wrong event ID"
				))));
			}

			// A missing/invalid signature is only logged, not fatal; the join
			// proceeds with our own signature.
			match signed_value["signatures"]
				.as_object()
				.ok_or_else(|| {
					err!(BadServerResponse(warn!(
						"Server {remote_server} sent invalid signatures type"
					)))
				})
				.and_then(|e| {
					e.get(remote_server.as_str()).ok_or_else(|| {
						err!(BadServerResponse(warn!(
							"Server {remote_server} did not send its signature for a restricted \
							 room"
						)))
					})
				}) {
				| Ok(signature) => {
					join_event
						.get_mut("signatures")
						.expect("we created a valid pdu")
						.as_object_mut()
						.expect("we created a valid pdu")
						.insert(remote_server.to_string(), signature.clone());
				},
				| Err(e) => {
					warn!(
						"Server {remote_server} sent invalid signature in send_join signatures \
						 for event {signed_value:?}: {e:?}",
					);
				},
			}
		}
	}

	services
		.rooms
		.short
		.get_or_create_shortroomid(room_id)
		.await;

	info!("Parsing join event");
	let parsed_join_pdu = PduEvent::from_id_val(&event_id, join_event.clone())
		.map_err(|e| err!(BadServerResponse("Invalid join event PDU: {e:?}")))?;

	info!("Acquiring server signing keys for response events");
	let resp_events = &send_join_response.room_state;
	let resp_state = &resp_events.state;
	let resp_auth = &resp_events.auth_chain;
	services
		.server_keys
		.acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter()))
		.await;

	// Validate every state event from the response, store each as an
	// outlier, and build the shortstatekey -> event_id state map.
	info!("Going through send_join response room_state");
	let cork = services.db.cork_and_flush();
	let state = send_join_response
		.room_state
		.state
		.iter()
		.stream()
		.then(|pdu| {
			services
				.server_keys
				.validate_and_add_event_id_no_fetch(pdu, &room_version_id)
		})
		.ready_filter_map(Result::ok)
		.fold(HashMap::new(), |mut state, (event_id, value)| async move {
			let pdu = match PduEvent::from_id_val(&event_id, value.clone()) {
				| Ok(pdu) => pdu,
				| Err(e) => {
					debug_warn!("Invalid PDU in send_join response: {e:?}: {value:#?}");
					return state;
				},
			};

			services.rooms.outlier.add_pdu_outlier(&event_id, &value);
			if let Some(state_key) = &pdu.state_key {
				let shortstatekey = services
					.rooms
					.short
					.get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)
					.await;

				state.insert(shortstatekey, pdu.event_id.clone());
			}

			state
		})
		.await;

	drop(cork);

	// Auth-chain events are validated and stored as outliers as well.
	info!("Going through send_join response auth_chain");
	let cork = services.db.cork_and_flush();
	send_join_response
		.room_state
		.auth_chain
		.iter()
		.stream()
		.then(|pdu| {
			services
				.server_keys
				.validate_and_add_event_id_no_fetch(pdu, &room_version_id)
		})
		.ready_filter_map(Result::ok)
		.ready_for_each(|(event_id, value)| {
			services.rooms.outlier.add_pdu_outlier(&event_id, &value);
		})
		.await;

	drop(cork);

	// Auth-check our own join event against the state we just received.
	debug!("Running send_join auth check");
	let fetch_state = &state;
	let state_fetch = |k: StateEventType, s: StateKey| async move {
		let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?;

		let event_id = fetch_state.get(&shortstatekey)?;
		services.rooms.timeline.get_pdu(event_id).await.ok()
	};

	let auth_check = state_res::event_auth::auth_check(
		&state_res::RoomVersion::new(&room_version_id)?,
		&parsed_join_pdu,
		None, // TODO: third party invite
		|k, s| state_fetch(k.clone(), s.into()),
	)
	.await
	.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;

	if !auth_check {
		return Err!(Request(Forbidden("Auth check failed")));
	}

	info!("Compressing state from send_join");
	let compressed: CompressedState = services
		.rooms
		.state_compressor
		.compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow())))
		.collect()
		.await;

	debug!("Saving compressed state");
	let HashSetCompressStateEvent {
		shortstatehash: statehash_before_join,
		added,
		removed,
	} = services
		.rooms
		.state_compressor
		.save_state(room_id, Arc::new(compressed))
		.await?;

	debug!("Forcing state for new room");
	services
		.rooms
		.state
		.force_state(room_id, statehash_before_join, added, removed, &state_lock)
		.await?;

	info!("Updating joined counts for new room");
	services
		.rooms
		.state_cache
		.update_joined_count(room_id)
		.await;

	// We append to state before appending the pdu, so we don't have a moment in
	// time with the pdu without it's state. This is okay because append_pdu can't
	// fail.
	let statehash_after_join = services
		.rooms
		.state
		.append_to_state(&parsed_join_pdu)
		.await?;

	info!("Appending new room join event");
	services
		.rooms
		.timeline
		.append_pdu(
			&parsed_join_pdu,
			join_event,
			once(parsed_join_pdu.event_id.borrow()),
			&state_lock,
		)
		.await?;

	info!("Setting final room state for new room");
	// We set the room state after inserting the pdu, so that we never have a moment
	// in time where events in the current room state do not exist
	services
		.rooms
		.state
		.set_room_state(room_id, statehash_after_join, &state_lock);

	Ok(())
}
|
|
||||||
|
|
||||||
/// Local join: builds and appends the join membership event directly.
///
/// If the room is restricted and the direct append fails, falls back to a
/// federated make_join/send_join round-trip so a remote resident server can
/// authorize the restricted join on our behalf.
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")]
async fn join_room_by_id_helper_local(
	services: &Services,
	sender_user: &UserId,
	room_id: &RoomId,
	reason: Option<String>,
	servers: &[OwnedServerName],
	_third_party_signed: Option<&ThirdPartySigned>,
	state_lock: RoomMutexGuard,
) -> Result {
	debug_info!("We can join locally");

	let join_rules_event_content = services
		.rooms
		.state_accessor
		.room_state_get_content::<RoomJoinRulesEventContent>(
			room_id,
			&StateEventType::RoomJoinRules,
			"",
		)
		.await;

	// For restricted/knock-restricted rooms, collect the rooms whose
	// membership can authorize this join.
	let restriction_rooms = match join_rules_event_content {
		| Ok(RoomJoinRulesEventContent {
			join_rule: JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted),
		}) => restricted
			.allow
			.into_iter()
			.filter_map(|a| match a {
				| AllowRule::RoomMembership(r) => Some(r.room_id),
				| _ => None,
			})
			.collect(),
		| _ => Vec::new(),
	};

	// If the sender is in one of the allow-rooms, pick any local user with
	// invite power to act as the authorizing user for the restricted join.
	let join_authorized_via_users_server: Option<OwnedUserId> = {
		if restriction_rooms
			.iter()
			.stream()
			.any(|restriction_room_id| {
				services
					.rooms
					.state_cache
					.is_joined(sender_user, restriction_room_id)
			})
			.await
		{
			services
				.rooms
				.state_cache
				.local_users_in_room(room_id)
				.filter(|user| {
					services.rooms.state_accessor.user_can_invite(
						room_id,
						user,
						sender_user,
						&state_lock,
					)
				})
				.boxed()
				.next()
				.await
				.map(ToOwned::to_owned)
		} else {
			None
		}
	};

	let content = RoomMemberEventContent {
		displayname: services.users.displayname(sender_user).await.ok(),
		avatar_url: services.users.avatar_url(sender_user).await.ok(),
		blurhash: services.users.blurhash(sender_user).await.ok(),
		reason: reason.clone(),
		join_authorized_via_users_server,
		..RoomMemberEventContent::new(MembershipState::Join)
	};

	// Try normal join first
	let Err(error) = services
		.rooms
		.timeline
		.build_and_append_pdu(
			PduBuilder::state(sender_user.to_string(), &content),
			sender_user,
			room_id,
			&state_lock,
		)
		.await
	else {
		return Ok(());
	};

	// No restriction rooms and no usable remote server: nothing more we can
	// try, surface the original error.
	if restriction_rooms.is_empty()
		&& (servers.is_empty()
			|| servers.len() == 1 && services.globals.server_is_ours(&servers[0]))
	{
		return Err(error);
	}

	warn!(
		"We couldn't do the join locally, maybe federation can help to satisfy the restricted \
		 join requirements"
	);
	let Ok((make_join_response, remote_server)) =
		make_join_request(services, sender_user, room_id, servers).await
	else {
		return Err(error);
	};

	let Some(room_version_id) = make_join_response.room_version else {
		return Err!(BadServerResponse("Remote room version is not supported by conduwuit"));
	};

	if !services.server.supported_room_version(&room_version_id) {
		return Err!(BadServerResponse(
			"Remote room version {room_version_id} is not supported by conduwuit"
		));
	}

	let mut join_event_stub: CanonicalJsonObject =
		serde_json::from_str(make_join_response.event.get()).map_err(|e| {
			err!(BadServerResponse("Invalid make_join event json received from server: {e:?}"))
		})?;

	// Note the spec spelling "authorised" in the wire-format key.
	let join_authorized_via_users_server = join_event_stub
		.get("content")
		.map(|s| {
			s.as_object()?
				.get("join_authorised_via_users_server")?
				.as_str()
		})
		.and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok());

	join_event_stub.insert(
		"origin".to_owned(),
		CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()),
	);
	join_event_stub.insert(
		"origin_server_ts".to_owned(),
		CanonicalJsonValue::Integer(
			utils::millis_since_unix_epoch()
				.try_into()
				.expect("Timestamp is valid js_int value"),
		),
	);
	join_event_stub.insert(
		"content".to_owned(),
		to_canonical_value(RoomMemberEventContent {
			displayname: services.users.displayname(sender_user).await.ok(),
			avatar_url: services.users.avatar_url(sender_user).await.ok(),
			blurhash: services.users.blurhash(sender_user).await.ok(),
			reason,
			join_authorized_via_users_server,
			..RoomMemberEventContent::new(MembershipState::Join)
		})
		.expect("event is valid, we just created it"),
	);

	// We keep the "event_id" in the pdu only in v1 or
	// v2 rooms
	match room_version_id {
		| RoomVersionId::V1 | RoomVersionId::V2 => {},
		| _ => {
			join_event_stub.remove("event_id");
		},
	}

	// In order to create a compatible ref hash (EventID) the `hashes` field needs
	// to be present
	services
		.server_keys
		.hash_and_sign_event(&mut join_event_stub, &room_version_id)?;

	// Generate event id
	let event_id = gen_event_id(&join_event_stub, &room_version_id)?;

	// Add event_id back
	join_event_stub
		.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));

	// It has enough fields to be called a proper event now
	let join_event = join_event_stub;

	let send_join_response = services
		.sending
		.send_synapse_request(
			&remote_server,
			federation::membership::create_join_event::v2::Request {
				room_id: room_id.to_owned(),
				event_id: event_id.clone(),
				omit_members: false,
				pdu: services
					.sending
					.convert_to_outgoing_federation_event(join_event.clone())
					.await,
			},
		)
		.await?;

	if let Some(signed_raw) = send_join_response.room_state.event {
		let (signed_event_id, signed_value) =
			gen_event_id_canonical_json(&signed_raw, &room_version_id).map_err(|e| {
				err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}"))))
			})?;

		if signed_event_id != event_id {
			return Err!(Request(BadJson(
				warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID")
			)));
		}

		// The room mutex must be released before handing the signed event to
		// the incoming-PDU handler, which takes the lock itself.
		drop(state_lock);
		services
			.rooms
			.event_handler
			.handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true)
			.boxed()
			.await?;
	} else {
		// The remote did not return the signed event; fall back to the
		// original local-join error.
		return Err(error);
	}

	Ok(())
}
|
|
||||||
|
|
||||||
/// Asks each candidate server (skipping ourselves) for a make_join template
/// until one succeeds.
///
/// Gives up early after 15 version-incompatibility responses (the room
/// version is presumably unsupported) or after 40 failed attempts overall.
/// Returns the first successful response together with the responding
/// server's name.
async fn make_join_request(
	services: &Services,
	sender_user: &UserId,
	room_id: &RoomId,
	servers: &[OwnedServerName],
) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> {
	let mut make_join_response_and_server =
		Err!(BadServerResponse("No server available to assist in joining."));

	let mut make_join_counter: usize = 0;
	let mut incompatible_room_version_count: usize = 0;

	for remote_server in servers {
		// Never ask ourselves over federation.
		if services.globals.server_is_ours(remote_server) {
			continue;
		}
		info!("Asking {remote_server} for make_join ({make_join_counter})");
		let make_join_response = services
			.sending
			.send_federation_request(
				remote_server,
				federation::membership::prepare_join_event::v1::Request {
					room_id: room_id.to_owned(),
					user_id: sender_user.to_owned(),
					ver: services.server.supported_room_versions().collect(),
				},
			)
			.await;

		trace!("make_join response: {:?}", make_join_response);
		make_join_counter = make_join_counter.saturating_add(1);

		if let Err(ref e) = make_join_response {
			// Count version-related rejections separately: many of them
			// indicate the room version itself is the problem, not the peers.
			if matches!(
				e.kind(),
				ErrorKind::IncompatibleRoomVersion { .. } | ErrorKind::UnsupportedRoomVersion
			) {
				incompatible_room_version_count =
					incompatible_room_version_count.saturating_add(1);
			}

			if incompatible_room_version_count > 15 {
				info!(
					"15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or \
					 M_UNSUPPORTED_ROOM_VERSION, assuming that conduwuit does not support the \
					 room version {room_id}: {e}"
				);
				make_join_response_and_server =
					Err!(BadServerResponse("Room version is not supported by Conduwuit"));
				return make_join_response_and_server;
			}

			if make_join_counter > 40 {
				warn!(
					"40 servers failed to provide valid make_join response, assuming no server \
					 can assist in joining."
				);
				make_join_response_and_server =
					Err!(BadServerResponse("No server available to assist in joining."));

				return make_join_response_and_server;
			}
		}

		make_join_response_and_server = make_join_response.map(|r| (r, remote_server.clone()));

		// Stop at the first server that answered successfully.
		if make_join_response_and_server.is_ok() {
			break;
		}
	}

	make_join_response_and_server
}
|
|
|
@ -1,65 +0,0 @@
|
||||||
use axum::extract::State;
|
|
||||||
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
|
|
||||||
use ruma::{
|
|
||||||
api::client::membership::kick_user,
|
|
||||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::Ruma;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/kick`
|
|
||||||
///
|
|
||||||
/// Tries to send a kick event into the room.
|
|
||||||
pub(crate) async fn kick_user_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
body: Ruma<kick_user::v3::Request>,
|
|
||||||
) -> Result<kick_user::v3::Response> {
|
|
||||||
let sender_user = body.sender_user();
|
|
||||||
if services.users.is_suspended(sender_user).await? {
|
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
|
||||||
}
|
|
||||||
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
|
|
||||||
|
|
||||||
let Ok(event) = services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.get_member(&body.room_id, &body.user_id)
|
|
||||||
.await
|
|
||||||
else {
|
|
||||||
// copy synapse's behaviour of returning 200 without any change to the state
|
|
||||||
// instead of erroring on left users
|
|
||||||
return Ok(kick_user::v3::Response::new());
|
|
||||||
};
|
|
||||||
|
|
||||||
if !matches!(
|
|
||||||
event.membership,
|
|
||||||
MembershipState::Invite | MembershipState::Knock | MembershipState::Join,
|
|
||||||
) {
|
|
||||||
return Err!(Request(Forbidden(
|
|
||||||
"Cannot kick a user who is not apart of the room (current membership: {})",
|
|
||||||
event.membership
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Leave,
|
|
||||||
reason: body.reason.clone(),
|
|
||||||
is_direct: None,
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
..event
|
|
||||||
}),
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
drop(state_lock);
|
|
||||||
|
|
||||||
Ok(kick_user::v3::Response::new())
|
|
||||||
}
|
|
|
@ -1,770 +0,0 @@
|
||||||
use std::{borrow::Borrow, collections::HashMap, iter::once, sync::Arc};
|
|
||||||
|
|
||||||
use axum::extract::State;
|
|
||||||
use axum_client_ip::InsecureClientIp;
|
|
||||||
use conduwuit::{
|
|
||||||
Err, Result, debug, debug_info, debug_warn, err, info,
|
|
||||||
matrix::{
|
|
||||||
event::{Event, gen_event_id},
|
|
||||||
pdu::{PduBuilder, PduEvent},
|
|
||||||
},
|
|
||||||
result::FlatOk,
|
|
||||||
trace,
|
|
||||||
utils::{self, shuffle, stream::IterStream},
|
|
||||||
warn,
|
|
||||||
};
|
|
||||||
use futures::{FutureExt, StreamExt};
|
|
||||||
use ruma::{
|
|
||||||
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId,
|
|
||||||
RoomVersionId, UserId,
|
|
||||||
api::{
|
|
||||||
client::knock::knock_room,
|
|
||||||
federation::{self},
|
|
||||||
},
|
|
||||||
canonical_json::to_canonical_value,
|
|
||||||
events::{
|
|
||||||
StateEventType,
|
|
||||||
room::{
|
|
||||||
join_rules::{AllowRule, JoinRule},
|
|
||||||
member::{MembershipState, RoomMemberEventContent},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use service::{
|
|
||||||
Services,
|
|
||||||
rooms::{
|
|
||||||
state::RoomMutexGuard,
|
|
||||||
state_compressor::{CompressedState, HashSetCompressStateEvent},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::{banned_room_check, join::join_room_by_id_helper};
|
|
||||||
use crate::Ruma;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}`
|
|
||||||
///
|
|
||||||
/// Tries to knock the room to ask permission to join for the sender user.
|
|
||||||
#[tracing::instrument(skip_all, fields(%client), name = "knock")]
|
|
||||||
pub(crate) async fn knock_room_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
|
||||||
body: Ruma<knock_room::v3::Request>,
|
|
||||||
) -> Result<knock_room::v3::Response> {
|
|
||||||
let sender_user = body.sender_user();
|
|
||||||
let body = &body.body;
|
|
||||||
if services.users.is_suspended(sender_user).await? {
|
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) {
|
|
||||||
| Ok(room_id) => {
|
|
||||||
banned_room_check(
|
|
||||||
&services,
|
|
||||||
sender_user,
|
|
||||||
Some(&room_id),
|
|
||||||
room_id.server_name(),
|
|
||||||
client,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let mut servers = body.via.clone();
|
|
||||||
servers.extend(
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.servers_invite_via(&room_id)
|
|
||||||
.map(ToOwned::to_owned)
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
.await,
|
|
||||||
);
|
|
||||||
|
|
||||||
servers.extend(
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.invite_state(sender_user, &room_id)
|
|
||||||
.await
|
|
||||||
.unwrap_or_default()
|
|
||||||
.iter()
|
|
||||||
.filter_map(|event| event.get_field("sender").ok().flatten())
|
|
||||||
.filter_map(|sender: &str| UserId::parse(sender).ok())
|
|
||||||
.map(|user| user.server_name().to_owned()),
|
|
||||||
);
|
|
||||||
|
|
||||||
if let Some(server) = room_id.server_name() {
|
|
||||||
servers.push(server.to_owned());
|
|
||||||
}
|
|
||||||
|
|
||||||
servers.sort_unstable();
|
|
||||||
servers.dedup();
|
|
||||||
shuffle(&mut servers);
|
|
||||||
|
|
||||||
(servers, room_id)
|
|
||||||
},
|
|
||||||
| Err(room_alias) => {
|
|
||||||
let (room_id, mut servers) = services
|
|
||||||
.rooms
|
|
||||||
.alias
|
|
||||||
.resolve_alias(&room_alias, Some(body.via.clone()))
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
banned_room_check(
|
|
||||||
&services,
|
|
||||||
sender_user,
|
|
||||||
Some(&room_id),
|
|
||||||
Some(room_alias.server_name()),
|
|
||||||
client,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let addl_via_servers = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.servers_invite_via(&room_id)
|
|
||||||
.map(ToOwned::to_owned);
|
|
||||||
|
|
||||||
let addl_state_servers = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.invite_state(sender_user, &room_id)
|
|
||||||
.await
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
let mut addl_servers: Vec<_> = addl_state_servers
|
|
||||||
.iter()
|
|
||||||
.map(|event| event.get_field("sender"))
|
|
||||||
.filter_map(FlatOk::flat_ok)
|
|
||||||
.map(|user: &UserId| user.server_name().to_owned())
|
|
||||||
.stream()
|
|
||||||
.chain(addl_via_servers)
|
|
||||||
.collect()
|
|
||||||
.await;
|
|
||||||
|
|
||||||
addl_servers.sort_unstable();
|
|
||||||
addl_servers.dedup();
|
|
||||||
shuffle(&mut addl_servers);
|
|
||||||
servers.append(&mut addl_servers);
|
|
||||||
|
|
||||||
(servers, room_id)
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
knock_room_by_id_helper(&services, sender_user, &room_id, body.reason.clone(), &servers)
|
|
||||||
.boxed()
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn knock_room_by_id_helper(
|
|
||||||
services: &Services,
|
|
||||||
sender_user: &UserId,
|
|
||||||
room_id: &RoomId,
|
|
||||||
reason: Option<String>,
|
|
||||||
servers: &[OwnedServerName],
|
|
||||||
) -> Result<knock_room::v3::Response> {
|
|
||||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
|
||||||
|
|
||||||
if services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_invited(sender_user, room_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
debug_warn!("{sender_user} is already invited in {room_id} but attempted to knock");
|
|
||||||
return Err!(Request(Forbidden(
|
|
||||||
"You cannot knock on a room you are already invited/accepted to."
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
if services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_joined(sender_user, room_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
debug_warn!("{sender_user} is already joined in {room_id} but attempted to knock");
|
|
||||||
return Err!(Request(Forbidden("You cannot knock on a room you are already joined in.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
if services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_knocked(sender_user, room_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
debug_warn!("{sender_user} is already knocked in {room_id}");
|
|
||||||
return Ok(knock_room::v3::Response { room_id: room_id.into() });
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Ok(membership) = services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.get_member(room_id, sender_user)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
if membership.membership == MembershipState::Ban {
|
|
||||||
debug_warn!("{sender_user} is banned from {room_id} but attempted to knock");
|
|
||||||
return Err!(Request(Forbidden("You cannot knock on a room you are banned from.")));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// For knock_restricted rooms, check if the user meets the restricted conditions
|
|
||||||
// If they do, attempt to join instead of knock
|
|
||||||
// This is not mentioned in the spec, but should be allowable (we're allowed to
|
|
||||||
// auto-join invites to knocked rooms)
|
|
||||||
let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
|
|
||||||
|
|
||||||
if let JoinRule::KnockRestricted(restricted) = &join_rule {
|
|
||||||
let restriction_rooms: Vec<_> = restricted
|
|
||||||
.allow
|
|
||||||
.iter()
|
|
||||||
.filter_map(|a| match a {
|
|
||||||
| AllowRule::RoomMembership(r) => Some(&r.room_id),
|
|
||||||
| _ => None,
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
// Check if the user is in any of the allowed rooms
|
|
||||||
let mut user_meets_restrictions = false;
|
|
||||||
for restriction_room_id in &restriction_rooms {
|
|
||||||
if services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_joined(sender_user, restriction_room_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
user_meets_restrictions = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the user meets the restrictions, try joining instead
|
|
||||||
if user_meets_restrictions {
|
|
||||||
debug_info!(
|
|
||||||
"{sender_user} meets the restricted criteria in knock_restricted room \
|
|
||||||
{room_id}, attempting to join instead of knock"
|
|
||||||
);
|
|
||||||
// For this case, we need to drop the state lock and get a new one in
|
|
||||||
// join_room_by_id_helper We need to release the lock here and let
|
|
||||||
// join_room_by_id_helper acquire it again
|
|
||||||
drop(state_lock);
|
|
||||||
match join_room_by_id_helper(
|
|
||||||
services,
|
|
||||||
sender_user,
|
|
||||||
room_id,
|
|
||||||
reason.clone(),
|
|
||||||
servers,
|
|
||||||
None,
|
|
||||||
&None,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
| Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())),
|
|
||||||
| Err(e) => {
|
|
||||||
debug_warn!(
|
|
||||||
"Failed to convert knock to join for {sender_user} in {room_id}: {e:?}"
|
|
||||||
);
|
|
||||||
// Get a new state lock for the remaining knock logic
|
|
||||||
let new_state_lock = services.rooms.state.mutex.lock(room_id).await;
|
|
||||||
|
|
||||||
let server_in_room = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.server_in_room(services.globals.server_name(), room_id)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
let local_knock = server_in_room
|
|
||||||
|| servers.is_empty()
|
|
||||||
|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
|
|
||||||
|
|
||||||
if local_knock {
|
|
||||||
knock_room_helper_local(
|
|
||||||
services,
|
|
||||||
sender_user,
|
|
||||||
room_id,
|
|
||||||
reason,
|
|
||||||
servers,
|
|
||||||
new_state_lock,
|
|
||||||
)
|
|
||||||
.boxed()
|
|
||||||
.await?;
|
|
||||||
} else {
|
|
||||||
knock_room_helper_remote(
|
|
||||||
services,
|
|
||||||
sender_user,
|
|
||||||
room_id,
|
|
||||||
reason,
|
|
||||||
servers,
|
|
||||||
new_state_lock,
|
|
||||||
)
|
|
||||||
.boxed()
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ok(knock_room::v3::Response::new(room_id.to_owned()));
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
|
|
||||||
debug_warn!(
|
|
||||||
"{sender_user} attempted to knock on room {room_id} but its join rule is \
|
|
||||||
{join_rule:?}, not knock or knock_restricted"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let server_in_room = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.server_in_room(services.globals.server_name(), room_id)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
let local_knock = server_in_room
|
|
||||||
|| servers.is_empty()
|
|
||||||
|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
|
|
||||||
|
|
||||||
if local_knock {
|
|
||||||
knock_room_helper_local(services, sender_user, room_id, reason, servers, state_lock)
|
|
||||||
.boxed()
|
|
||||||
.await?;
|
|
||||||
} else {
|
|
||||||
knock_room_helper_remote(services, sender_user, room_id, reason, servers, state_lock)
|
|
||||||
.boxed()
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(knock_room::v3::Response::new(room_id.to_owned()))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn knock_room_helper_local(
|
|
||||||
services: &Services,
|
|
||||||
sender_user: &UserId,
|
|
||||||
room_id: &RoomId,
|
|
||||||
reason: Option<String>,
|
|
||||||
servers: &[OwnedServerName],
|
|
||||||
state_lock: RoomMutexGuard,
|
|
||||||
) -> Result {
|
|
||||||
debug_info!("We can knock locally");
|
|
||||||
|
|
||||||
let room_version_id = services.rooms.state.get_room_version(room_id).await?;
|
|
||||||
|
|
||||||
if matches!(
|
|
||||||
room_version_id,
|
|
||||||
RoomVersionId::V1
|
|
||||||
| RoomVersionId::V2
|
|
||||||
| RoomVersionId::V3
|
|
||||||
| RoomVersionId::V4
|
|
||||||
| RoomVersionId::V5
|
|
||||||
| RoomVersionId::V6
|
|
||||||
) {
|
|
||||||
return Err!(Request(Forbidden("This room does not support knocking.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
let content = RoomMemberEventContent {
|
|
||||||
displayname: services.users.displayname(sender_user).await.ok(),
|
|
||||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
|
||||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
|
||||||
reason: reason.clone(),
|
|
||||||
..RoomMemberEventContent::new(MembershipState::Knock)
|
|
||||||
};
|
|
||||||
|
|
||||||
// Try normal knock first
|
|
||||||
let Err(error) = services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder::state(sender_user.to_string(), &content),
|
|
||||||
sender_user,
|
|
||||||
room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
else {
|
|
||||||
return Ok(());
|
|
||||||
};
|
|
||||||
|
|
||||||
if servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0]))
|
|
||||||
{
|
|
||||||
return Err(error);
|
|
||||||
}
|
|
||||||
|
|
||||||
warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock");
|
|
||||||
|
|
||||||
let (make_knock_response, remote_server) =
|
|
||||||
make_knock_request(services, sender_user, room_id, servers).await?;
|
|
||||||
|
|
||||||
info!("make_knock finished");
|
|
||||||
|
|
||||||
let room_version_id = make_knock_response.room_version;
|
|
||||||
|
|
||||||
if !services.server.supported_room_version(&room_version_id) {
|
|
||||||
return Err!(BadServerResponse(
|
|
||||||
"Remote room version {room_version_id} is not supported by conduwuit"
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut knock_event_stub = serde_json::from_str::<CanonicalJsonObject>(
|
|
||||||
make_knock_response.event.get(),
|
|
||||||
)
|
|
||||||
.map_err(|e| {
|
|
||||||
err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}"))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
knock_event_stub.insert(
|
|
||||||
"origin".to_owned(),
|
|
||||||
CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()),
|
|
||||||
);
|
|
||||||
knock_event_stub.insert(
|
|
||||||
"origin_server_ts".to_owned(),
|
|
||||||
CanonicalJsonValue::Integer(
|
|
||||||
utils::millis_since_unix_epoch()
|
|
||||||
.try_into()
|
|
||||||
.expect("Timestamp is valid js_int value"),
|
|
||||||
),
|
|
||||||
);
|
|
||||||
knock_event_stub.insert(
|
|
||||||
"content".to_owned(),
|
|
||||||
to_canonical_value(RoomMemberEventContent {
|
|
||||||
displayname: services.users.displayname(sender_user).await.ok(),
|
|
||||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
|
||||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
|
||||||
reason,
|
|
||||||
..RoomMemberEventContent::new(MembershipState::Knock)
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
);
|
|
||||||
|
|
||||||
// In order to create a compatible ref hash (EventID) the `hashes` field needs
|
|
||||||
// to be present
|
|
||||||
services
|
|
||||||
.server_keys
|
|
||||||
.hash_and_sign_event(&mut knock_event_stub, &room_version_id)?;
|
|
||||||
|
|
||||||
// Generate event id
|
|
||||||
let event_id = gen_event_id(&knock_event_stub, &room_version_id)?;
|
|
||||||
|
|
||||||
// Add event_id
|
|
||||||
knock_event_stub
|
|
||||||
.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));
|
|
||||||
|
|
||||||
// It has enough fields to be called a proper event now
|
|
||||||
let knock_event = knock_event_stub;
|
|
||||||
|
|
||||||
info!("Asking {remote_server} for send_knock in room {room_id}");
|
|
||||||
let send_knock_request = federation::knock::send_knock::v1::Request {
|
|
||||||
room_id: room_id.to_owned(),
|
|
||||||
event_id: event_id.clone(),
|
|
||||||
pdu: services
|
|
||||||
.sending
|
|
||||||
.convert_to_outgoing_federation_event(knock_event.clone())
|
|
||||||
.await,
|
|
||||||
};
|
|
||||||
|
|
||||||
let send_knock_response = services
|
|
||||||
.sending
|
|
||||||
.send_federation_request(&remote_server, send_knock_request)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("send_knock finished");
|
|
||||||
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.short
|
|
||||||
.get_or_create_shortroomid(room_id)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
info!("Parsing knock event");
|
|
||||||
|
|
||||||
let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone())
|
|
||||||
.map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?;
|
|
||||||
|
|
||||||
info!("Updating membership locally to knock state with provided stripped state events");
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.update_membership(
|
|
||||||
room_id,
|
|
||||||
sender_user,
|
|
||||||
parsed_knock_pdu
|
|
||||||
.get_content::<RoomMemberEventContent>()
|
|
||||||
.expect("we just created this"),
|
|
||||||
sender_user,
|
|
||||||
Some(send_knock_response.knock_room_state),
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("Appending room knock event locally");
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.append_pdu(
|
|
||||||
&parsed_knock_pdu,
|
|
||||||
knock_event,
|
|
||||||
once(parsed_knock_pdu.event_id.borrow()),
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn knock_room_helper_remote(
|
|
||||||
services: &Services,
|
|
||||||
sender_user: &UserId,
|
|
||||||
room_id: &RoomId,
|
|
||||||
reason: Option<String>,
|
|
||||||
servers: &[OwnedServerName],
|
|
||||||
state_lock: RoomMutexGuard,
|
|
||||||
) -> Result {
|
|
||||||
info!("Knocking {room_id} over federation.");
|
|
||||||
|
|
||||||
let (make_knock_response, remote_server) =
|
|
||||||
make_knock_request(services, sender_user, room_id, servers).await?;
|
|
||||||
|
|
||||||
info!("make_knock finished");
|
|
||||||
|
|
||||||
let room_version_id = make_knock_response.room_version;
|
|
||||||
|
|
||||||
if !services.server.supported_room_version(&room_version_id) {
|
|
||||||
return Err!(BadServerResponse(
|
|
||||||
"Remote room version {room_version_id} is not supported by conduwuit"
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut knock_event_stub: CanonicalJsonObject =
|
|
||||||
serde_json::from_str(make_knock_response.event.get()).map_err(|e| {
|
|
||||||
err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}"))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
knock_event_stub.insert(
|
|
||||||
"origin".to_owned(),
|
|
||||||
CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()),
|
|
||||||
);
|
|
||||||
knock_event_stub.insert(
|
|
||||||
"origin_server_ts".to_owned(),
|
|
||||||
CanonicalJsonValue::Integer(
|
|
||||||
utils::millis_since_unix_epoch()
|
|
||||||
.try_into()
|
|
||||||
.expect("Timestamp is valid js_int value"),
|
|
||||||
),
|
|
||||||
);
|
|
||||||
knock_event_stub.insert(
|
|
||||||
"content".to_owned(),
|
|
||||||
to_canonical_value(RoomMemberEventContent {
|
|
||||||
displayname: services.users.displayname(sender_user).await.ok(),
|
|
||||||
avatar_url: services.users.avatar_url(sender_user).await.ok(),
|
|
||||||
blurhash: services.users.blurhash(sender_user).await.ok(),
|
|
||||||
reason,
|
|
||||||
..RoomMemberEventContent::new(MembershipState::Knock)
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
);
|
|
||||||
|
|
||||||
// In order to create a compatible ref hash (EventID) the `hashes` field needs
|
|
||||||
// to be present
|
|
||||||
services
|
|
||||||
.server_keys
|
|
||||||
.hash_and_sign_event(&mut knock_event_stub, &room_version_id)?;
|
|
||||||
|
|
||||||
// Generate event id
|
|
||||||
let event_id = gen_event_id(&knock_event_stub, &room_version_id)?;
|
|
||||||
|
|
||||||
// Add event_id
|
|
||||||
knock_event_stub
|
|
||||||
.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));
|
|
||||||
|
|
||||||
// It has enough fields to be called a proper event now
|
|
||||||
let knock_event = knock_event_stub;
|
|
||||||
|
|
||||||
info!("Asking {remote_server} for send_knock in room {room_id}");
|
|
||||||
let send_knock_request = federation::knock::send_knock::v1::Request {
|
|
||||||
room_id: room_id.to_owned(),
|
|
||||||
event_id: event_id.clone(),
|
|
||||||
pdu: services
|
|
||||||
.sending
|
|
||||||
.convert_to_outgoing_federation_event(knock_event.clone())
|
|
||||||
.await,
|
|
||||||
};
|
|
||||||
|
|
||||||
let send_knock_response = services
|
|
||||||
.sending
|
|
||||||
.send_federation_request(&remote_server, send_knock_request)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("send_knock finished");
|
|
||||||
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.short
|
|
||||||
.get_or_create_shortroomid(room_id)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
info!("Parsing knock event");
|
|
||||||
let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone())
|
|
||||||
.map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?;
|
|
||||||
|
|
||||||
info!("Going through send_knock response knock state events");
|
|
||||||
let state = send_knock_response
|
|
||||||
.knock_room_state
|
|
||||||
.iter()
|
|
||||||
.map(|event| serde_json::from_str::<CanonicalJsonObject>(event.clone().into_json().get()))
|
|
||||||
.filter_map(Result::ok);
|
|
||||||
|
|
||||||
let mut state_map: HashMap<u64, OwnedEventId> = HashMap::new();
|
|
||||||
|
|
||||||
for event in state {
|
|
||||||
let Some(state_key) = event.get("state_key") else {
|
|
||||||
debug_warn!("send_knock stripped state event missing state_key: {event:?}");
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
let Some(event_type) = event.get("type") else {
|
|
||||||
debug_warn!("send_knock stripped state event missing event type: {event:?}");
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
let Ok(state_key) = serde_json::from_value::<String>(state_key.clone().into()) else {
|
|
||||||
debug_warn!("send_knock stripped state event has invalid state_key: {event:?}");
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
let Ok(event_type) = serde_json::from_value::<StateEventType>(event_type.clone().into())
|
|
||||||
else {
|
|
||||||
debug_warn!("send_knock stripped state event has invalid event type: {event:?}");
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
let event_id = gen_event_id(&event, &room_version_id)?;
|
|
||||||
let shortstatekey = services
|
|
||||||
.rooms
|
|
||||||
.short
|
|
||||||
.get_or_create_shortstatekey(&event_type, &state_key)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
services.rooms.outlier.add_pdu_outlier(&event_id, &event);
|
|
||||||
state_map.insert(shortstatekey, event_id.clone());
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Compressing state from send_knock");
|
|
||||||
let compressed: CompressedState = services
|
|
||||||
.rooms
|
|
||||||
.state_compressor
|
|
||||||
.compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow())))
|
|
||||||
.collect()
|
|
||||||
.await;
|
|
||||||
|
|
||||||
debug!("Saving compressed state");
|
|
||||||
let HashSetCompressStateEvent {
|
|
||||||
shortstatehash: statehash_before_knock,
|
|
||||||
added,
|
|
||||||
removed,
|
|
||||||
} = services
|
|
||||||
.rooms
|
|
||||||
.state_compressor
|
|
||||||
.save_state(room_id, Arc::new(compressed))
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
debug!("Forcing state for new room");
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.state
|
|
||||||
.force_state(room_id, statehash_before_knock, added, removed, &state_lock)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let statehash_after_knock = services
|
|
||||||
.rooms
|
|
||||||
.state
|
|
||||||
.append_to_state(&parsed_knock_pdu)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("Updating membership locally to knock state with provided stripped state events");
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.update_membership(
|
|
||||||
room_id,
|
|
||||||
sender_user,
|
|
||||||
parsed_knock_pdu
|
|
||||||
.get_content::<RoomMemberEventContent>()
|
|
||||||
.expect("we just created this"),
|
|
||||||
sender_user,
|
|
||||||
Some(send_knock_response.knock_room_state),
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("Appending room knock event locally");
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.append_pdu(
|
|
||||||
&parsed_knock_pdu,
|
|
||||||
knock_event,
|
|
||||||
once(parsed_knock_pdu.event_id.borrow()),
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
info!("Setting final room state for new room");
|
|
||||||
// We set the room state after inserting the pdu, so that we never have a moment
|
|
||||||
// in time where events in the current room state do not exist
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.state
|
|
||||||
.set_room_state(room_id, statehash_after_knock, &state_lock);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn make_knock_request(
|
|
||||||
services: &Services,
|
|
||||||
sender_user: &UserId,
|
|
||||||
room_id: &RoomId,
|
|
||||||
servers: &[OwnedServerName],
|
|
||||||
) -> Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> {
|
|
||||||
let mut make_knock_response_and_server =
|
|
||||||
Err!(BadServerResponse("No server available to assist in knocking."));
|
|
||||||
|
|
||||||
let mut make_knock_counter: usize = 0;
|
|
||||||
|
|
||||||
for remote_server in servers {
|
|
||||||
if services.globals.server_is_ours(remote_server) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Asking {remote_server} for make_knock ({make_knock_counter})");
|
|
||||||
|
|
||||||
let make_knock_response = services
|
|
||||||
.sending
|
|
||||||
.send_federation_request(
|
|
||||||
remote_server,
|
|
||||||
federation::knock::create_knock_event_template::v1::Request {
|
|
||||||
room_id: room_id.to_owned(),
|
|
||||||
user_id: sender_user.to_owned(),
|
|
||||||
ver: services.server.supported_room_versions().collect(),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
trace!("make_knock response: {make_knock_response:?}");
|
|
||||||
make_knock_counter = make_knock_counter.saturating_add(1);
|
|
||||||
|
|
||||||
make_knock_response_and_server = make_knock_response.map(|r| (r, remote_server.clone()));
|
|
||||||
|
|
||||||
if make_knock_response_and_server.is_ok() {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
if make_knock_counter > 40 {
|
|
||||||
warn!(
|
|
||||||
"50 servers failed to provide valid make_knock response, assuming no server can \
|
|
||||||
assist in knocking."
|
|
||||||
);
|
|
||||||
make_knock_response_and_server =
|
|
||||||
Err!(BadServerResponse("No server available to assist in knocking."));
|
|
||||||
|
|
||||||
return make_knock_response_and_server;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
make_knock_response_and_server
|
|
||||||
}
|
|
|
@ -1,386 +0,0 @@
|
||||||
use std::collections::HashSet;
|
|
||||||
|
|
||||||
use axum::extract::State;
|
|
||||||
use conduwuit::{
|
|
||||||
Err, Result, debug_info, debug_warn, err,
|
|
||||||
matrix::{event::gen_event_id, pdu::PduBuilder},
|
|
||||||
utils::{self, FutureBoolExt, future::ReadyEqExt},
|
|
||||||
warn,
|
|
||||||
};
|
|
||||||
use futures::{FutureExt, StreamExt, TryFutureExt, pin_mut};
|
|
||||||
use ruma::{
|
|
||||||
CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, RoomId, RoomVersionId, UserId,
|
|
||||||
api::{
|
|
||||||
client::membership::leave_room,
|
|
||||||
federation::{self},
|
|
||||||
},
|
|
||||||
events::{
|
|
||||||
StateEventType,
|
|
||||||
room::member::{MembershipState, RoomMemberEventContent},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use service::Services;
|
|
||||||
|
|
||||||
use crate::Ruma;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/v3/rooms/{roomId}/leave`
|
|
||||||
///
|
|
||||||
/// Tries to leave the sender user from a room.
|
|
||||||
///
|
|
||||||
/// - This should always work if the user is currently joined.
|
|
||||||
pub(crate) async fn leave_room_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
body: Ruma<leave_room::v3::Request>,
|
|
||||||
) -> Result<leave_room::v3::Response> {
|
|
||||||
leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone())
|
|
||||||
.boxed()
|
|
||||||
.await
|
|
||||||
.map(|()| leave_room::v3::Response::new())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms,
|
|
||||||
// and ignores errors
|
|
||||||
pub async fn leave_all_rooms(services: &Services, user_id: &UserId) {
|
|
||||||
let rooms_joined = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_joined(user_id)
|
|
||||||
.map(ToOwned::to_owned);
|
|
||||||
|
|
||||||
let rooms_invited = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_invited(user_id)
|
|
||||||
.map(|(r, _)| r);
|
|
||||||
|
|
||||||
let rooms_knocked = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_knocked(user_id)
|
|
||||||
.map(|(r, _)| r);
|
|
||||||
|
|
||||||
let all_rooms: Vec<_> = rooms_joined
|
|
||||||
.chain(rooms_invited)
|
|
||||||
.chain(rooms_knocked)
|
|
||||||
.collect()
|
|
||||||
.await;
|
|
||||||
|
|
||||||
for room_id in all_rooms {
|
|
||||||
// ignore errors
|
|
||||||
if let Err(e) = leave_room(services, user_id, &room_id, None).boxed().await {
|
|
||||||
warn!(%user_id, "Failed to leave {room_id} remotely: {e}");
|
|
||||||
}
|
|
||||||
|
|
||||||
services.rooms.state_cache.forget(&room_id, user_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn leave_room(
|
|
||||||
services: &Services,
|
|
||||||
user_id: &UserId,
|
|
||||||
room_id: &RoomId,
|
|
||||||
reason: Option<String>,
|
|
||||||
) -> Result {
|
|
||||||
let default_member_content = RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Leave,
|
|
||||||
reason: reason.clone(),
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
is_direct: None,
|
|
||||||
avatar_url: None,
|
|
||||||
displayname: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
blurhash: None,
|
|
||||||
redact_events: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let is_banned = services.rooms.metadata.is_banned(room_id);
|
|
||||||
let is_disabled = services.rooms.metadata.is_disabled(room_id);
|
|
||||||
|
|
||||||
pin_mut!(is_banned, is_disabled);
|
|
||||||
if is_banned.or(is_disabled).await {
|
|
||||||
// the room is banned/disabled, the room must be rejected locally since we
|
|
||||||
// cant/dont want to federate with this server
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.update_membership(
|
|
||||||
room_id,
|
|
||||||
user_id,
|
|
||||||
default_member_content,
|
|
||||||
user_id,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
true,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let dont_have_room = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.server_in_room(services.globals.server_name(), room_id)
|
|
||||||
.eq(&false);
|
|
||||||
|
|
||||||
let not_knocked = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.is_knocked(user_id, room_id)
|
|
||||||
.eq(&false);
|
|
||||||
|
|
||||||
// Ask a remote server if we don't have this room and are not knocking on it
|
|
||||||
if dont_have_room.and(not_knocked).await {
|
|
||||||
if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone())
|
|
||||||
.boxed()
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
warn!(%user_id, "Failed to leave room {room_id} remotely: {e}");
|
|
||||||
// Don't tell the client about this error
|
|
||||||
}
|
|
||||||
|
|
||||||
let last_state = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.invite_state(user_id, room_id)
|
|
||||||
.or_else(|_| services.rooms.state_cache.knock_state(user_id, room_id))
|
|
||||||
.or_else(|_| services.rooms.state_cache.left_state(user_id, room_id))
|
|
||||||
.await
|
|
||||||
.ok();
|
|
||||||
|
|
||||||
// We always drop the invite, we can't rely on other servers
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.update_membership(
|
|
||||||
room_id,
|
|
||||||
user_id,
|
|
||||||
default_member_content,
|
|
||||||
user_id,
|
|
||||||
last_state,
|
|
||||||
None,
|
|
||||||
true,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
} else {
|
|
||||||
let state_lock = services.rooms.state.mutex.lock(room_id).await;
|
|
||||||
|
|
||||||
let Ok(event) = services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_get_content::<RoomMemberEventContent>(
|
|
||||||
room_id,
|
|
||||||
&StateEventType::RoomMember,
|
|
||||||
user_id.as_str(),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
else {
|
|
||||||
debug_warn!(
|
|
||||||
"Trying to leave a room you are not a member of, marking room as left locally."
|
|
||||||
);
|
|
||||||
|
|
||||||
return services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.update_membership(
|
|
||||||
room_id,
|
|
||||||
user_id,
|
|
||||||
default_member_content,
|
|
||||||
user_id,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
true,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
};
|
|
||||||
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder::state(user_id.to_string(), &RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Leave,
|
|
||||||
reason,
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
is_direct: None,
|
|
||||||
..event
|
|
||||||
}),
|
|
||||||
user_id,
|
|
||||||
room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn remote_leave_room(
|
|
||||||
services: &Services,
|
|
||||||
user_id: &UserId,
|
|
||||||
room_id: &RoomId,
|
|
||||||
reason: Option<String>,
|
|
||||||
) -> Result<()> {
|
|
||||||
let mut make_leave_response_and_server =
|
|
||||||
Err!(BadServerResponse("No remote server available to assist in leaving {room_id}."));
|
|
||||||
|
|
||||||
let mut servers: HashSet<OwnedServerName> = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.servers_invite_via(room_id)
|
|
||||||
.map(ToOwned::to_owned)
|
|
||||||
.collect()
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.invite_state(user_id, room_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
| Ok(invite_state) => {
|
|
||||||
servers.extend(
|
|
||||||
invite_state
|
|
||||||
.iter()
|
|
||||||
.filter_map(|event| event.get_field("sender").ok().flatten())
|
|
||||||
.filter_map(|sender: &str| UserId::parse(sender).ok())
|
|
||||||
.map(|user| user.server_name().to_owned()),
|
|
||||||
);
|
|
||||||
},
|
|
||||||
| _ => {
|
|
||||||
match services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.knock_state(user_id, room_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
| Ok(knock_state) => {
|
|
||||||
servers.extend(
|
|
||||||
knock_state
|
|
||||||
.iter()
|
|
||||||
.filter_map(|event| event.get_field("sender").ok().flatten())
|
|
||||||
.filter_map(|sender: &str| UserId::parse(sender).ok())
|
|
||||||
.filter_map(|sender| {
|
|
||||||
if !services.globals.user_is_local(sender) {
|
|
||||||
Some(sender.server_name().to_owned())
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
},
|
|
||||||
| _ => {},
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(room_id_server_name) = room_id.server_name() {
|
|
||||||
servers.insert(room_id_server_name.to_owned());
|
|
||||||
}
|
|
||||||
|
|
||||||
debug_info!("servers in remote_leave_room: {servers:?}");
|
|
||||||
|
|
||||||
for remote_server in servers {
|
|
||||||
let make_leave_response = services
|
|
||||||
.sending
|
|
||||||
.send_federation_request(
|
|
||||||
&remote_server,
|
|
||||||
federation::membership::prepare_leave_event::v1::Request {
|
|
||||||
room_id: room_id.to_owned(),
|
|
||||||
user_id: user_id.to_owned(),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server));
|
|
||||||
|
|
||||||
if make_leave_response_and_server.is_ok() {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let (make_leave_response, remote_server) = make_leave_response_and_server?;
|
|
||||||
|
|
||||||
let Some(room_version_id) = make_leave_response.room_version else {
|
|
||||||
return Err!(BadServerResponse(warn!(
|
|
||||||
"No room version was returned by {remote_server} for {room_id}, room version is \
|
|
||||||
likely not supported by conduwuit"
|
|
||||||
)));
|
|
||||||
};
|
|
||||||
|
|
||||||
if !services.server.supported_room_version(&room_version_id) {
|
|
||||||
return Err!(BadServerResponse(warn!(
|
|
||||||
"Remote room version {room_version_id} for {room_id} is not supported by conduwuit",
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut leave_event_stub = serde_json::from_str::<CanonicalJsonObject>(
|
|
||||||
make_leave_response.event.get(),
|
|
||||||
)
|
|
||||||
.map_err(|e| {
|
|
||||||
err!(BadServerResponse(warn!(
|
|
||||||
"Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}"
|
|
||||||
)))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
// TODO: Is origin needed?
|
|
||||||
leave_event_stub.insert(
|
|
||||||
"origin".to_owned(),
|
|
||||||
CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()),
|
|
||||||
);
|
|
||||||
leave_event_stub.insert(
|
|
||||||
"origin_server_ts".to_owned(),
|
|
||||||
CanonicalJsonValue::Integer(
|
|
||||||
utils::millis_since_unix_epoch()
|
|
||||||
.try_into()
|
|
||||||
.expect("Timestamp is valid js_int value"),
|
|
||||||
),
|
|
||||||
);
|
|
||||||
// Inject the reason key into the event content dict if it exists
|
|
||||||
if let Some(reason) = reason {
|
|
||||||
if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") {
|
|
||||||
content.insert("reason".to_owned(), CanonicalJsonValue::String(reason));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// room v3 and above removed the "event_id" field from remote PDU format
|
|
||||||
match room_version_id {
|
|
||||||
| RoomVersionId::V1 | RoomVersionId::V2 => {},
|
|
||||||
| _ => {
|
|
||||||
leave_event_stub.remove("event_id");
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// In order to create a compatible ref hash (EventID) the `hashes` field needs
|
|
||||||
// to be present
|
|
||||||
services
|
|
||||||
.server_keys
|
|
||||||
.hash_and_sign_event(&mut leave_event_stub, &room_version_id)?;
|
|
||||||
|
|
||||||
// Generate event id
|
|
||||||
let event_id = gen_event_id(&leave_event_stub, &room_version_id)?;
|
|
||||||
|
|
||||||
// Add event_id back
|
|
||||||
leave_event_stub
|
|
||||||
.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));
|
|
||||||
|
|
||||||
// It has enough fields to be called a proper event now
|
|
||||||
let leave_event = leave_event_stub;
|
|
||||||
|
|
||||||
services
|
|
||||||
.sending
|
|
||||||
.send_federation_request(
|
|
||||||
&remote_server,
|
|
||||||
federation::membership::create_leave_event::v2::Request {
|
|
||||||
room_id: room_id.to_owned(),
|
|
||||||
event_id,
|
|
||||||
pdu: services
|
|
||||||
.sending
|
|
||||||
.convert_to_outgoing_federation_event(leave_event.clone())
|
|
||||||
.await,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
|
@ -1,147 +0,0 @@
|
||||||
use axum::extract::State;
|
|
||||||
use conduwuit::{
|
|
||||||
Err, Event, Result, at,
|
|
||||||
utils::{
|
|
||||||
future::TryExtExt,
|
|
||||||
stream::{BroadbandExt, ReadyExt},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use futures::{FutureExt, StreamExt, future::join};
|
|
||||||
use ruma::{
|
|
||||||
api::client::membership::{
|
|
||||||
get_member_events::{self, v3::MembershipEventFilter},
|
|
||||||
joined_members::{self, v3::RoomMember},
|
|
||||||
},
|
|
||||||
events::{
|
|
||||||
StateEventType,
|
|
||||||
room::member::{MembershipState, RoomMemberEventContent},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::Ruma;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/members`
|
|
||||||
///
|
|
||||||
/// Lists all joined users in a room (TODO: at a specific point in time, with a
|
|
||||||
/// specific membership).
|
|
||||||
///
|
|
||||||
/// - Only works if the user is currently joined
|
|
||||||
pub(crate) async fn get_member_events_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
body: Ruma<get_member_events::v3::Request>,
|
|
||||||
) -> Result<get_member_events::v3::Response> {
|
|
||||||
let sender_user = body.sender_user();
|
|
||||||
let membership = body.membership.as_ref();
|
|
||||||
let not_membership = body.not_membership.as_ref();
|
|
||||||
|
|
||||||
if !services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_state_events(sender_user, &body.room_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
return Err!(Request(Forbidden("You don't have permission to view this room.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(get_member_events::v3::Response {
|
|
||||||
chunk: services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_full(&body.room_id)
|
|
||||||
.ready_filter_map(Result::ok)
|
|
||||||
.ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember)
|
|
||||||
.map(at!(1))
|
|
||||||
.ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership))
|
|
||||||
.map(Event::into_format)
|
|
||||||
.collect()
|
|
||||||
.boxed()
|
|
||||||
.await,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members`
|
|
||||||
///
|
|
||||||
/// Lists all members of a room.
|
|
||||||
///
|
|
||||||
/// - The sender user must be in the room
|
|
||||||
/// - TODO: An appservice just needs a puppet joined
|
|
||||||
pub(crate) async fn joined_members_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
body: Ruma<joined_members::v3::Request>,
|
|
||||||
) -> Result<joined_members::v3::Response> {
|
|
||||||
if !services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.user_can_see_state_events(body.sender_user(), &body.room_id)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
return Err!(Request(Forbidden("You don't have permission to view this room.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(joined_members::v3::Response {
|
|
||||||
joined: services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.room_members(&body.room_id)
|
|
||||||
.map(ToOwned::to_owned)
|
|
||||||
.broad_then(|user_id| async move {
|
|
||||||
let (display_name, avatar_url) = join(
|
|
||||||
services.users.displayname(&user_id).ok(),
|
|
||||||
services.users.avatar_url(&user_id).ok(),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
(user_id, RoomMember { display_name, avatar_url })
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
.await,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn membership_filter<Pdu: Event>(
|
|
||||||
pdu: Pdu,
|
|
||||||
for_membership: Option<&MembershipEventFilter>,
|
|
||||||
not_membership: Option<&MembershipEventFilter>,
|
|
||||||
) -> Option<impl Event> {
|
|
||||||
let membership_state_filter = match for_membership {
|
|
||||||
| Some(MembershipEventFilter::Ban) => MembershipState::Ban,
|
|
||||||
| Some(MembershipEventFilter::Invite) => MembershipState::Invite,
|
|
||||||
| Some(MembershipEventFilter::Knock) => MembershipState::Knock,
|
|
||||||
| Some(MembershipEventFilter::Leave) => MembershipState::Leave,
|
|
||||||
| Some(_) | None => MembershipState::Join,
|
|
||||||
};
|
|
||||||
|
|
||||||
let not_membership_state_filter = match not_membership {
|
|
||||||
| Some(MembershipEventFilter::Ban) => MembershipState::Ban,
|
|
||||||
| Some(MembershipEventFilter::Invite) => MembershipState::Invite,
|
|
||||||
| Some(MembershipEventFilter::Join) => MembershipState::Join,
|
|
||||||
| Some(MembershipEventFilter::Knock) => MembershipState::Knock,
|
|
||||||
| Some(_) | None => MembershipState::Leave,
|
|
||||||
};
|
|
||||||
|
|
||||||
let evt_membership = pdu.get_content::<RoomMemberEventContent>().ok()?.membership;
|
|
||||||
|
|
||||||
if for_membership.is_some() && not_membership.is_some() {
|
|
||||||
if membership_state_filter != evt_membership
|
|
||||||
|| not_membership_state_filter == evt_membership
|
|
||||||
{
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(pdu)
|
|
||||||
}
|
|
||||||
} else if for_membership.is_some() && not_membership.is_none() {
|
|
||||||
if membership_state_filter != evt_membership {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(pdu)
|
|
||||||
}
|
|
||||||
} else if not_membership.is_some() && for_membership.is_none() {
|
|
||||||
if not_membership_state_filter == evt_membership {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(pdu)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Some(pdu)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,156 +0,0 @@
|
||||||
mod ban;
|
|
||||||
mod forget;
|
|
||||||
mod invite;
|
|
||||||
mod join;
|
|
||||||
mod kick;
|
|
||||||
mod knock;
|
|
||||||
mod leave;
|
|
||||||
mod members;
|
|
||||||
mod unban;
|
|
||||||
|
|
||||||
use std::net::IpAddr;
|
|
||||||
|
|
||||||
use axum::extract::State;
|
|
||||||
use conduwuit::{Err, Result, warn};
|
|
||||||
use futures::{FutureExt, StreamExt};
|
|
||||||
use ruma::{OwnedRoomId, RoomId, ServerName, UserId, api::client::membership::joined_rooms};
|
|
||||||
use service::Services;
|
|
||||||
|
|
||||||
pub(crate) use self::{
|
|
||||||
ban::ban_user_route,
|
|
||||||
forget::forget_room_route,
|
|
||||||
invite::{invite_helper, invite_user_route},
|
|
||||||
join::{join_room_by_id_or_alias_route, join_room_by_id_route},
|
|
||||||
kick::kick_user_route,
|
|
||||||
knock::knock_room_route,
|
|
||||||
leave::leave_room_route,
|
|
||||||
members::{get_member_events_route, joined_members_route},
|
|
||||||
unban::unban_user_route,
|
|
||||||
};
|
|
||||||
pub use self::{
|
|
||||||
join::join_room_by_id_helper,
|
|
||||||
leave::{leave_all_rooms, leave_room},
|
|
||||||
};
|
|
||||||
use crate::{Ruma, client::full_user_deactivate};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/joined_rooms`
|
|
||||||
///
|
|
||||||
/// Lists all rooms the user has joined.
|
|
||||||
pub(crate) async fn joined_rooms_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
body: Ruma<joined_rooms::v3::Request>,
|
|
||||||
) -> Result<joined_rooms::v3::Response> {
|
|
||||||
Ok(joined_rooms::v3::Response {
|
|
||||||
joined_rooms: services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_joined(body.sender_user())
|
|
||||||
.map(ToOwned::to_owned)
|
|
||||||
.collect()
|
|
||||||
.await,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Checks if the room is banned in any way possible and the sender user is not
|
|
||||||
/// an admin.
|
|
||||||
///
|
|
||||||
/// Performs automatic deactivation if `auto_deactivate_banned_room_attempts` is
|
|
||||||
/// enabled
|
|
||||||
#[tracing::instrument(skip(services))]
|
|
||||||
pub(crate) async fn banned_room_check(
|
|
||||||
services: &Services,
|
|
||||||
user_id: &UserId,
|
|
||||||
room_id: Option<&RoomId>,
|
|
||||||
server_name: Option<&ServerName>,
|
|
||||||
client_ip: IpAddr,
|
|
||||||
) -> Result {
|
|
||||||
if services.users.is_admin(user_id).await {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(room_id) = room_id {
|
|
||||||
if services.rooms.metadata.is_banned(room_id).await
|
|
||||||
|| services
|
|
||||||
.moderation
|
|
||||||
.is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid"))
|
|
||||||
{
|
|
||||||
warn!(
|
|
||||||
"User {user_id} who is not an admin attempted to send an invite for or \
|
|
||||||
attempted to join a banned room or banned room server name: {room_id}"
|
|
||||||
);
|
|
||||||
|
|
||||||
if services.server.config.auto_deactivate_banned_room_attempts {
|
|
||||||
warn!(
|
|
||||||
"Automatically deactivating user {user_id} due to attempted banned room join"
|
|
||||||
);
|
|
||||||
|
|
||||||
if services.server.config.admin_room_notices {
|
|
||||||
services
|
|
||||||
.admin
|
|
||||||
.send_text(&format!(
|
|
||||||
"Automatically deactivating user {user_id} due to attempted banned \
|
|
||||||
room join from IP {client_ip}"
|
|
||||||
))
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
|
|
||||||
let all_joined_rooms: Vec<OwnedRoomId> = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_joined(user_id)
|
|
||||||
.map(Into::into)
|
|
||||||
.collect()
|
|
||||||
.await;
|
|
||||||
|
|
||||||
full_user_deactivate(services, user_id, &all_joined_rooms)
|
|
||||||
.boxed()
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
return Err!(Request(Forbidden("This room is banned on this homeserver.")));
|
|
||||||
}
|
|
||||||
} else if let Some(server_name) = server_name {
|
|
||||||
if services
|
|
||||||
.config
|
|
||||||
.forbidden_remote_server_names
|
|
||||||
.is_match(server_name.host())
|
|
||||||
{
|
|
||||||
warn!(
|
|
||||||
"User {user_id} who is not an admin tried joining a room which has the server \
|
|
||||||
name {server_name} that is globally forbidden. Rejecting.",
|
|
||||||
);
|
|
||||||
|
|
||||||
if services.server.config.auto_deactivate_banned_room_attempts {
|
|
||||||
warn!(
|
|
||||||
"Automatically deactivating user {user_id} due to attempted banned room join"
|
|
||||||
);
|
|
||||||
|
|
||||||
if services.server.config.admin_room_notices {
|
|
||||||
services
|
|
||||||
.admin
|
|
||||||
.send_text(&format!(
|
|
||||||
"Automatically deactivating user {user_id} due to attempted banned \
|
|
||||||
room join from IP {client_ip}"
|
|
||||||
))
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
|
|
||||||
let all_joined_rooms: Vec<OwnedRoomId> = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.rooms_joined(user_id)
|
|
||||||
.map(Into::into)
|
|
||||||
.collect()
|
|
||||||
.await;
|
|
||||||
|
|
||||||
full_user_deactivate(services, user_id, &all_joined_rooms)
|
|
||||||
.boxed()
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
return Err!(Request(Forbidden("This remote server is banned on this homeserver.")));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
|
@ -1,58 +0,0 @@
|
||||||
use axum::extract::State;
|
|
||||||
use conduwuit::{Err, Result, matrix::pdu::PduBuilder};
|
|
||||||
use ruma::{
|
|
||||||
api::client::membership::unban_user,
|
|
||||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::Ruma;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/unban`
|
|
||||||
///
|
|
||||||
/// Tries to send an unban event into the room.
|
|
||||||
pub(crate) async fn unban_user_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
body: Ruma<unban_user::v3::Request>,
|
|
||||||
) -> Result<unban_user::v3::Response> {
|
|
||||||
let sender_user = body.sender_user();
|
|
||||||
if services.users.is_suspended(sender_user).await? {
|
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
|
||||||
}
|
|
||||||
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
|
|
||||||
|
|
||||||
let current_member_content = services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.get_member(&body.room_id, &body.user_id)
|
|
||||||
.await
|
|
||||||
.unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Leave));
|
|
||||||
|
|
||||||
if current_member_content.membership != MembershipState::Ban {
|
|
||||||
return Err!(Request(Forbidden(
|
|
||||||
"Cannot unban a user who is not banned (current membership: {})",
|
|
||||||
current_member_content.membership
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
services
|
|
||||||
.rooms
|
|
||||||
.timeline
|
|
||||||
.build_and_append_pdu(
|
|
||||||
PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Leave,
|
|
||||||
reason: body.reason.clone(),
|
|
||||||
join_authorized_via_users_server: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
is_direct: None,
|
|
||||||
..current_member_content
|
|
||||||
}),
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
&state_lock,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
drop(state_lock);
|
|
||||||
|
|
||||||
Ok(unban_user::v3::Response::new())
|
|
||||||
}
|
|
|
@ -1,11 +1,12 @@
|
||||||
|
use core::panic;
|
||||||
|
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result, at,
|
Err, Result, at,
|
||||||
matrix::{
|
matrix::{
|
||||||
event::{Event, Matches},
|
Event,
|
||||||
pdu::PduCount,
|
pdu::{PduCount, PduEvent},
|
||||||
},
|
},
|
||||||
ref_at,
|
|
||||||
utils::{
|
utils::{
|
||||||
IterStream, ReadyExt,
|
IterStream, ReadyExt,
|
||||||
result::{FlatOk, LogErr},
|
result::{FlatOk, LogErr},
|
||||||
|
@ -33,7 +34,6 @@ use ruma::{
|
||||||
},
|
},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
};
|
};
|
||||||
use tracing::warn;
|
|
||||||
|
|
||||||
use crate::Ruma;
|
use crate::Ruma;
|
||||||
|
|
||||||
|
@ -73,7 +73,7 @@ pub(crate) async fn get_message_events_route(
|
||||||
) -> Result<get_message_events::v3::Response> {
|
) -> Result<get_message_events::v3::Response> {
|
||||||
debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted");
|
debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted");
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user();
|
||||||
let sender_device = body.sender_device.as_deref();
|
let sender_device = body.sender_device.as_ref();
|
||||||
let room_id = &body.room_id;
|
let room_id = &body.room_id;
|
||||||
let filter = &body.filter;
|
let filter = &body.filter;
|
||||||
|
|
||||||
|
@ -137,17 +137,18 @@ pub(crate) async fn get_message_events_route(
|
||||||
|
|
||||||
let lazy_loading_context = lazy_loading::Context {
|
let lazy_loading_context = lazy_loading::Context {
|
||||||
user_id: sender_user,
|
user_id: sender_user,
|
||||||
device_id: sender_device.or_else(|| {
|
device_id: match sender_device {
|
||||||
|
| Some(device_id) => device_id,
|
||||||
|
| None =>
|
||||||
if let Some(registration) = body.appservice_info.as_ref() {
|
if let Some(registration) = body.appservice_info.as_ref() {
|
||||||
Some(<&DeviceId>::from(registration.registration.id.as_str()))
|
<&DeviceId>::from(registration.registration.id.as_str())
|
||||||
} else {
|
} else {
|
||||||
warn!(
|
panic!(
|
||||||
"No device_id provided and no appservice registration found, this should be \
|
"No device_id provided and no appservice registration found, this \
|
||||||
unreachable"
|
should be unreachable"
|
||||||
);
|
);
|
||||||
None
|
},
|
||||||
}
|
},
|
||||||
}),
|
|
||||||
room_id,
|
room_id,
|
||||||
token: Some(from.into_unsigned()),
|
token: Some(from.into_unsigned()),
|
||||||
options: Some(&filter.lazy_load_options),
|
options: Some(&filter.lazy_load_options),
|
||||||
|
@ -176,7 +177,7 @@ pub(crate) async fn get_message_events_route(
|
||||||
let chunk = events
|
let chunk = events
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(at!(1))
|
.map(at!(1))
|
||||||
.map(Event::into_format)
|
.map(PduEvent::into_room_event)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
Ok(get_message_events::v3::Response {
|
Ok(get_message_events::v3::Response {
|
||||||
|
@ -217,9 +218,7 @@ where
|
||||||
pin_mut!(receipts);
|
pin_mut!(receipts);
|
||||||
let witness: Witness = events
|
let witness: Witness = events
|
||||||
.stream()
|
.stream()
|
||||||
.map(ref_at!(1))
|
.map(|(_, pdu)| pdu.sender.clone())
|
||||||
.map(Event::sender)
|
|
||||||
.map(ToOwned::to_owned)
|
|
||||||
.chain(
|
.chain(
|
||||||
receipts
|
receipts
|
||||||
.ready_take_while(|(_, c, _)| *c <= newest.into_unsigned())
|
.ready_take_while(|(_, c, _)| *c <= newest.into_unsigned())
|
||||||
|
@ -244,7 +243,7 @@ async fn get_member_event(
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())
|
.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())
|
||||||
.map_ok(Event::into_format)
|
.map_ok(PduEvent::into_state_event)
|
||||||
.await
|
.await
|
||||||
.ok()
|
.ok()
|
||||||
}
|
}
|
||||||
|
@ -264,33 +263,27 @@ pub(crate) async fn ignored_filter(
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
pub(crate) async fn is_ignored_pdu<Pdu>(
|
pub(crate) async fn is_ignored_pdu(
|
||||||
services: &Services,
|
services: &Services,
|
||||||
event: &Pdu,
|
pdu: &PduEvent,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
) -> bool
|
) -> bool {
|
||||||
where
|
|
||||||
Pdu: Event + Send + Sync,
|
|
||||||
{
|
|
||||||
// exclude Synapse's dummy events from bloating up response bodies. clients
|
// exclude Synapse's dummy events from bloating up response bodies. clients
|
||||||
// don't need to see this.
|
// don't need to see this.
|
||||||
if event.kind().to_cow_str() == "org.matrix.dummy_event" {
|
if pdu.kind.to_cow_str() == "org.matrix.dummy_event" {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(event.kind()).is_ok();
|
let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok();
|
||||||
|
|
||||||
let ignored_server = services
|
let ignored_server = services
|
||||||
.moderation
|
.moderation
|
||||||
.is_remote_server_ignored(event.sender().server_name());
|
.is_remote_server_ignored(pdu.sender().server_name());
|
||||||
|
|
||||||
if ignored_type
|
if ignored_type
|
||||||
&& (ignored_server
|
&& (ignored_server
|
||||||
|| (!services.config.send_messages_from_ignored_users_to_client
|
|| (!services.config.send_messages_from_ignored_users_to_client
|
||||||
&& services
|
&& services.users.user_is_ignored(&pdu.sender, user_id).await))
|
||||||
.users
|
|
||||||
.user_is_ignored(event.sender(), user_id)
|
|
||||||
.await))
|
|
||||||
{
|
{
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -309,7 +302,7 @@ pub(crate) async fn visibility_filter(
|
||||||
services
|
services
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_event(user_id, pdu.room_id(), pdu.event_id())
|
.user_can_see_event(user_id, &pdu.room_id, &pdu.event_id)
|
||||||
.await
|
.await
|
||||||
.then_some(item)
|
.then_some(item)
|
||||||
}
|
}
|
||||||
|
@ -317,7 +310,7 @@ pub(crate) async fn visibility_filter(
|
||||||
#[inline]
|
#[inline]
|
||||||
pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option<PdusIterItem> {
|
pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option<PdusIterItem> {
|
||||||
let (_, pdu) = &item;
|
let (_, pdu) = &item;
|
||||||
filter.matches(pdu).then_some(item)
|
pdu.matches(filter).then_some(item)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(debug_assertions, conduwuit::ctor)]
|
#[cfg_attr(debug_assertions, conduwuit::ctor)]
|
||||||
|
|
|
@ -1,8 +1,11 @@
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{Err, Result, utils};
|
use conduwuit::{Error, Result, utils};
|
||||||
use ruma::{api::client::account, authentication::TokenType};
|
use ruma::{
|
||||||
|
api::client::{account, error::ErrorKind},
|
||||||
|
authentication::TokenType,
|
||||||
|
};
|
||||||
|
|
||||||
use super::TOKEN_LENGTH;
|
use super::TOKEN_LENGTH;
|
||||||
use crate::Ruma;
|
use crate::Ruma;
|
||||||
|
@ -16,15 +19,17 @@ pub(crate) async fn create_openid_token_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<account::request_openid_token::v3::Request>,
|
body: Ruma<account::request_openid_token::v3::Request>,
|
||||||
) -> Result<account::request_openid_token::v3::Response> {
|
) -> Result<account::request_openid_token::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if sender_user != body.user_id {
|
if sender_user != &body.user_id {
|
||||||
return Err!(Request(InvalidParam(
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
"Not allowed to request OpenID tokens on behalf of other users",
|
"Not allowed to request OpenID tokens on behalf of other users",
|
||||||
)));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let access_token = utils::random_string(TOKEN_LENGTH);
|
let access_token = utils::random_string(TOKEN_LENGTH);
|
||||||
|
|
||||||
let expires_in = services
|
let expires_in = services
|
||||||
.users
|
.users
|
||||||
.create_openid_token(&body.user_id, &access_token)?;
|
.create_openid_token(&body.user_id, &access_token)?;
|
||||||
|
|
|
@ -2,22 +2,22 @@ use std::collections::BTreeMap;
|
||||||
|
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result,
|
Err, Error, Result,
|
||||||
matrix::pdu::PduBuilder,
|
matrix::pdu::PduBuilder,
|
||||||
utils::{IterStream, future::TryExtExt, stream::TryIgnore},
|
utils::{IterStream, stream::TryIgnore},
|
||||||
warn,
|
warn,
|
||||||
};
|
};
|
||||||
use conduwuit_service::Services;
|
use conduwuit_service::Services;
|
||||||
use futures::{
|
use futures::{StreamExt, TryStreamExt, future::join3};
|
||||||
StreamExt, TryStreamExt,
|
|
||||||
future::{join, join3, join4},
|
|
||||||
};
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
OwnedMxcUri, OwnedRoomId, UserId,
|
OwnedMxcUri, OwnedRoomId, UserId,
|
||||||
api::{
|
api::{
|
||||||
client::profile::{
|
client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
profile::{
|
||||||
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
|
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
|
||||||
},
|
},
|
||||||
|
},
|
||||||
federation,
|
federation,
|
||||||
},
|
},
|
||||||
events::room::member::{MembershipState, RoomMemberEventContent},
|
events::room::member::{MembershipState, RoomMemberEventContent},
|
||||||
|
@ -35,7 +35,7 @@ pub(crate) async fn set_displayname_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<set_display_name::v3::Request>,
|
body: Ruma<set_display_name::v3::Request>,
|
||||||
) -> Result<set_display_name::v3::Response> {
|
) -> Result<set_display_name::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
if services.users.is_suspended(sender_user).await? {
|
if services.users.is_suspended(sender_user).await? {
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||||
}
|
}
|
||||||
|
@ -110,7 +110,7 @@ pub(crate) async fn get_displayname_route(
|
||||||
if !services.users.exists(&body.user_id).await {
|
if !services.users.exists(&body.user_id).await {
|
||||||
// Return 404 if this user doesn't exist and we couldn't fetch it over
|
// Return 404 if this user doesn't exist and we couldn't fetch it over
|
||||||
// federation
|
// federation
|
||||||
return Err!(Request(NotFound("Profile was not found.")));
|
return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found."));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_display_name::v3::Response {
|
Ok(get_display_name::v3::Response {
|
||||||
|
@ -127,7 +127,7 @@ pub(crate) async fn set_avatar_url_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<set_avatar_url::v3::Request>,
|
body: Ruma<set_avatar_url::v3::Request>,
|
||||||
) -> Result<set_avatar_url::v3::Response> {
|
) -> Result<set_avatar_url::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
if services.users.is_suspended(sender_user).await? {
|
if services.users.is_suspended(sender_user).await? {
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||||
}
|
}
|
||||||
|
@ -195,9 +195,11 @@ pub(crate) async fn get_avatar_url_route(
|
||||||
services
|
services
|
||||||
.users
|
.users
|
||||||
.set_displayname(&body.user_id, response.displayname.clone());
|
.set_displayname(&body.user_id, response.displayname.clone());
|
||||||
|
|
||||||
services
|
services
|
||||||
.users
|
.users
|
||||||
.set_avatar_url(&body.user_id, response.avatar_url.clone());
|
.set_avatar_url(&body.user_id, response.avatar_url.clone());
|
||||||
|
|
||||||
services
|
services
|
||||||
.users
|
.users
|
||||||
.set_blurhash(&body.user_id, response.blurhash.clone());
|
.set_blurhash(&body.user_id, response.blurhash.clone());
|
||||||
|
@ -212,16 +214,13 @@ pub(crate) async fn get_avatar_url_route(
|
||||||
if !services.users.exists(&body.user_id).await {
|
if !services.users.exists(&body.user_id).await {
|
||||||
// Return 404 if this user doesn't exist and we couldn't fetch it over
|
// Return 404 if this user doesn't exist and we couldn't fetch it over
|
||||||
// federation
|
// federation
|
||||||
return Err!(Request(NotFound("Profile was not found.")));
|
return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let (avatar_url, blurhash) = join(
|
Ok(get_avatar_url::v3::Response {
|
||||||
services.users.avatar_url(&body.user_id).ok(),
|
avatar_url: services.users.avatar_url(&body.user_id).await.ok(),
|
||||||
services.users.blurhash(&body.user_id).ok(),
|
blurhash: services.users.blurhash(&body.user_id).await.ok(),
|
||||||
)
|
})
|
||||||
.await;
|
|
||||||
|
|
||||||
Ok(get_avatar_url::v3::Response { avatar_url, blurhash })
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/v3/profile/{userId}`
|
/// # `GET /_matrix/client/v3/profile/{userId}`
|
||||||
|
@ -254,12 +253,15 @@ pub(crate) async fn get_profile_route(
|
||||||
services
|
services
|
||||||
.users
|
.users
|
||||||
.set_displayname(&body.user_id, response.displayname.clone());
|
.set_displayname(&body.user_id, response.displayname.clone());
|
||||||
|
|
||||||
services
|
services
|
||||||
.users
|
.users
|
||||||
.set_avatar_url(&body.user_id, response.avatar_url.clone());
|
.set_avatar_url(&body.user_id, response.avatar_url.clone());
|
||||||
|
|
||||||
services
|
services
|
||||||
.users
|
.users
|
||||||
.set_blurhash(&body.user_id, response.blurhash.clone());
|
.set_blurhash(&body.user_id, response.blurhash.clone());
|
||||||
|
|
||||||
services
|
services
|
||||||
.users
|
.users
|
||||||
.set_timezone(&body.user_id, response.tz.clone());
|
.set_timezone(&body.user_id, response.tz.clone());
|
||||||
|
@ -285,7 +287,7 @@ pub(crate) async fn get_profile_route(
|
||||||
if !services.users.exists(&body.user_id).await {
|
if !services.users.exists(&body.user_id).await {
|
||||||
// Return 404 if this user doesn't exist and we couldn't fetch it over
|
// Return 404 if this user doesn't exist and we couldn't fetch it over
|
||||||
// federation
|
// federation
|
||||||
return Err!(Request(NotFound("Profile was not found.")));
|
return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut custom_profile_fields: BTreeMap<String, serde_json::Value> = services
|
let mut custom_profile_fields: BTreeMap<String, serde_json::Value> = services
|
||||||
|
@ -298,19 +300,11 @@ pub(crate) async fn get_profile_route(
|
||||||
custom_profile_fields.remove("us.cloke.msc4175.tz");
|
custom_profile_fields.remove("us.cloke.msc4175.tz");
|
||||||
custom_profile_fields.remove("m.tz");
|
custom_profile_fields.remove("m.tz");
|
||||||
|
|
||||||
let (avatar_url, blurhash, displayname, tz) = join4(
|
|
||||||
services.users.avatar_url(&body.user_id).ok(),
|
|
||||||
services.users.blurhash(&body.user_id).ok(),
|
|
||||||
services.users.displayname(&body.user_id).ok(),
|
|
||||||
services.users.timezone(&body.user_id).ok(),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
Ok(get_profile::v3::Response {
|
Ok(get_profile::v3::Response {
|
||||||
avatar_url,
|
avatar_url: services.users.avatar_url(&body.user_id).await.ok(),
|
||||||
blurhash,
|
blurhash: services.users.blurhash(&body.user_id).await.ok(),
|
||||||
displayname,
|
displayname: services.users.displayname(&body.user_id).await.ok(),
|
||||||
tz,
|
tz: services.users.timezone(&body.user_id).await.ok(),
|
||||||
custom_profile_fields,
|
custom_profile_fields,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -322,12 +316,16 @@ pub async fn update_displayname(
|
||||||
all_joined_rooms: &[OwnedRoomId],
|
all_joined_rooms: &[OwnedRoomId],
|
||||||
) {
|
) {
|
||||||
let (current_avatar_url, current_blurhash, current_displayname) = join3(
|
let (current_avatar_url, current_blurhash, current_displayname) = join3(
|
||||||
services.users.avatar_url(user_id).ok(),
|
services.users.avatar_url(user_id),
|
||||||
services.users.blurhash(user_id).ok(),
|
services.users.blurhash(user_id),
|
||||||
services.users.displayname(user_id).ok(),
|
services.users.displayname(user_id),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
|
let current_avatar_url = current_avatar_url.ok();
|
||||||
|
let current_blurhash = current_blurhash.ok();
|
||||||
|
let current_displayname = current_displayname.ok();
|
||||||
|
|
||||||
if displayname == current_displayname {
|
if displayname == current_displayname {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -371,12 +369,16 @@ pub async fn update_avatar_url(
|
||||||
all_joined_rooms: &[OwnedRoomId],
|
all_joined_rooms: &[OwnedRoomId],
|
||||||
) {
|
) {
|
||||||
let (current_avatar_url, current_blurhash, current_displayname) = join3(
|
let (current_avatar_url, current_blurhash, current_displayname) = join3(
|
||||||
services.users.avatar_url(user_id).ok(),
|
services.users.avatar_url(user_id),
|
||||||
services.users.blurhash(user_id).ok(),
|
services.users.blurhash(user_id),
|
||||||
services.users.displayname(user_id).ok(),
|
services.users.displayname(user_id),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
|
let current_avatar_url = current_avatar_url.ok();
|
||||||
|
let current_blurhash = current_blurhash.ok();
|
||||||
|
let current_displayname = current_displayname.ok();
|
||||||
|
|
||||||
if current_avatar_url == avatar_url && current_blurhash == blurhash {
|
if current_avatar_url == avatar_url && current_blurhash == blurhash {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
|
@ -79,14 +79,17 @@ pub(crate) async fn get_pushrules_all_route(
|
||||||
|
|
||||||
global_ruleset.update_with_server_default(Ruleset::server_default(sender_user));
|
global_ruleset.update_with_server_default(Ruleset::server_default(sender_user));
|
||||||
|
|
||||||
let ty = GlobalAccountDataEventType::PushRules;
|
|
||||||
let event = PushRulesEvent {
|
|
||||||
content: PushRulesEventContent { global: global_ruleset.clone() },
|
|
||||||
};
|
|
||||||
|
|
||||||
services
|
services
|
||||||
.account_data
|
.account_data
|
||||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?)
|
.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(PushRulesEvent {
|
||||||
|
content: PushRulesEventContent { global: global_ruleset.clone() },
|
||||||
|
})
|
||||||
|
.expect("to json always works"),
|
||||||
|
)
|
||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -103,7 +106,7 @@ pub(crate) async fn get_pushrules_global_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<get_pushrules_global_scope::v3::Request>,
|
body: Ruma<get_pushrules_global_scope::v3::Request>,
|
||||||
) -> Result<get_pushrules_global_scope::v3::Response> {
|
) -> Result<get_pushrules_global_scope::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let Some(content_value) = services
|
let Some(content_value) = services
|
||||||
.account_data
|
.account_data
|
||||||
|
@ -115,17 +118,19 @@ pub(crate) async fn get_pushrules_global_route(
|
||||||
else {
|
else {
|
||||||
// user somehow has non-existent push rule event. recreate it and return server
|
// user somehow has non-existent push rule event. recreate it and return server
|
||||||
// default silently
|
// default silently
|
||||||
|
services
|
||||||
let ty = GlobalAccountDataEventType::PushRules;
|
.account_data
|
||||||
let event = PushRulesEvent {
|
.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(PushRulesEvent {
|
||||||
content: PushRulesEventContent {
|
content: PushRulesEventContent {
|
||||||
global: Ruleset::server_default(sender_user),
|
global: Ruleset::server_default(sender_user),
|
||||||
},
|
},
|
||||||
};
|
})
|
||||||
|
.expect("to json always works"),
|
||||||
services
|
)
|
||||||
.account_data
|
|
||||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?)
|
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
return Ok(get_pushrules_global_scope::v3::Response {
|
return Ok(get_pushrules_global_scope::v3::Response {
|
||||||
|
@ -218,7 +223,7 @@ pub(crate) async fn get_pushrule_route(
|
||||||
if let Some(rule) = rule {
|
if let Some(rule) = rule {
|
||||||
Ok(get_pushrule::v3::Response { rule })
|
Ok(get_pushrule::v3::Response { rule })
|
||||||
} else {
|
} else {
|
||||||
Err!(Request(NotFound("Push rule not found.")))
|
Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -229,8 +234,9 @@ pub(crate) async fn set_pushrule_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<set_pushrule::v3::Request>,
|
body: Ruma<set_pushrule::v3::Request>,
|
||||||
) -> Result<set_pushrule::v3::Response> {
|
) -> Result<set_pushrule::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let body = &body.body;
|
let body = body.body;
|
||||||
|
|
||||||
let mut account_data: PushRulesEvent = services
|
let mut account_data: PushRulesEvent = services
|
||||||
.account_data
|
.account_data
|
||||||
.get_global(sender_user, GlobalAccountDataEventType::PushRules)
|
.get_global(sender_user, GlobalAccountDataEventType::PushRules)
|
||||||
|
@ -269,10 +275,14 @@ pub(crate) async fn set_pushrule_route(
|
||||||
return Err(err);
|
return Err(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
let ty = GlobalAccountDataEventType::PushRules;
|
|
||||||
services
|
services
|
||||||
.account_data
|
.account_data
|
||||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
|
.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(account_data).expect("to json value always works"),
|
||||||
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(set_pushrule::v3::Response {})
|
Ok(set_pushrule::v3::Response {})
|
||||||
|
@ -285,7 +295,7 @@ pub(crate) async fn get_pushrule_actions_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<get_pushrule_actions::v3::Request>,
|
body: Ruma<get_pushrule_actions::v3::Request>,
|
||||||
) -> Result<get_pushrule_actions::v3::Response> {
|
) -> Result<get_pushrule_actions::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
// remove old deprecated mentions push rules as per MSC4210
|
// remove old deprecated mentions push rules as per MSC4210
|
||||||
#[allow(deprecated)]
|
#[allow(deprecated)]
|
||||||
|
@ -319,7 +329,7 @@ pub(crate) async fn set_pushrule_actions_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<set_pushrule_actions::v3::Request>,
|
body: Ruma<set_pushrule_actions::v3::Request>,
|
||||||
) -> Result<set_pushrule_actions::v3::Response> {
|
) -> Result<set_pushrule_actions::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut account_data: PushRulesEvent = services
|
let mut account_data: PushRulesEvent = services
|
||||||
.account_data
|
.account_data
|
||||||
|
@ -333,13 +343,17 @@ pub(crate) async fn set_pushrule_actions_route(
|
||||||
.set_actions(body.kind.clone(), &body.rule_id, body.actions.clone())
|
.set_actions(body.kind.clone(), &body.rule_id, body.actions.clone())
|
||||||
.is_err()
|
.is_err()
|
||||||
{
|
{
|
||||||
return Err!(Request(NotFound("Push rule not found.")));
|
return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let ty = GlobalAccountDataEventType::PushRules;
|
|
||||||
services
|
services
|
||||||
.account_data
|
.account_data
|
||||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
|
.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(account_data).expect("to json value always works"),
|
||||||
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(set_pushrule_actions::v3::Response {})
|
Ok(set_pushrule_actions::v3::Response {})
|
||||||
|
@ -352,7 +366,7 @@ pub(crate) async fn get_pushrule_enabled_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<get_pushrule_enabled::v3::Request>,
|
body: Ruma<get_pushrule_enabled::v3::Request>,
|
||||||
) -> Result<get_pushrule_enabled::v3::Response> {
|
) -> Result<get_pushrule_enabled::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
// remove old deprecated mentions push rules as per MSC4210
|
// remove old deprecated mentions push rules as per MSC4210
|
||||||
#[allow(deprecated)]
|
#[allow(deprecated)]
|
||||||
|
@ -386,7 +400,7 @@ pub(crate) async fn set_pushrule_enabled_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<set_pushrule_enabled::v3::Request>,
|
body: Ruma<set_pushrule_enabled::v3::Request>,
|
||||||
) -> Result<set_pushrule_enabled::v3::Response> {
|
) -> Result<set_pushrule_enabled::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut account_data: PushRulesEvent = services
|
let mut account_data: PushRulesEvent = services
|
||||||
.account_data
|
.account_data
|
||||||
|
@ -400,13 +414,17 @@ pub(crate) async fn set_pushrule_enabled_route(
|
||||||
.set_enabled(body.kind.clone(), &body.rule_id, body.enabled)
|
.set_enabled(body.kind.clone(), &body.rule_id, body.enabled)
|
||||||
.is_err()
|
.is_err()
|
||||||
{
|
{
|
||||||
return Err!(Request(NotFound("Push rule not found.")));
|
return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let ty = GlobalAccountDataEventType::PushRules;
|
|
||||||
services
|
services
|
||||||
.account_data
|
.account_data
|
||||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
|
.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(account_data).expect("to json value always works"),
|
||||||
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(set_pushrule_enabled::v3::Response {})
|
Ok(set_pushrule_enabled::v3::Response {})
|
||||||
|
@ -419,7 +437,7 @@ pub(crate) async fn delete_pushrule_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<delete_pushrule::v3::Request>,
|
body: Ruma<delete_pushrule::v3::Request>,
|
||||||
) -> Result<delete_pushrule::v3::Response> {
|
) -> Result<delete_pushrule::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut account_data: PushRulesEvent = services
|
let mut account_data: PushRulesEvent = services
|
||||||
.account_data
|
.account_data
|
||||||
|
@ -445,10 +463,14 @@ pub(crate) async fn delete_pushrule_route(
|
||||||
return Err(err);
|
return Err(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
let ty = GlobalAccountDataEventType::PushRules;
|
|
||||||
services
|
services
|
||||||
.account_data
|
.account_data
|
||||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?)
|
.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(account_data).expect("to json value always works"),
|
||||||
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(delete_pushrule::v3::Response {})
|
Ok(delete_pushrule::v3::Response {})
|
||||||
|
@ -461,7 +483,7 @@ pub(crate) async fn get_pushers_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<get_pushers::v3::Request>,
|
body: Ruma<get_pushers::v3::Request>,
|
||||||
) -> Result<get_pushers::v3::Response> {
|
) -> Result<get_pushers::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
Ok(get_pushers::v3::Response {
|
Ok(get_pushers::v3::Response {
|
||||||
pushers: services.pusher.get_pushers(sender_user).await,
|
pushers: services.pusher.get_pushers(sender_user).await,
|
||||||
|
@ -477,7 +499,7 @@ pub(crate) async fn set_pushers_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<set_pusher::v3::Request>,
|
body: Ruma<set_pusher::v3::Request>,
|
||||||
) -> Result<set_pusher::v3::Response> {
|
) -> Result<set_pusher::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
services
|
services
|
||||||
.pusher
|
.pusher
|
||||||
|
@ -493,16 +515,19 @@ async fn recreate_push_rules_and_return(
|
||||||
services: &Services,
|
services: &Services,
|
||||||
sender_user: &ruma::UserId,
|
sender_user: &ruma::UserId,
|
||||||
) -> Result<get_pushrules_all::v3::Response> {
|
) -> Result<get_pushrules_all::v3::Response> {
|
||||||
let ty = GlobalAccountDataEventType::PushRules;
|
services
|
||||||
let event = PushRulesEvent {
|
.account_data
|
||||||
|
.update(
|
||||||
|
None,
|
||||||
|
sender_user,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(PushRulesEvent {
|
||||||
content: PushRulesEventContent {
|
content: PushRulesEventContent {
|
||||||
global: Ruleset::server_default(sender_user),
|
global: Ruleset::server_default(sender_user),
|
||||||
},
|
},
|
||||||
};
|
})
|
||||||
|
.expect("to json always works"),
|
||||||
services
|
)
|
||||||
.account_data
|
|
||||||
.update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?)
|
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
Ok(get_pushrules_all::v3::Response {
|
Ok(get_pushrules_all::v3::Response {
|
||||||
|
|
|
@ -37,7 +37,7 @@ pub(crate) async fn set_read_marker_route(
|
||||||
Some(&body.room_id),
|
Some(&body.room_id),
|
||||||
sender_user,
|
sender_user,
|
||||||
RoomAccountDataEventType::FullyRead,
|
RoomAccountDataEventType::FullyRead,
|
||||||
&serde_json::to_value(fully_read_event)?,
|
&serde_json::to_value(fully_read_event).expect("to json value always works"),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
|
@ -58,9 +58,7 @@ pub(crate) async fn set_read_marker_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(event) = &body.read_receipt {
|
if let Some(event) = &body.read_receipt {
|
||||||
if services.config.allow_local_read_receipts
|
if !services.users.is_suspended(sender_user).await? {
|
||||||
&& !services.users.is_suspended(sender_user).await?
|
|
||||||
{
|
|
||||||
let receipt_content = BTreeMap::from_iter([(
|
let receipt_content = BTreeMap::from_iter([(
|
||||||
event.to_owned(),
|
event.to_owned(),
|
||||||
BTreeMap::from_iter([(
|
BTreeMap::from_iter([(
|
||||||
|
@ -153,7 +151,7 @@ pub(crate) async fn create_receipt_route(
|
||||||
Some(&body.room_id),
|
Some(&body.room_id),
|
||||||
sender_user,
|
sender_user,
|
||||||
RoomAccountDataEventType::FullyRead,
|
RoomAccountDataEventType::FullyRead,
|
||||||
&serde_json::to_value(fully_read_event)?,
|
&serde_json::to_value(fully_read_event).expect("to json value always works"),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
},
|
},
|
||||||
|
|
|
@ -15,8 +15,8 @@ pub(crate) async fn redact_event_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<redact_event::v3::Request>,
|
body: Ruma<redact_event::v3::Request>,
|
||||||
) -> Result<redact_event::v3::Response> {
|
) -> Result<redact_event::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let body = &body.body;
|
let body = body.body;
|
||||||
if services.users.is_suspended(sender_user).await? {
|
if services.users.is_suspended(sender_user).await? {
|
||||||
// TODO: Users can redact their own messages while suspended
|
// TODO: Users can redact their own messages while suspended
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Result, at,
|
Result, at,
|
||||||
matrix::{Event, event::RelationTypeEqual, pdu::PduCount},
|
matrix::pdu::PduCount,
|
||||||
utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt},
|
utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt},
|
||||||
};
|
};
|
||||||
use conduwuit_service::Services;
|
use conduwuit_service::{Services, rooms::timeline::PdusIterItem};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
EventId, RoomId, UInt, UserId,
|
EventId, RoomId, UInt, UserId,
|
||||||
|
@ -129,7 +129,7 @@ async fn paginate_relations_with_filter(
|
||||||
// Spec (v1.10) recommends depth of at least 3
|
// Spec (v1.10) recommends depth of at least 3
|
||||||
let depth: u8 = if recurse { 3 } else { 1 };
|
let depth: u8 = if recurse { 3 } else { 1 };
|
||||||
|
|
||||||
let events: Vec<_> = services
|
let events: Vec<PdusIterItem> = services
|
||||||
.rooms
|
.rooms
|
||||||
.pdu_metadata
|
.pdu_metadata
|
||||||
.get_relations(sender_user, room_id, target, start, limit, depth, dir)
|
.get_relations(sender_user, room_id, target, start, limit, depth, dir)
|
||||||
|
@ -138,12 +138,12 @@ async fn paginate_relations_with_filter(
|
||||||
.filter(|(_, pdu)| {
|
.filter(|(_, pdu)| {
|
||||||
filter_event_type
|
filter_event_type
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.is_none_or(|kind| kind == pdu.kind())
|
.is_none_or(|kind| *kind == pdu.kind)
|
||||||
})
|
})
|
||||||
.filter(|(_, pdu)| {
|
.filter(|(_, pdu)| {
|
||||||
filter_rel_type
|
filter_rel_type
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.is_none_or(|rel_type| rel_type.relation_type_equal(pdu))
|
.is_none_or(|rel_type| pdu.relation_type_equal(rel_type))
|
||||||
})
|
})
|
||||||
.stream()
|
.stream()
|
||||||
.ready_take_while(|(count, _)| Some(*count) != to)
|
.ready_take_while(|(count, _)| Some(*count) != to)
|
||||||
|
@ -167,22 +167,22 @@ async fn paginate_relations_with_filter(
|
||||||
chunk: events
|
chunk: events
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(at!(1))
|
.map(at!(1))
|
||||||
.map(Event::into_format)
|
.map(|pdu| pdu.to_message_like_event())
|
||||||
.collect(),
|
.collect(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn visibility_filter<Pdu: Event + Send + Sync>(
|
async fn visibility_filter(
|
||||||
services: &Services,
|
services: &Services,
|
||||||
sender_user: &UserId,
|
sender_user: &UserId,
|
||||||
item: (PduCount, Pdu),
|
item: PdusIterItem,
|
||||||
) -> Option<(PduCount, Pdu)> {
|
) -> Option<PdusIterItem> {
|
||||||
let (_, pdu) = &item;
|
let (_, pdu) = &item;
|
||||||
|
|
||||||
services
|
services
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.user_can_see_event(sender_user, pdu.room_id(), pdu.event_id())
|
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
|
||||||
.await
|
.await
|
||||||
.then_some(item)
|
.then_some(item)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,33 +1,23 @@
|
||||||
use std::{fmt::Write as _, ops::Mul, time::Duration};
|
use std::time::Duration;
|
||||||
|
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use axum_client_ip::InsecureClientIp;
|
use axum_client_ip::InsecureClientIp;
|
||||||
use conduwuit::{Err, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
|
use conduwuit::{Err, Error, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
|
||||||
use conduwuit_service::Services;
|
use conduwuit_service::Services;
|
||||||
use rand::Rng;
|
use rand::Rng;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId,
|
EventId, RoomId, UserId,
|
||||||
api::client::{
|
api::client::{
|
||||||
report_user,
|
error::ErrorKind,
|
||||||
room::{report_content, report_room},
|
room::{report_content, report_room},
|
||||||
},
|
},
|
||||||
events::{Mentions, room::message::RoomMessageEventContent},
|
events::room::message,
|
||||||
int,
|
int,
|
||||||
};
|
};
|
||||||
use tokio::time::sleep;
|
use tokio::time::sleep;
|
||||||
|
|
||||||
use crate::Ruma;
|
use crate::Ruma;
|
||||||
|
|
||||||
struct Report {
|
|
||||||
sender: OwnedUserId,
|
|
||||||
room_id: Option<OwnedRoomId>,
|
|
||||||
event_id: Option<OwnedEventId>,
|
|
||||||
user_id: Option<OwnedUserId>,
|
|
||||||
report_type: String,
|
|
||||||
reason: Option<String>,
|
|
||||||
score: Option<ruma::Int>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/v3/rooms/{roomId}/report`
|
/// # `POST /_matrix/client/v3/rooms/{roomId}/report`
|
||||||
///
|
///
|
||||||
/// Reports an abusive room to homeserver admins
|
/// Reports an abusive room to homeserver admins
|
||||||
|
@ -37,14 +27,19 @@ pub(crate) async fn report_room_route(
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
InsecureClientIp(client): InsecureClientIp,
|
||||||
body: Ruma<report_room::v3::Request>,
|
body: Ruma<report_room::v3::Request>,
|
||||||
) -> Result<report_room::v3::Response> {
|
) -> Result<report_room::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
// user authentication
|
||||||
if services.users.is_suspended(sender_user).await? {
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
|
||||||
}
|
info!(
|
||||||
|
"Received room report by user {sender_user} for room {} with reason: \"{}\"",
|
||||||
|
body.room_id,
|
||||||
|
body.reason.as_deref().unwrap_or("")
|
||||||
|
);
|
||||||
|
|
||||||
if body.reason.as_ref().is_some_and(|s| s.len() > 750) {
|
if body.reason.as_ref().is_some_and(|s| s.len() > 750) {
|
||||||
return Err!(Request(
|
return Err(Error::BadRequest(
|
||||||
InvalidParam("Reason too long, should be 750 characters or fewer",)
|
ErrorKind::InvalidParam,
|
||||||
|
"Reason too long, should be 750 characters or fewer",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -60,23 +55,19 @@ pub(crate) async fn report_room_route(
|
||||||
"Room does not exist to us, no local users have joined at all"
|
"Room does not exist to us, no local users have joined at all"
|
||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
info!(
|
|
||||||
"Received room report by user {sender_user} for room {} with reason: \"{}\"",
|
// send admin room message that we received the report with an @room ping for
|
||||||
|
// urgency
|
||||||
|
services
|
||||||
|
.admin
|
||||||
|
.send_message(message::RoomMessageEventContent::text_markdown(format!(
|
||||||
|
"@room Room report received from {} -\n\nRoom ID: {}\n\nReport Reason: {}",
|
||||||
|
sender_user.to_owned(),
|
||||||
body.room_id,
|
body.room_id,
|
||||||
body.reason.as_deref().unwrap_or("")
|
body.reason.as_deref().unwrap_or("")
|
||||||
);
|
)))
|
||||||
|
.await
|
||||||
let report = Report {
|
.ok();
|
||||||
sender: sender_user.to_owned(),
|
|
||||||
room_id: Some(body.room_id.clone()),
|
|
||||||
event_id: None,
|
|
||||||
user_id: None,
|
|
||||||
report_type: "room".to_owned(),
|
|
||||||
reason: body.reason.clone(),
|
|
||||||
score: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
services.admin.send_message(build_report(report)).await.ok();
|
|
||||||
|
|
||||||
Ok(report_room::v3::Response {})
|
Ok(report_room::v3::Response {})
|
||||||
}
|
}
|
||||||
|
@ -91,10 +82,15 @@ pub(crate) async fn report_event_route(
|
||||||
body: Ruma<report_content::v3::Request>,
|
body: Ruma<report_content::v3::Request>,
|
||||||
) -> Result<report_content::v3::Response> {
|
) -> Result<report_content::v3::Response> {
|
||||||
// user authentication
|
// user authentication
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
if services.users.is_suspended(sender_user).await? {
|
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
info!(
|
||||||
}
|
"Received event report by user {sender_user} for room {} and event ID {}, with reason: \
|
||||||
|
\"{}\"",
|
||||||
|
body.room_id,
|
||||||
|
body.event_id,
|
||||||
|
body.reason.as_deref().unwrap_or("")
|
||||||
|
);
|
||||||
|
|
||||||
delay_response().await;
|
delay_response().await;
|
||||||
|
|
||||||
|
@ -113,73 +109,27 @@ pub(crate) async fn report_event_route(
|
||||||
&pdu,
|
&pdu,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
info!(
|
|
||||||
"Received event report by user {sender_user} for room {} and event ID {}, with reason: \
|
// send admin room message that we received the report with an @room ping for
|
||||||
\"{}\"",
|
// urgency
|
||||||
body.room_id,
|
services
|
||||||
body.event_id,
|
.admin
|
||||||
|
.send_message(message::RoomMessageEventContent::text_markdown(format!(
|
||||||
|
"@room Event report received from {} -\n\nEvent ID: {}\nRoom ID: {}\nSent By: \
|
||||||
|
{}\n\nReport Score: {}\nReport Reason: {}",
|
||||||
|
sender_user.to_owned(),
|
||||||
|
pdu.event_id,
|
||||||
|
pdu.room_id,
|
||||||
|
pdu.sender,
|
||||||
|
body.score.unwrap_or_else(|| ruma::Int::from(0)),
|
||||||
body.reason.as_deref().unwrap_or("")
|
body.reason.as_deref().unwrap_or("")
|
||||||
);
|
)))
|
||||||
let report = Report {
|
.await
|
||||||
sender: sender_user.to_owned(),
|
.ok();
|
||||||
room_id: Some(body.room_id.clone()),
|
|
||||||
event_id: Some(body.event_id.clone()),
|
|
||||||
user_id: None,
|
|
||||||
report_type: "event".to_owned(),
|
|
||||||
reason: body.reason.clone(),
|
|
||||||
score: body.score,
|
|
||||||
};
|
|
||||||
services.admin.send_message(build_report(report)).await.ok();
|
|
||||||
|
|
||||||
Ok(report_content::v3::Response {})
|
Ok(report_content::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(%client), name = "report_user")]
|
|
||||||
pub(crate) async fn report_user_route(
|
|
||||||
State(services): State<crate::State>,
|
|
||||||
InsecureClientIp(client): InsecureClientIp,
|
|
||||||
body: Ruma<report_user::v3::Request>,
|
|
||||||
) -> Result<report_user::v3::Response> {
|
|
||||||
// user authentication
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
if services.users.is_suspended(sender_user).await? {
|
|
||||||
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
|
|
||||||
}
|
|
||||||
|
|
||||||
if body.reason.as_ref().is_some_and(|s| s.len() > 750) {
|
|
||||||
return Err!(Request(
|
|
||||||
InvalidParam("Reason too long, should be 750 characters or fewer",)
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
delay_response().await;
|
|
||||||
|
|
||||||
if !services.users.is_active_local(&body.user_id).await {
|
|
||||||
// return 200 as to not reveal if the user exists. Recommended by spec.
|
|
||||||
return Ok(report_user::v3::Response {});
|
|
||||||
}
|
|
||||||
|
|
||||||
let report = Report {
|
|
||||||
sender: sender_user.to_owned(),
|
|
||||||
room_id: None,
|
|
||||||
event_id: None,
|
|
||||||
user_id: Some(body.user_id.clone()),
|
|
||||||
report_type: "user".to_owned(),
|
|
||||||
reason: body.reason.clone(),
|
|
||||||
score: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
info!(
|
|
||||||
"Received room report from {sender_user} for user {} with reason: \"{}\"",
|
|
||||||
body.user_id,
|
|
||||||
body.reason.as_deref().unwrap_or("")
|
|
||||||
);
|
|
||||||
|
|
||||||
services.admin.send_message(build_report(report)).await.ok();
|
|
||||||
|
|
||||||
Ok(report_user::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// in the following order:
|
/// in the following order:
|
||||||
///
|
///
|
||||||
/// check if the room ID from the URI matches the PDU's room ID
|
/// check if the room ID from the URI matches the PDU's room ID
|
||||||
|
@ -201,16 +151,23 @@ async fn is_event_report_valid(
|
||||||
);
|
);
|
||||||
|
|
||||||
if room_id != pdu.room_id {
|
if room_id != pdu.room_id {
|
||||||
return Err!(Request(NotFound("Event ID does not belong to the reported room",)));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Event ID does not belong to the reported room",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if score.is_some_and(|s| s > int!(0) || s < int!(-100)) {
|
if score.is_some_and(|s| s > int!(0) || s < int!(-100)) {
|
||||||
return Err!(Request(InvalidParam("Invalid score, must be within 0 to -100",)));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Invalid score, must be within 0 to -100",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if reason.as_ref().is_some_and(|s| s.len() > 750) {
|
if reason.as_ref().is_some_and(|s| s.len() > 750) {
|
||||||
return Err!(Request(
|
return Err(Error::BadRequest(
|
||||||
InvalidParam("Reason too long, should be 750 characters or fewer",)
|
ErrorKind::InvalidParam,
|
||||||
|
"Reason too long, should be 750 characters or fewer",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -221,35 +178,15 @@ async fn is_event_report_valid(
|
||||||
.ready_any(|user_id| user_id == sender_user)
|
.ready_any(|user_id| user_id == sender_user)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!(Request(NotFound("You are not in the room you are reporting.",)));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"You are not in the room you are reporting.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Builds a report message to be sent to the admin room.
|
|
||||||
fn build_report(report: Report) -> RoomMessageEventContent {
|
|
||||||
let mut text =
|
|
||||||
format!("@room New {} report received from {}:\n\n", report.report_type, report.sender);
|
|
||||||
if report.user_id.is_some() {
|
|
||||||
let _ = writeln!(text, "- Reported User ID: `{}`", report.user_id.unwrap());
|
|
||||||
}
|
|
||||||
if report.room_id.is_some() {
|
|
||||||
let _ = writeln!(text, "- Reported Room ID: `{}`", report.room_id.unwrap());
|
|
||||||
}
|
|
||||||
if report.event_id.is_some() {
|
|
||||||
let _ = writeln!(text, "- Reported Event ID: `{}`", report.event_id.unwrap());
|
|
||||||
}
|
|
||||||
if let Some(score) = report.score {
|
|
||||||
let _ = writeln!(text, "- User-supplied offensiveness score: {}%", score.mul(int!(-1)));
|
|
||||||
}
|
|
||||||
if let Some(reason) = report.reason {
|
|
||||||
let _ = writeln!(text, "- Report Reason: {reason}");
|
|
||||||
}
|
|
||||||
|
|
||||||
RoomMessageEventContent::text_markdown(text).add_mentions(Mentions::with_room_mention())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// even though this is kinda security by obscurity, let's still make a small
|
/// even though this is kinda security by obscurity, let's still make a small
|
||||||
/// random delay sending a response per spec suggestion regarding
|
/// random delay sending a response per spec suggestion regarding
|
||||||
/// enumerating for potential events existing in our server.
|
/// enumerating for potential events existing in our server.
|
||||||
|
@ -259,6 +196,5 @@ async fn delay_response() {
|
||||||
"Got successful /report request, waiting {time_to_wait} seconds before sending \
|
"Got successful /report request, waiting {time_to_wait} seconds before sending \
|
||||||
successful response."
|
successful response."
|
||||||
);
|
);
|
||||||
|
|
||||||
sleep(Duration::from_secs(time_to_wait)).await;
|
sleep(Duration::from_secs(time_to_wait)).await;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{Err, Result};
|
use conduwuit::{Error, Result};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::api::client::room::aliases;
|
use ruma::api::client::{error::ErrorKind, room::aliases};
|
||||||
|
|
||||||
use crate::Ruma;
|
use crate::Ruma;
|
||||||
|
|
||||||
|
@ -15,7 +15,7 @@ pub(crate) async fn get_room_aliases_route(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
body: Ruma<aliases::v3::Request>,
|
body: Ruma<aliases::v3::Request>,
|
||||||
) -> Result<aliases::v3::Response> {
|
) -> Result<aliases::v3::Response> {
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !services
|
if !services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -23,7 +23,10 @@ pub(crate) async fn get_room_aliases_route(
|
||||||
.user_can_see_state_events(sender_user, &body.room_id)
|
.user_can_see_state_events(sender_user, &body.room_id)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!(Request(Forbidden("You don't have permission to view this room.",)));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::forbidden(),
|
||||||
|
"You don't have permission to view this room.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(aliases::v3::Response {
|
Ok(aliases::v3::Response {
|
||||||
|
|
|
@ -2,7 +2,7 @@ use std::collections::BTreeMap;
|
||||||
|
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result, debug_info, debug_warn, err, info,
|
Err, Error, Result, debug_info, debug_warn, err, error, info,
|
||||||
matrix::{StateKey, pdu::PduBuilder},
|
matrix::{StateKey, pdu::PduBuilder},
|
||||||
warn,
|
warn,
|
||||||
};
|
};
|
||||||
|
@ -10,7 +10,10 @@ use conduwuit_service::{Services, appservice::RegistrationInfo};
|
||||||
use futures::FutureExt;
|
use futures::FutureExt;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId,
|
CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId,
|
||||||
api::client::room::{self, create_room},
|
api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
room::{self, create_room},
|
||||||
|
},
|
||||||
events::{
|
events::{
|
||||||
TimelineEventType,
|
TimelineEventType,
|
||||||
room::{
|
room::{
|
||||||
|
@ -55,13 +58,16 @@ pub(crate) async fn create_room_route(
|
||||||
) -> Result<create_room::v3::Response> {
|
) -> Result<create_room::v3::Response> {
|
||||||
use create_room::v3::RoomPreset;
|
use create_room::v3::RoomPreset;
|
||||||
|
|
||||||
let sender_user = body.sender_user();
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !services.globals.allow_room_creation()
|
if !services.globals.allow_room_creation()
|
||||||
&& body.appservice_info.is_none()
|
&& body.appservice_info.is_none()
|
||||||
&& !services.users.is_admin(sender_user).await
|
&& !services.users.is_admin(sender_user).await
|
||||||
{
|
{
|
||||||
return Err!(Request(Forbidden("Room creation has been disabled.",)));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::forbidden(),
|
||||||
|
"Room creation has been disabled.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if services.users.is_suspended(sender_user).await? {
|
if services.users.is_suspended(sender_user).await? {
|
||||||
|
@ -75,7 +81,10 @@ pub(crate) async fn create_room_route(
|
||||||
|
|
||||||
// check if room ID doesn't already exist instead of erroring on auth check
|
// check if room ID doesn't already exist instead of erroring on auth check
|
||||||
if services.rooms.short.get_shortroomid(&room_id).await.is_ok() {
|
if services.rooms.short.get_shortroomid(&room_id).await.is_ok() {
|
||||||
return Err!(Request(RoomInUse("Room with that custom room ID already exists",)));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::RoomInUse,
|
||||||
|
"Room with that custom room ID already exists",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if body.visibility == room::Visibility::Public
|
if body.visibility == room::Visibility::Public
|
||||||
|
@ -83,17 +92,19 @@ pub(crate) async fn create_room_route(
|
||||||
&& !services.users.is_admin(sender_user).await
|
&& !services.users.is_admin(sender_user).await
|
||||||
&& body.appservice_info.is_none()
|
&& body.appservice_info.is_none()
|
||||||
{
|
{
|
||||||
warn!(
|
info!(
|
||||||
"Non-admin user {sender_user} tried to publish {room_id} to the room directory \
|
"Non-admin user {sender_user} tried to publish {0} to the room directory while \
|
||||||
while \"lockdown_public_room_directory\" is enabled"
|
\"lockdown_public_room_directory\" is enabled",
|
||||||
|
&room_id
|
||||||
);
|
);
|
||||||
|
|
||||||
if services.server.config.admin_room_notices {
|
if services.server.config.admin_room_notices {
|
||||||
services
|
services
|
||||||
.admin
|
.admin
|
||||||
.notice(&format!(
|
.send_text(&format!(
|
||||||
"Non-admin user {sender_user} tried to publish {room_id} to the room \
|
"Non-admin user {sender_user} tried to publish {0} to the room directory \
|
||||||
directory while \"lockdown_public_room_directory\" is enabled"
|
while \"lockdown_public_room_directory\" is enabled",
|
||||||
|
&room_id
|
||||||
))
|
))
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
|
@ -118,9 +129,10 @@ pub(crate) async fn create_room_route(
|
||||||
if services.server.supported_room_version(&room_version) {
|
if services.server.supported_room_version(&room_version) {
|
||||||
room_version
|
room_version
|
||||||
} else {
|
} else {
|
||||||
return Err!(Request(UnsupportedRoomVersion(
|
return Err(Error::BadRequest(
|
||||||
"This server does not support that room version."
|
ErrorKind::UnsupportedRoomVersion,
|
||||||
)));
|
"This server does not support that room version.",
|
||||||
|
));
|
||||||
},
|
},
|
||||||
| None => services.server.config.default_room_version.clone(),
|
| None => services.server.config.default_room_version.clone(),
|
||||||
};
|
};
|
||||||
|
@ -132,17 +144,16 @@ pub(crate) async fn create_room_route(
|
||||||
let mut content = content
|
let mut content = content
|
||||||
.deserialize_as::<CanonicalJsonObject>()
|
.deserialize_as::<CanonicalJsonObject>()
|
||||||
.map_err(|e| {
|
.map_err(|e| {
|
||||||
err!(Request(BadJson(error!(
|
error!("Failed to deserialise content as canonical JSON: {}", e);
|
||||||
"Failed to deserialise content as canonical JSON: {e}"
|
Error::bad_database("Failed to deserialise content as canonical JSON.")
|
||||||
))))
|
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
match room_version {
|
match room_version {
|
||||||
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
|
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
|
||||||
content.insert(
|
content.insert(
|
||||||
"creator".into(),
|
"creator".into(),
|
||||||
json!(&sender_user).try_into().map_err(|e| {
|
json!(&sender_user).try_into().map_err(|e| {
|
||||||
err!(Request(BadJson(debug_error!("Invalid creation content: {e}"))))
|
info!("Invalid creation content: {e}");
|
||||||
|
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
|
||||||
})?,
|
})?,
|
||||||
);
|
);
|
||||||
},
|
},
|
||||||
|
@ -152,9 +163,9 @@ pub(crate) async fn create_room_route(
|
||||||
}
|
}
|
||||||
content.insert(
|
content.insert(
|
||||||
"room_version".into(),
|
"room_version".into(),
|
||||||
json!(room_version.as_str())
|
json!(room_version.as_str()).try_into().map_err(|_| {
|
||||||
.try_into()
|
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
|
||||||
.map_err(|e| err!(Request(BadJson("Invalid creation content: {e}"))))?,
|
})?,
|
||||||
);
|
);
|
||||||
content
|
content
|
||||||
},
|
},
|
||||||
|
@ -163,13 +174,21 @@ pub(crate) async fn create_room_route(
|
||||||
|
|
||||||
let content = match room_version {
|
let content = match room_version {
|
||||||
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
|
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
|
||||||
RoomCreateEventContent::new_v1(sender_user.to_owned()),
|
RoomCreateEventContent::new_v1(sender_user.clone()),
|
||||||
| _ => RoomCreateEventContent::new_v11(),
|
| _ => RoomCreateEventContent::new_v11(),
|
||||||
};
|
};
|
||||||
let mut content =
|
let mut content = serde_json::from_str::<CanonicalJsonObject>(
|
||||||
serde_json::from_str::<CanonicalJsonObject>(to_raw_value(&content)?.get())
|
to_raw_value(&content)
|
||||||
|
.expect("we just created this as content was None")
|
||||||
|
.get(),
|
||||||
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
content.insert("room_version".into(), json!(room_version.as_str()).try_into()?);
|
content.insert(
|
||||||
|
"room_version".into(),
|
||||||
|
json!(room_version.as_str())
|
||||||
|
.try_into()
|
||||||
|
.expect("we just created this as content was None"),
|
||||||
|
);
|
||||||
content
|
content
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
@ -181,7 +200,8 @@ pub(crate) async fn create_room_route(
|
||||||
.build_and_append_pdu(
|
.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomCreate,
|
event_type: TimelineEventType::RoomCreate,
|
||||||
content: to_raw_value(&create_content)?,
|
content: to_raw_value(&create_content)
|
||||||
|
.expect("create event content serialization"),
|
||||||
state_key: Some(StateKey::new()),
|
state_key: Some(StateKey::new()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
|
@ -219,7 +239,7 @@ pub(crate) async fn create_room_route(
|
||||||
| _ => RoomPreset::PrivateChat, // Room visibility should not be custom
|
| _ => RoomPreset::PrivateChat, // Room visibility should not be custom
|
||||||
});
|
});
|
||||||
|
|
||||||
let mut users = BTreeMap::from_iter([(sender_user.to_owned(), int!(100))]);
|
let mut users = BTreeMap::from_iter([(sender_user.clone(), int!(100))]);
|
||||||
|
|
||||||
if preset == RoomPreset::TrustedPrivateChat {
|
if preset == RoomPreset::TrustedPrivateChat {
|
||||||
for invite in &body.invite {
|
for invite in &body.invite {
|
||||||
|
@ -247,7 +267,8 @@ pub(crate) async fn create_room_route(
|
||||||
.build_and_append_pdu(
|
.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: TimelineEventType::RoomPowerLevels,
|
event_type: TimelineEventType::RoomPowerLevels,
|
||||||
content: to_raw_value(&power_levels_content)?,
|
content: to_raw_value(&power_levels_content)
|
||||||
|
.expect("serialized power_levels event content"),
|
||||||
state_key: Some(StateKey::new()),
|
state_key: Some(StateKey::new()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
|
@ -336,7 +357,8 @@ pub(crate) async fn create_room_route(
|
||||||
// 6. Events listed in initial_state
|
// 6. Events listed in initial_state
|
||||||
for event in &body.initial_state {
|
for event in &body.initial_state {
|
||||||
let mut pdu_builder = event.deserialize_as::<PduBuilder>().map_err(|e| {
|
let mut pdu_builder = event.deserialize_as::<PduBuilder>().map_err(|e| {
|
||||||
err!(Request(InvalidParam(warn!("Invalid initial state event: {e:?}"))))
|
warn!("Invalid initial state event: {:?}", e);
|
||||||
|
Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.")
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
debug_info!("Room creation initial state event: {event:?}");
|
debug_info!("Room creation initial state event: {event:?}");
|
||||||
|
@ -345,7 +367,7 @@ pub(crate) async fn create_room_route(
|
||||||
// state event in there with the content of literally `{}` (not null or empty
|
// state event in there with the content of literally `{}` (not null or empty
|
||||||
// string), let's just skip it over and warn.
|
// string), let's just skip it over and warn.
|
||||||
if pdu_builder.content.get().eq("{}") {
|
if pdu_builder.content.get().eq("{}") {
|
||||||
debug_warn!("skipping empty initial state event with content of `{{}}`: {event:?}");
|
info!("skipping empty initial state event with content of `{{}}`: {event:?}");
|
||||||
debug_warn!("content: {}", pdu_builder.content.get());
|
debug_warn!("content: {}", pdu_builder.content.get());
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -492,7 +514,9 @@ fn default_power_levels_content(
|
||||||
|
|
||||||
if let Some(power_level_content_override) = power_level_content_override {
|
if let Some(power_level_content_override) = power_level_content_override {
|
||||||
let json: JsonObject = serde_json::from_str(power_level_content_override.json().get())
|
let json: JsonObject = serde_json::from_str(power_level_content_override.json().get())
|
||||||
.map_err(|e| err!(Request(BadJson("Invalid power_level_content_override: {e:?}"))))?;
|
.map_err(|_| {
|
||||||
|
Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.")
|
||||||
|
})?;
|
||||||
|
|
||||||
for (key, value) in json {
|
for (key, value) in json {
|
||||||
power_levels_content[key] = value;
|
power_levels_content[key] = value;
|
||||||
|
@ -510,14 +534,16 @@ async fn room_alias_check(
|
||||||
) -> Result<OwnedRoomAliasId> {
|
) -> Result<OwnedRoomAliasId> {
|
||||||
// Basic checks on the room alias validity
|
// Basic checks on the room alias validity
|
||||||
if room_alias_name.contains(':') {
|
if room_alias_name.contains(':') {
|
||||||
return Err!(Request(InvalidParam(
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
"Room alias contained `:` which is not allowed. Please note that this expects a \
|
"Room alias contained `:` which is not allowed. Please note that this expects a \
|
||||||
localpart, not the full room alias.",
|
localpart, not the full room alias.",
|
||||||
)));
|
));
|
||||||
} else if room_alias_name.contains(char::is_whitespace) {
|
} else if room_alias_name.contains(char::is_whitespace) {
|
||||||
return Err!(Request(InvalidParam(
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
"Room alias contained spaces which is not a valid room alias.",
|
"Room alias contained spaces which is not a valid room alias.",
|
||||||
)));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
// check if room alias is forbidden
|
// check if room alias is forbidden
|
||||||
|
@ -526,7 +552,7 @@ async fn room_alias_check(
|
||||||
.forbidden_alias_names()
|
.forbidden_alias_names()
|
||||||
.is_match(room_alias_name)
|
.is_match(room_alias_name)
|
||||||
{
|
{
|
||||||
return Err!(Request(Unknown("Room alias name is forbidden.")));
|
return Err(Error::BadRequest(ErrorKind::Unknown, "Room alias name is forbidden."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let server_name = services.globals.server_name();
|
let server_name = services.globals.server_name();
|
||||||
|
@ -546,19 +572,25 @@ async fn room_alias_check(
|
||||||
.await
|
.await
|
||||||
.is_ok()
|
.is_ok()
|
||||||
{
|
{
|
||||||
return Err!(Request(RoomInUse("Room alias already exists.")));
|
return Err(Error::BadRequest(ErrorKind::RoomInUse, "Room alias already exists."));
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(info) = appservice_info {
|
if let Some(info) = appservice_info {
|
||||||
if !info.aliases.is_match(full_room_alias.as_str()) {
|
if !info.aliases.is_match(full_room_alias.as_str()) {
|
||||||
return Err!(Request(Exclusive("Room alias is not in namespace.")));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Exclusive,
|
||||||
|
"Room alias is not in namespace.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
} else if services
|
} else if services
|
||||||
.appservice
|
.appservice
|
||||||
.is_exclusive_alias(&full_room_alias)
|
.is_exclusive_alias(&full_room_alias)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!(Request(Exclusive("Room alias reserved by appservice.",)));
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Exclusive,
|
||||||
|
"Room alias reserved by appservice.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
debug_info!("Full room alias: {full_room_alias}");
|
debug_info!("Full room alias: {full_room_alias}");
|
||||||
|
@ -574,33 +606,24 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result<Own
|
||||||
.forbidden_alias_names()
|
.forbidden_alias_names()
|
||||||
.is_match(custom_room_id)
|
.is_match(custom_room_id)
|
||||||
{
|
{
|
||||||
return Err!(Request(Unknown("Custom room ID is forbidden.")));
|
return Err(Error::BadRequest(ErrorKind::Unknown, "Custom room ID is forbidden."));
|
||||||
}
|
|
||||||
|
|
||||||
if custom_room_id.contains(':') {
|
|
||||||
return Err!(Request(InvalidParam(
|
|
||||||
"Custom room ID contained `:` which is not allowed. Please note that this expects a \
|
|
||||||
localpart, not the full room ID.",
|
|
||||||
)));
|
|
||||||
} else if custom_room_id.contains(char::is_whitespace) {
|
|
||||||
return Err!(Request(InvalidParam(
|
|
||||||
"Custom room ID contained spaces which is not valid."
|
|
||||||
)));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let server_name = services.globals.server_name();
|
let server_name = services.globals.server_name();
|
||||||
let mut room_id = custom_room_id.to_owned();
|
let mut room_id = custom_room_id.to_owned();
|
||||||
if custom_room_id.contains(':') {
|
if custom_room_id.contains(':') {
|
||||||
if !custom_room_id.starts_with('!') {
|
if !custom_room_id.starts_with('!') {
|
||||||
return Err!(Request(InvalidParam(
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
"Custom room ID contains an unexpected `:` which is not allowed.",
|
"Custom room ID contains an unexpected `:` which is not allowed.",
|
||||||
)));
|
));
|
||||||
}
|
}
|
||||||
} else if custom_room_id.starts_with('!') {
|
} else if custom_room_id.starts_with('!') {
|
||||||
return Err!(Request(InvalidParam(
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
"Room ID is prefixed with !, but is not fully qualified. You likely did not want \
|
"Room ID is prefixed with !, but is not fully qualified. You likely did not want \
|
||||||
this.",
|
this.",
|
||||||
)));
|
));
|
||||||
} else {
|
} else {
|
||||||
room_id = format!("!{custom_room_id}:{server_name}");
|
room_id = format!("!{custom_room_id}:{server_name}");
|
||||||
}
|
}
|
||||||
|
@ -612,7 +635,10 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result<Own
|
||||||
.expect("failed to extract server name from room ID")
|
.expect("failed to extract server name from room ID")
|
||||||
!= server_name
|
!= server_name
|
||||||
{
|
{
|
||||||
Err!(Request(InvalidParam("Custom room ID must be on this server.",)))
|
Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"Custom room ID must be on this server.",
|
||||||
|
))
|
||||||
} else {
|
} else {
|
||||||
Ok(full_room_id)
|
Ok(full_room_id)
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,5 +40,5 @@ pub(crate) async fn get_room_event_route(
|
||||||
|
|
||||||
event.add_age().ok();
|
event.add_age().ok();
|
||||||
|
|
||||||
Ok(get_room_event::v3::Response { event: event.into_format() })
|
Ok(get_room_event::v3::Response { event: event.into_room_event() })
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,9 +1,9 @@
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Event, Result, at,
|
Err, PduEvent, Result, at,
|
||||||
utils::{BoolExt, stream::TryTools},
|
utils::{BoolExt, stream::TryTools},
|
||||||
};
|
};
|
||||||
use futures::{FutureExt, TryStreamExt, future::try_join4};
|
use futures::TryStreamExt;
|
||||||
use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response};
|
use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response};
|
||||||
|
|
||||||
use crate::Ruma;
|
use crate::Ruma;
|
||||||
|
@ -25,32 +25,21 @@ pub(crate) async fn room_initial_sync_route(
|
||||||
return Err!(Request(Forbidden("No room preview available.")));
|
return Err!(Request(Forbidden("No room preview available.")));
|
||||||
}
|
}
|
||||||
|
|
||||||
let membership = services
|
|
||||||
.rooms
|
|
||||||
.state_cache
|
|
||||||
.user_membership(body.sender_user(), room_id)
|
|
||||||
.map(Ok);
|
|
||||||
|
|
||||||
let visibility = services.rooms.directory.visibility(room_id).map(Ok);
|
|
||||||
|
|
||||||
let state = services
|
|
||||||
.rooms
|
|
||||||
.state_accessor
|
|
||||||
.room_state_full_pdus(room_id)
|
|
||||||
.map_ok(Event::into_format)
|
|
||||||
.try_collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let limit = LIMIT_MAX;
|
let limit = LIMIT_MAX;
|
||||||
let events = services
|
let events: Vec<_> = services
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.timeline
|
||||||
.pdus_rev(None, room_id, None)
|
.pdus_rev(None, room_id, None)
|
||||||
.try_take(limit)
|
.try_take(limit)
|
||||||
.try_collect::<Vec<_>>();
|
.try_collect()
|
||||||
|
.await?;
|
||||||
|
|
||||||
let (membership, visibility, state, events) =
|
let state: Vec<_> = services
|
||||||
try_join4(membership, visibility, state, events)
|
.rooms
|
||||||
.boxed()
|
.state_accessor
|
||||||
|
.room_state_full_pdus(room_id)
|
||||||
|
.map_ok(PduEvent::into_state_event)
|
||||||
|
.try_collect()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let messages = PaginationChunk {
|
let messages = PaginationChunk {
|
||||||
|
@ -66,7 +55,7 @@ pub(crate) async fn room_initial_sync_route(
|
||||||
chunk: events
|
chunk: events
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(at!(1))
|
.map(at!(1))
|
||||||
.map(Event::into_format)
|
.map(PduEvent::into_room_event)
|
||||||
.collect(),
|
.collect(),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -75,7 +64,11 @@ pub(crate) async fn room_initial_sync_route(
|
||||||
account_data: None,
|
account_data: None,
|
||||||
state: state.into(),
|
state: state.into(),
|
||||||
messages: messages.chunk.is_empty().or_some(messages),
|
messages: messages.chunk.is_empty().or_some(messages),
|
||||||
visibility: visibility.into(),
|
visibility: services.rooms.directory.visibility(room_id).await.into(),
|
||||||
membership,
|
membership: services
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.user_membership(body.sender_user(), room_id)
|
||||||
|
.await,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -43,9 +43,10 @@ pub(crate) async fn get_room_summary_legacy(
|
||||||
}
|
}
|
||||||
|
|
||||||
/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`
|
/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`
|
||||||
/// # `GET /_matrix/client/v1/room_summary/{roomIdOrAlias}`
|
|
||||||
///
|
///
|
||||||
/// Returns a short description of the state of a room.
|
/// Returns a short description of the state of a room.
|
||||||
|
///
|
||||||
|
/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266)
|
||||||
#[tracing::instrument(skip_all, fields(%client), name = "room_summary")]
|
#[tracing::instrument(skip_all, fields(%client), name = "room_summary")]
|
||||||
pub(crate) async fn get_room_summary(
|
pub(crate) async fn get_room_summary(
|
||||||
State(services): State<crate::State>,
|
State(services): State<crate::State>,
|
||||||
|
@ -112,15 +113,13 @@ async fn local_room_summary_response(
|
||||||
) -> Result<get_summary::msc3266::Response> {
|
) -> Result<get_summary::msc3266::Response> {
|
||||||
trace!(?sender_user, "Sending local room summary response for {room_id:?}");
|
trace!(?sender_user, "Sending local room summary response for {room_id:?}");
|
||||||
let join_rule = services.rooms.state_accessor.get_join_rules(room_id);
|
let join_rule = services.rooms.state_accessor.get_join_rules(room_id);
|
||||||
|
|
||||||
let world_readable = services.rooms.state_accessor.is_world_readable(room_id);
|
let world_readable = services.rooms.state_accessor.is_world_readable(room_id);
|
||||||
|
|
||||||
let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id);
|
let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id);
|
||||||
|
|
||||||
let (join_rule, world_readable, guest_can_join) =
|
let (join_rule, world_readable, guest_can_join) =
|
||||||
join3(join_rule, world_readable, guest_can_join).await;
|
join3(join_rule, world_readable, guest_can_join).await;
|
||||||
|
|
||||||
trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}");
|
trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}");
|
||||||
|
|
||||||
user_can_see_summary(
|
user_can_see_summary(
|
||||||
services,
|
services,
|
||||||
room_id,
|
room_id,
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue