Compare commits

...

11 commits

Author SHA1 Message Date
nexy7574
5e71470131
fix(hydra): Fix ruma dependency 2025-08-30 17:01:12 +01:00
nexy7574
cfd68efb99
style: Reformat and whatnot 2025-08-30 17:00:31 +01:00
nexy7574
327fa02cd9
feat(hydra): Initial public commit for v12 support
# Conflicts:
#	src/core/info/room_version.rs
#	src/service/rooms/timeline/create.rs
2025-08-30 16:55:21 +01:00
Tom Foster
609e239436 fix(fedora): Correct linting issues in RPM spec file
The Fedora RPM packaging files added in PR #950 weren't passing pre-commit
checks, causing CI failures for any branches rebased after that merge. This
applies prek linting fixes (typo correction, trailing whitespace removal,
and EOF newline) to ensure CI passes for all contributors.
2025-08-30 16:10:41 +01:00
Ginger
34417c96ae Update URL to point at the landing page 2025-08-28 21:10:46 +00:00
Ginger
f33f281edb Update long description to match deb package 2025-08-28 21:10:46 +00:00
Ginger
ddbca59193 Add spec and service files for creating an RPM package 2025-08-28 21:10:46 +00:00
Tom Foster
b5a2e49ae4 fix: Resolve Clippy CI failures from elided lifetime warnings
The latest Rust nightly compiler (2025-08-27) introduced the
elided-named-lifetimes lint which causes Clippy CI checks to fail
when an elided lifetime ('_) resolves to a named lifetime that's
already in scope.

This commit fixes the Clippy warnings by:
- Making lifetime relationships explicit where 'a is already in scope
- Keeping elided lifetimes ('_) in functions without explicit
  lifetime parameters
- Ensuring proper lifetime handling in the database pool module

Affected files (17 total):
- Database map modules: Handle, Key, and KeyVal references in get,
  qry, keys, and stream operations
- Database pool module: into_recv_seek function

This change resolves the CI build failures without changing any
functionality, ensuring the codebase remains compatible with the
latest nightly Clippy checks.
2025-08-28 21:13:19 +01:00
Jade Ellis
37248a4f68
chore: Add reasons for test skips 2025-08-28 20:10:05 +01:00
Tom Foster
dd22325ea2 refactor(ci): Consolidate Rust checks with optimised toolchain setup
Merge rust-checks.yml into prek-checks.yml for a unified workflow that
runs formatting and clippy/test checks in parallel jobs.

Add reusable composite actions:
- setup-rust: Smart Rust toolchain management with caching
  * Uses cargo-binstall for pre-built binary downloads
  * Integrates Mozilla sccache-action for compilation caching
  * Workspace-relative paths for better cache control
  * GitHub token support for improved rate limits
- setup-llvm-with-apt: LLVM installation with native dependencies
- detect-runner-os: Consistent OS detection for cache keys

Key improvements:
- Install prek via cargo-binstall --git (crates.io outdated at v0.0.1)
- Download timelord-cli from cargo-quickinstall
- Set BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10 to avoid rate limit delays
- Default Rust version 1.87.0 with override support
- Remove redundant sccache stats (handled by Mozilla action)

Significantly reduces CI runtime through binary downloads instead of
compilation while maintaining all existing quality checks.
2025-08-28 19:20:14 +01:00
nex
30a56d5cb9
Update renovate.json 2025-08-28 17:15:32 +00:00
93 changed files with 1669 additions and 617 deletions

View file

@ -0,0 +1,39 @@
name: detect-runner-os
description: |
  Detect the actual OS name and version of the runner.
  Provides separate outputs for name, version, and a combined slug.

outputs:
  name:
    description: 'OS name (e.g. Ubuntu, Debian)'
    value: ${{ steps.detect.outputs.name }}
  version:
    description: 'OS version (e.g. 22.04, 11)'
    value: ${{ steps.detect.outputs.version }}
  slug:
    description: 'Combined OS slug (e.g. Ubuntu-22.04)'
    value: ${{ steps.detect.outputs.slug }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: detect
      shell: bash
      run: |
        # Detect OS version (try lsb_release first, fall back to /etc/os-release)
        OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)

        # Detect OS name and capitalise (try lsb_release first, fall back to /etc/os-release)
        OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"' | sed 's/\b\(.\)/\u\1/g')

        # Create combined slug
        OS_SLUG="${OS_NAME}-${OS_VERSION}"

        # Set outputs (quoted: GITHUB_OUTPUT is a path and could contain spaces)
        echo "name=${OS_NAME}" >> "$GITHUB_OUTPUT"
        echo "version=${OS_VERSION}" >> "$GITHUB_OUTPUT"
        echo "slug=${OS_SLUG}" >> "$GITHUB_OUTPUT"

        # Log detection results
        echo "🔍 Detected Runner OS: ${OS_NAME} ${OS_VERSION}"

View file

@ -0,0 +1,167 @@
name: setup-llvm-with-apt
description: |
  Set up LLVM toolchain with APT package management and smart caching.
  Supports cross-compilation architectures and additional package installation.
  Creates symlinks in /usr/bin: clang, clang++, lld, llvm-ar, llvm-ranlib

inputs:
  dpkg-arch:
    description: 'Debian architecture for cross-compilation (e.g. arm64)'
    required: false
    default: ''
  extra-packages:
    description: 'Additional APT packages to install (space-separated)'
    required: false
    default: ''
  llvm-version:
    description: 'LLVM version to install'
    required: false
    default: '20'

outputs:
  llvm-version:
    description: 'Installed LLVM version'
    value: ${{ steps.configure.outputs.version }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: runner-os
      uses: ./.forgejo/actions/detect-runner-os

    - name: Configure cross-compilation architecture
      if: inputs.dpkg-arch != ''
      shell: bash
      run: |
        echo "🏗️ Adding ${{ inputs.dpkg-arch }} architecture"
        sudo dpkg --add-architecture ${{ inputs.dpkg-arch }}

        # Restrict default sources to amd64, so the foreign arch only resolves
        # against the ports mirror added below
        sudo sed -i 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list
        sudo sed -i 's/^deb https/deb [arch=amd64] https/g' /etc/apt/sources.list

        # Add ports sources for foreign architecture
        sudo tee /etc/apt/sources.list.d/${{ inputs.dpkg-arch }}.list > /dev/null <<EOF
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
        EOF
        echo "✅ Architecture ${{ inputs.dpkg-arch }} configured"

    - name: Start LLVM cache group
      shell: bash
      run: echo "::group::📦 Restoring LLVM cache"

    - name: Check for LLVM cache
      id: cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          /usr/bin/clang-*
          /usr/bin/clang++-*
          /usr/bin/lld-*
          /usr/bin/llvm-*
          /usr/lib/llvm-*/
          /usr/lib/x86_64-linux-gnu/libLLVM*.so*
          /usr/lib/x86_64-linux-gnu/libclang*.so*
          /etc/apt/sources.list.d/archive_uri-*
          /etc/apt/trusted.gpg.d/apt.llvm.org.asc
        key: llvm-${{ steps.runner-os.outputs.slug }}-v${{ inputs.llvm-version }}-v3-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}

    - name: End LLVM cache group
      shell: bash
      run: echo "::endgroup::"

    - name: Check and install LLVM if needed
      id: llvm-setup
      shell: bash
      run: |
        echo "🔍 Checking for LLVM ${{ inputs.llvm-version }}..."

        # Verify both binaries AND libraries exist before trusting a cache hit;
        # a partial restore would otherwise break later build steps
        if [ -f "/usr/bin/clang-${{ inputs.llvm-version }}" ] && \
           [ -f "/usr/bin/clang++-${{ inputs.llvm-version }}" ] && \
           [ -f "/usr/bin/lld-${{ inputs.llvm-version }}" ] && \
           ([ -f "/usr/lib/x86_64-linux-gnu/libLLVM.so.${{ inputs.llvm-version }}.1" ] || \
            [ -f "/usr/lib/x86_64-linux-gnu/libLLVM-${{ inputs.llvm-version }}.so.1" ] || \
            [ -f "/usr/lib/llvm-${{ inputs.llvm-version }}/lib/libLLVM.so" ]); then
          echo "✅ LLVM ${{ inputs.llvm-version }} found and verified"
          echo "needs-install=false" >> "$GITHUB_OUTPUT"
        else
          echo "📦 LLVM ${{ inputs.llvm-version }} not found or incomplete - installing..."
          echo "::group::🔧 Installing LLVM ${{ inputs.llvm-version }}"
          wget -O - https://apt.llvm.org/llvm.sh | bash -s -- ${{ inputs.llvm-version }}
          echo "::endgroup::"
          if [ ! -f "/usr/bin/clang-${{ inputs.llvm-version }}" ]; then
            echo "❌ Failed to install LLVM ${{ inputs.llvm-version }}"
            exit 1
          fi
          echo "✅ Installed LLVM ${{ inputs.llvm-version }}"
          echo "needs-install=true" >> "$GITHUB_OUTPUT"
        fi

    - name: Prepare for additional packages
      if: inputs.extra-packages != ''
      shell: bash
      run: |
        # Update APT only if LLVM was cached (installer script already does apt-get update)
        if [[ "${{ steps.llvm-setup.outputs.needs-install }}" != "true" ]]; then
          echo "::group::📦 Running apt-get update (LLVM cached, extra packages needed)"
          sudo apt-get update
          echo "::endgroup::"
        fi
        echo "::group::📦 Installing additional packages"

    - name: Install additional packages
      if: inputs.extra-packages != ''
      uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
      with:
        packages: ${{ inputs.extra-packages }}
        version: 1.0

    - name: End package installation group
      if: inputs.extra-packages != ''
      shell: bash
      run: echo "::endgroup::"

    - name: Configure LLVM environment
      id: configure
      shell: bash
      run: |
        echo "::group::🔧 Configuring LLVM ${{ inputs.llvm-version }} environment"

        # Create unversioned symlinks so build scripts can call plain clang/lld
        sudo ln -sf "/usr/bin/clang-${{ inputs.llvm-version }}" /usr/bin/clang
        sudo ln -sf "/usr/bin/clang++-${{ inputs.llvm-version }}" /usr/bin/clang++
        sudo ln -sf "/usr/bin/lld-${{ inputs.llvm-version }}" /usr/bin/lld
        sudo ln -sf "/usr/bin/llvm-ar-${{ inputs.llvm-version }}" /usr/bin/llvm-ar
        sudo ln -sf "/usr/bin/llvm-ranlib-${{ inputs.llvm-version }}" /usr/bin/llvm-ranlib
        echo "  ✓ Created symlinks"

        # Setup library paths (LIBCLANG_PATH is consumed by bindgen-based crates)
        LLVM_LIB_PATH="/usr/lib/llvm-${{ inputs.llvm-version }}/lib"
        if [ -d "$LLVM_LIB_PATH" ]; then
          echo "LD_LIBRARY_PATH=${LLVM_LIB_PATH}:${LD_LIBRARY_PATH:-}" >> "$GITHUB_ENV"
          echo "LIBCLANG_PATH=${LLVM_LIB_PATH}" >> "$GITHUB_ENV"
          echo "$LLVM_LIB_PATH" | sudo tee "/etc/ld.so.conf.d/llvm-${{ inputs.llvm-version }}.conf" > /dev/null
          sudo ldconfig
          echo "  ✓ Configured library paths"
        else
          # Fallback to standard library location
          if [ -d "/usr/lib/x86_64-linux-gnu" ]; then
            echo "LIBCLANG_PATH=/usr/lib/x86_64-linux-gnu" >> "$GITHUB_ENV"
            echo "  ✓ Using fallback library path"
          fi
        fi

        # Set output
        echo "version=${{ inputs.llvm-version }}" >> "$GITHUB_OUTPUT"
        echo "::endgroup::"
        echo "✅ LLVM ready: $(clang --version | head -1)"

View file

@ -0,0 +1,236 @@
name: setup-rust
description: |
  Set up Rust toolchain with sccache for compilation caching.
  Respects rust-toolchain.toml by default or accepts explicit version override.

inputs:
  cache-key-suffix:
    description: 'Optional suffix for cache keys (e.g. platform identifier)'
    required: false
    default: ''
  rust-components:
    description: 'Additional Rust components to install (space-separated)'
    required: false
    default: ''
  rust-target:
    description: 'Rust target triple (e.g. x86_64-unknown-linux-gnu)'
    required: false
    default: ''
  rust-version:
    description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
    required: false
    default: '1.87.0'
  sccache-cache-limit:
    # NOTE(review): this input is declared but no step below applies it
    # (e.g. via SCCACHE_CACHE_SIZE) — confirm intent or wire it up
    description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
    required: false
    default: '2G'
  github-token:
    description: 'GitHub token for downloading sccache from GitHub releases'
    required: false
    default: ''

outputs:
  rust-version:
    description: 'Installed Rust version'
    value: ${{ steps.rust-setup.outputs.version }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: runner-os
      uses: ./.forgejo/actions/detect-runner-os

    - name: Configure Cargo environment
      shell: bash
      run: |
        # Use workspace-relative paths for better control and consistency
        echo "CARGO_HOME=${{ github.workspace }}/.cargo" >> "$GITHUB_ENV"
        echo "CARGO_TARGET_DIR=${{ github.workspace }}/target" >> "$GITHUB_ENV"
        echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> "$GITHUB_ENV"
        echo "RUSTUP_HOME=${{ github.workspace }}/.rustup" >> "$GITHUB_ENV"

        # Limit binstall resolution timeout to avoid GitHub rate limit delays
        echo "BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10" >> "$GITHUB_ENV"

        # Ensure directories exist for first run
        mkdir -p "${{ github.workspace }}/.cargo"
        mkdir -p "${{ github.workspace }}/.sccache"
        mkdir -p "${{ github.workspace }}/target"
        mkdir -p "${{ github.workspace }}/.rustup"

    - name: Start cache restore group
      shell: bash
      run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"

    - name: Cache Cargo registry and git
      id: registry-cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          .cargo/registry/index
          .cargo/registry/cache
          .cargo/git/db
        # Registry cache saved per workflow, restored from any workflow's cache
        # Each workflow maintains its own registry that accumulates its needed crates
        key: cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ github.workflow }}
        restore-keys: |
          cargo-registry-${{ steps.runner-os.outputs.slug }}-

    - name: Cache toolchain binaries
      id: toolchain-cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          .cargo/bin
          .rustup/toolchains
          .rustup/update-hashes
        # Shared toolchain cache across all Rust versions
        key: toolchain-${{ steps.runner-os.outputs.slug }}

    - name: Debug GitHub token availability
      shell: bash
      run: |
        if [ -z "${{ inputs.github-token }}" ]; then
          echo "⚠️ No GitHub token provided - sccache will use fallback download method"
        else
          echo "✅ GitHub token provided for sccache"
        fi

    - name: Setup sccache
      uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
      with:
        token: ${{ inputs.github-token }}

    - name: Cache build artifacts
      id: build-cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          target/**/deps
          !target/**/deps/*.rlib
          target/**/build
          target/**/.fingerprint
          target/**/incremental
          target/**/*.d
          /timelord/
        # Build artifacts - cache per code change, restore from deps when code changes
        key: >-
          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
        restore-keys: |
          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-

    - name: End cache restore group
      shell: bash
      run: echo "::endgroup::"

    - name: Setup Rust toolchain
      shell: bash
      run: |
        # Install rustup if not already cached
        if ! command -v rustup &> /dev/null; then
          echo "::group::📦 Installing rustup"
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
          source "$CARGO_HOME/env"
          echo "::endgroup::"
        else
          echo "✅ rustup already available"
        fi

        # Setup the appropriate Rust version
        if [[ -n "${{ inputs.rust-version }}" ]]; then
          echo "::group::📦 Setting up Rust ${{ inputs.rust-version }}"
          # Set override first to prevent rust-toolchain.toml from auto-installing
          rustup override set ${{ inputs.rust-version }} 2>/dev/null || true
          # Check if we need to install/update the toolchain
          if rustup toolchain list | grep -q "^${{ inputs.rust-version }}-"; then
            rustup update ${{ inputs.rust-version }}
          else
            rustup toolchain install ${{ inputs.rust-version }} --profile minimal -c cargo,clippy,rustfmt
          fi
        else
          echo "::group::📦 Setting up Rust from rust-toolchain.toml"
          rustup show
        fi
        echo "::endgroup::"

    - name: Configure PATH and install tools
      shell: bash
      env:
        GITHUB_TOKEN: ${{ inputs.github-token }}
      run: |
        # Add .cargo/bin to PATH permanently for all subsequent steps
        echo "${{ github.workspace }}/.cargo/bin" >> "$GITHUB_PATH"
        # For this step only, we need to add it to PATH since GITHUB_PATH takes effect in the next step
        export PATH="${{ github.workspace }}/.cargo/bin:$PATH"

        # Install cargo-binstall for fast binary installations
        if command -v cargo-binstall &> /dev/null; then
          echo "✅ cargo-binstall already available"
        else
          echo "::group::📦 Installing cargo-binstall"
          curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
          echo "::endgroup::"
        fi

        if command -v prek &> /dev/null; then
          echo "✅ prek already available"
        else
          echo "::group::📦 Installing prek"
          # prek isn't regularly published to crates.io, so we use git source
          cargo-binstall -y --no-symlinks --git https://github.com/j178/prek prek
          echo "::endgroup::"
        fi

        if command -v timelord &> /dev/null; then
          echo "✅ timelord already available"
        else
          echo "::group::📦 Installing timelord"
          cargo-binstall -y --no-symlinks timelord-cli
          echo "::endgroup::"
        fi

    - name: Configure sccache environment
      shell: bash
      run: |
        echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV"
        echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV"
        echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV"
        echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV"
        echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV"

        # Configure incremental compilation GC
        # If we restored from old cache (partial hit), clean up aggressively
        if [[ "${{ steps.build-cache.outputs.cache-hit }}" != "true" ]]; then
          echo "♻️ Partial cache hit - enabling cache cleanup"
          echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> "$GITHUB_ENV"
        fi

    - name: Install Rust components
      if: inputs.rust-components != ''
      shell: bash
      run: |
        echo "📦 Installing components: ${{ inputs.rust-components }}"
        rustup component add ${{ inputs.rust-components }}

    - name: Install Rust target
      if: inputs.rust-target != ''
      shell: bash
      run: |
        echo "📦 Installing target: ${{ inputs.rust-target }}"
        rustup target add ${{ inputs.rust-target }}

    - name: Output version and summary
      id: rust-setup
      shell: bash
      run: |
        RUST_VERSION=$(rustc --version | cut -d' ' -f2)
        echo "version=$RUST_VERSION" >> "$GITHUB_OUTPUT"
        echo "📋 Setup complete:"
        echo "  Rust: $(rustc --version)"
        echo "  Cargo: $(cargo --version)"
        echo "  prek: $(prek --version 2>/dev/null || echo 'installed')"
        echo "  timelord: $(timelord --version 2>/dev/null || echo 'installed')"

View file

@ -2,7 +2,6 @@ name: Checks / Prek
on: on:
push: push:
pull_request:
permissions: permissions:
contents: read contents: read
@ -17,18 +16,64 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- name: Install uv - name: Setup Rust nightly
uses: https://github.com/astral-sh/setup-uv@v5 uses: ./.forgejo/actions/setup-rust
with: with:
enable-cache: true rust-version: nightly
ignore-nothing-to-cache: true github-token: ${{ secrets.GH_PUBLIC_RO }}
cache-dependency-glob: ''
- name: Run prek - name: Run prek
run: | run: |
uvx prek run \ prek run \
--all-files \ --all-files \
--hook-stage manual \ --hook-stage manual \
--show-diff-on-failure \ --show-diff-on-failure \
--color=always \ --color=always \
-v -v
- name: Check Rust formatting
run: |
cargo +nightly fmt --all -- --check && \
echo "✅ Formatting check passed" || \
exit 1
clippy-and-tests:
name: Clippy and Cargo Tests
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup LLVM
uses: ./.forgejo/actions/setup-llvm-with-apt
with:
extra-packages: liburing-dev liburing2
- name: Setup Rust with caching
uses: ./.forgejo/actions/setup-rust
with:
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Run Clippy lints
run: |
cargo clippy \
--workspace \
--features full \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Run Cargo tests
run: |
cargo test \
--workspace \
--features full \
--locked \
--profile test \
--all-targets \
--no-fail-fast

View file

@ -1,144 +0,0 @@
name: Checks / Rust
on:
push:
jobs:
format:
name: Format
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
with:
toolchain: "nightly"
components: "rustfmt"
- name: Check formatting
run: |
cargo +nightly fmt --all -- --check
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Clippy
run: |
cargo clippy \
--workspace \
--features full \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Show sccache stats
if: always()
run: sccache --show-stats
cargo-test:
name: Cargo Test
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Cargo Test
run: |
cargo test \
--workspace \
--features full \
--locked \
--profile test \
--all-targets \
--no-fail-fast
- name: Show sccache stats
if: always()
run: sccache --show-stats

22
Cargo.lock generated
View file

@ -4058,7 +4058,7 @@ checksum = "88f8660c1ff60292143c98d08fc6e2f654d722db50410e3f3797d40baaf9d8f3"
[[package]] [[package]]
name = "ruma" name = "ruma"
version = "0.10.1" version = "0.10.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"assign", "assign",
"js_int", "js_int",
@ -4078,7 +4078,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-appservice-api" name = "ruma-appservice-api"
version = "0.10.0" version = "0.10.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-common", "ruma-common",
@ -4090,7 +4090,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-client-api" name = "ruma-client-api"
version = "0.18.0" version = "0.18.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"as_variant", "as_variant",
"assign", "assign",
@ -4113,7 +4113,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-common" name = "ruma-common"
version = "0.13.0" version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"as_variant", "as_variant",
"base64 0.22.1", "base64 0.22.1",
@ -4145,7 +4145,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events" name = "ruma-events"
version = "0.28.1" version = "0.28.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"as_variant", "as_variant",
"indexmap 2.10.0", "indexmap 2.10.0",
@ -4170,7 +4170,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-federation-api" name = "ruma-federation-api"
version = "0.9.0" version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"bytes", "bytes",
"headers", "headers",
@ -4192,7 +4192,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-validation" name = "ruma-identifiers-validation"
version = "0.9.5" version = "0.9.5"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"js_int", "js_int",
"thiserror 2.0.12", "thiserror 2.0.12",
@ -4201,7 +4201,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identity-service-api" name = "ruma-identity-service-api"
version = "0.9.0" version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-common", "ruma-common",
@ -4211,7 +4211,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-macros" name = "ruma-macros"
version = "0.13.0" version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"proc-macro-crate", "proc-macro-crate",
@ -4226,7 +4226,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-push-gateway-api" name = "ruma-push-gateway-api"
version = "0.9.0" version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-common", "ruma-common",
@ -4238,7 +4238,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-signatures" name = "ruma-signatures"
version = "0.15.0" version = "0.15.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da" source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [ dependencies = [
"base64 0.22.1", "base64 0.22.1",
"ed25519-dalek", "ed25519-dalek",

View file

@ -352,7 +352,7 @@ version = "0.1.2"
[workspace.dependencies.ruma] [workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma" git = "https://forgejo.ellis.link/continuwuation/ruwuma"
#branch = "conduwuit-changes" #branch = "conduwuit-changes"
rev = "b753738047d1f443aca870896ef27ecaacf027da" rev = "191e5c541e8339080bebb5ac6855a682330bb886"
features = [ features = [
"compat", "compat",
"rand", "rand",

68
fedora/conduwuit.service Normal file
View file

@ -0,0 +1,68 @@
[Unit]
Description=Continuwuity - Matrix homeserver
Documentation=https://continuwuity.org/
Wants=network-online.target
After=network-online.target

[Service]
DynamicUser=yes
User=conduwuit
Group=conduwuit
Type=notify

Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"

ExecStart=/usr/bin/conduwuit

# Sandboxing / hardening
AmbientCapabilities=
CapabilityBoundingSet=
DevicePolicy=closed
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
#ProcSubset=pid
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectProc=invisible
ProtectSystem=strict
PrivateDevices=yes
PrivateMounts=yes
PrivateTmp=yes
PrivateUsers=yes
PrivateIPC=yes
RemoveIPC=yes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM

StateDirectory=conduwuit
ConfigurationDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750

Restart=on-failure
RestartSec=5
TimeoutStopSec=2m
TimeoutStartSec=2m
StartLimitInterval=1m
StartLimitBurst=5

[Install]
WantedBy=multi-user.target
# Alias= is only valid in the [Install] section (systemd.unit(5));
# it was previously placed in [Unit], where systemd rejects it.
Alias=matrix-conduwuit.service

View file

@ -0,0 +1,80 @@
# This should be run using rpkg-util: https://docs.pagure.org/rpkg-util
# it requires Internet access and is not suitable for Fedora main repos
# TODO: rpkg-util is no longer maintained, find a replacement
Name: continuwuity
Version: {{{ git_repo_version }}}
Release: 1%{?dist}
Summary: Very cool Matrix chat homeserver written in Rust
License: Apache-2.0 AND MIT
URL: https://continuwuity.org
VCS: {{{ git_repo_vcs }}}
Source: {{{ git_repo_pack }}}
BuildRequires: cargo-rpm-macros >= 25
BuildRequires: systemd-rpm-macros
# Needed to build rust-librocksdb-sys
BuildRequires: clang
BuildRequires: liburing-devel
# Runtime dependencies of the statically-built binary
Requires: liburing
Requires: glibc
Requires: libstdc++
%global _description %{expand:
A cool hard fork of Conduit, a Matrix homeserver written in Rust}
%description %{_description}
%prep
{{{ git_repo_setup_macro }}}
%cargo_prep -N
# Perform an online build so Git dependencies can be retrieved
sed -i 's/^offline = true$//' .cargo/config.toml
%build
%cargo_build
# Here's the one legally required mystery incantation in this file.
# Some of our dependencies have source files which are (for some reason) marked as executable.
# Files in .cargo/registry/ are copied into /usr/src/ by the debuginfo machinery
# at the end of the build step, and then the BRP shebang mangling script checks
# the entire buildroot to find executable files, and fails the build because
# it thinks Rust's file attributes are shebangs because they start with `#!`.
# So we have to clear the executable bit on all of them before that happens.
find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
# TODO: this fails currently because it's forced to run in offline mode
# {cargo_license -- --no-dev} > LICENSE.dependencies
%install
# Install the binary, the systemd unit, and the default config
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 fedora/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
%files
%license LICENSE
%license src/core/matrix/state_res/LICENSE
%doc CODE_OF_CONDUCT.md
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config %{_sysconfdir}/conduwuit/conduwuit.toml
%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service
# Do not create /var/lib/conduwuit, systemd will create it if necessary
%post
%systemd_post conduwuit.service
%preun
%systemd_preun conduwuit.service
%postun
%systemd_postun_with_restart conduwuit.service
%changelog
{{{ git_repo_changelog }}}

View file

@ -13,8 +13,8 @@
"enabled": true "enabled": true
}, },
"labels": [ "labels": [
"dependencies", "Dependencies",
"github_actions" "Dependencies/Renovate"
], ],
"ignoreDeps": [ "ignoreDeps": [
"tikv-jemallocator", "tikv-jemallocator",

View file

@ -756,7 +756,7 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content), PduBuilder::state(String::new(), &power_levels_content),
&user_id, &user_id,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.await?; .await?;
@ -901,7 +901,13 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
); );
let redaction_event_id = { let redaction_event_id = {
let state_lock = self.services.rooms.state.mutex.lock(event.room_id()).await; let state_lock = self
.services
.rooms
.state
.mutex
.lock(&event.room_id_or_hash())
.await;
self.services self.services
.rooms .rooms
@ -915,7 +921,7 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
}) })
}, },
event.sender(), event.sender(),
event.room_id(), Some(&event.room_id_or_hash()),
&state_lock, &state_lock,
) )
.await? .await?

View file

@ -929,7 +929,7 @@ pub async fn full_user_deactivate(
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content), PduBuilder::state(String::new(), &power_levels_content),
user_id, user_id,
room_id, Some(room_id),
&state_lock, &state_lock,
) )
.await .await

View file

@ -69,7 +69,7 @@ pub(crate) async fn get_context_route(
let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?; let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?;
if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id { if base_pdu.room_id_or_hash() != *room_id || base_pdu.event_id != *event_id {
return Err!(Request(NotFound("Base event not found."))); return Err!(Request(NotFound("Base event not found.")));
} }

View file

@ -49,7 +49,7 @@ pub(crate) async fn ban_user_route(
..current_member_content ..current_member_content
}), }),
sender_user, sender_user,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -128,12 +128,12 @@ pub(crate) async fn invite_helper(
.create_hash_and_sign_event( .create_hash_and_sign_event(
PduBuilder::state(user_id.to_string(), &content), PduBuilder::state(user_id.to_string(), &content),
sender_user, sender_user,
room_id, Some(room_id),
&state_lock, &state_lock,
) )
.await?; .await?;
let invite_room_state = services.rooms.state.summary_stripped(&pdu).await; let invite_room_state = services.rooms.state.summary_stripped(&pdu, room_id).await;
drop(state_lock); drop(state_lock);
@ -227,7 +227,7 @@ pub(crate) async fn invite_helper(
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(user_id.to_string(), &content), PduBuilder::state(user_id.to_string(), &content),
sender_user, sender_user,
room_id, Some(room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -18,7 +18,7 @@ use conduwuit::{
}, },
warn, warn,
}; };
use futures::{FutureExt, StreamExt}; use futures::{FutureExt, StreamExt, TryFutureExt};
use ruma::{ use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
RoomVersionId, UserId, RoomVersionId, UserId,
@ -550,12 +550,20 @@ async fn join_room_by_id_helper_remote(
.iter() .iter()
.stream() .stream()
.then(|pdu| { .then(|pdu| {
debug!(?pdu, "Validating send_join response room_state event");
services services
.server_keys .server_keys
.validate_and_add_event_id_no_fetch(pdu, &room_version_id) .validate_and_add_event_id_no_fetch(pdu, &room_version_id)
.inspect_err(|e| {
debug_warn!(
"Could not validate send_join response room_state event: {e:?}"
);
})
.inspect(|_| debug!("Completed validating send_join response room_state event"))
}) })
.ready_filter_map(Result::ok) .ready_filter_map(Result::ok)
.fold(HashMap::new(), |mut state, (event_id, value)| async move { .fold(HashMap::new(), |mut state, (event_id, value)| async move {
debug!(?event_id, "Processing send_join response room_state event");
let pdu = match PduEvent::from_id_val(&event_id, value.clone()) { let pdu = match PduEvent::from_id_val(&event_id, value.clone()) {
| Ok(pdu) => pdu, | Ok(pdu) => pdu,
| Err(e) => { | Err(e) => {
@ -563,9 +571,10 @@ async fn join_room_by_id_helper_remote(
return state; return state;
}, },
}; };
debug!(event_id = ?event_id.clone(), "Adding PDU outlier for send_join response room_state event");
services.rooms.outlier.add_pdu_outlier(&event_id, &value); services.rooms.outlier.add_pdu_outlier(&event_id, &value);
if let Some(state_key) = &pdu.state_key { if let Some(state_key) = &pdu.state_key {
debug!(?state_key, "Creating shortstatekey for state event in send_join response");
let shortstatekey = services let shortstatekey = services
.rooms .rooms
.short .short
@ -574,7 +583,7 @@ async fn join_room_by_id_helper_remote(
state.insert(shortstatekey, pdu.event_id.clone()); state.insert(shortstatekey, pdu.event_id.clone());
} }
debug!("Completed send_join response");
state state
}) })
.await; .await;
@ -615,6 +624,9 @@ async fn join_room_by_id_helper_remote(
&parsed_join_pdu, &parsed_join_pdu,
None, // TODO: third party invite None, // TODO: third party invite
|k, s| state_fetch(k.clone(), s.into()), |k, s| state_fetch(k.clone(), s.into()),
&state_fetch(StateEventType::RoomCreate, "".into())
.await
.expect("create event is missing from send_join auth"),
) )
.await .await
.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
@ -662,7 +674,7 @@ async fn join_room_by_id_helper_remote(
let statehash_after_join = services let statehash_after_join = services
.rooms .rooms
.state .state
.append_to_state(&parsed_join_pdu) .append_to_state(&parsed_join_pdu, room_id)
.await?; .await?;
info!("Appending new room join event"); info!("Appending new room join event");
@ -674,6 +686,7 @@ async fn join_room_by_id_helper_remote(
join_event, join_event,
once(parsed_join_pdu.event_id.borrow()), once(parsed_join_pdu.event_id.borrow()),
&state_lock, &state_lock,
room_id,
) )
.await?; .await?;
@ -773,7 +786,7 @@ async fn join_room_by_id_helper_local(
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content), PduBuilder::state(sender_user.to_string(), &content),
sender_user, sender_user,
room_id, Some(room_id),
&state_lock, &state_lock,
) )
.await .await

View file

@ -54,7 +54,7 @@ pub(crate) async fn kick_user_route(
..event ..event
}), }),
sender_user, sender_user,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -373,7 +373,7 @@ async fn knock_room_helper_local(
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content), PduBuilder::state(sender_user.to_string(), &content),
sender_user, sender_user,
room_id, Some(room_id),
&state_lock, &state_lock,
) )
.await .await
@ -502,6 +502,7 @@ async fn knock_room_helper_local(
knock_event, knock_event,
once(parsed_knock_pdu.event_id.borrow()), once(parsed_knock_pdu.event_id.borrow()),
&state_lock, &state_lock,
room_id,
) )
.await?; .await?;
@ -672,7 +673,7 @@ async fn knock_room_helper_remote(
let statehash_after_knock = services let statehash_after_knock = services
.rooms .rooms
.state .state
.append_to_state(&parsed_knock_pdu) .append_to_state(&parsed_knock_pdu, room_id)
.await?; .await?;
info!("Updating membership locally to knock state with provided stripped state events"); info!("Updating membership locally to knock state with provided stripped state events");
@ -701,6 +702,7 @@ async fn knock_room_helper_remote(
knock_event, knock_event,
once(parsed_knock_pdu.event_id.borrow()), once(parsed_knock_pdu.event_id.borrow()),
&state_lock, &state_lock,
room_id,
) )
.await?; .await?;

View file

@ -206,7 +206,7 @@ pub async fn leave_room(
..event ..event
}), }),
user_id, user_id,
room_id, Some(room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -70,9 +70,10 @@ pub(crate) async fn banned_room_check(
if let Some(room_id) = room_id { if let Some(room_id) = room_id {
if services.rooms.metadata.is_banned(room_id).await if services.rooms.metadata.is_banned(room_id).await
|| services || (room_id.server_name().is_some()
.moderation && services
.is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")) .moderation
.is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")))
{ {
warn!( warn!(
"User {user_id} who is not an admin attempted to send an invite for or \ "User {user_id} who is not an admin attempted to send an invite for or \

View file

@ -47,7 +47,7 @@ pub(crate) async fn unban_user_route(
..current_member_content ..current_member_content
}), }),
sender_user, sender_user,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -310,7 +310,7 @@ pub(crate) async fn visibility_filter(
services services
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_event(user_id, pdu.room_id(), pdu.event_id()) .user_can_see_event(user_id, &pdu.room_id_or_hash(), pdu.event_id())
.await .await
.then_some(item) .then_some(item)
} }

View file

@ -423,7 +423,7 @@ pub async fn update_all_rooms(
if let Err(e) = services if let Err(e) = services
.rooms .rooms
.timeline .timeline
.build_and_append_pdu(pdu_builder, user_id, room_id, &state_lock) .build_and_append_pdu(pdu_builder, user_id, Some(room_id), &state_lock)
.await .await
{ {
warn!(%user_id, %room_id, "Failed to update/send new profile join membership update in room: {e}"); warn!(%user_id, %room_id, "Failed to update/send new profile join membership update in room: {e}");

View file

@ -36,7 +36,7 @@ pub(crate) async fn redact_event_route(
}) })
}, },
sender_user, sender_user,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -223,7 +223,7 @@ async fn visibility_filter<Pdu: Event + Send + Sync>(
services services
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_event(sender_user, pdu.room_id(), pdu.event_id()) .user_can_see_event(sender_user, &pdu.room_id_or_hash(), pdu.event_id())
.await .await
.then_some(item) .then_some(item)
} }

View file

@ -2,7 +2,7 @@ use std::{fmt::Write as _, ops::Mul, time::Duration};
use axum::extract::State; use axum::extract::State;
use axum_client_ip::InsecureClientIp; use axum_client_ip::InsecureClientIp;
use conduwuit::{Err, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt}; use conduwuit::{Err, Event, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
use conduwuit_service::Services; use conduwuit_service::Services;
use rand::Rng; use rand::Rng;
use ruma::{ use ruma::{
@ -200,7 +200,7 @@ async fn is_event_report_valid(
valid" valid"
); );
if room_id != pdu.room_id { if room_id != pdu.room_id_or_hash() {
return Err!(Request(NotFound("Event ID does not belong to the reported room",))); return Err!(Request(NotFound("Event ID does not belong to the reported room",)));
} }

View file

@ -2,7 +2,7 @@ use std::collections::BTreeMap;
use axum::extract::State; use axum::extract::State;
use conduwuit::{ use conduwuit::{
Err, Result, debug_info, debug_warn, err, info, Err, Result, RoomVersion, debug, debug_info, debug_warn, err, info,
matrix::{StateKey, pdu::PduBuilder}, matrix::{StateKey, pdu::PduBuilder},
warn, warn,
}; };
@ -68,51 +68,6 @@ pub(crate) async fn create_room_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
} }
let room_id: OwnedRoomId = match &body.room_id {
| Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?,
| _ => RoomId::new(&services.server.name),
};
// check if room ID doesn't already exist instead of erroring on auth check
if services.rooms.short.get_shortroomid(&room_id).await.is_ok() {
return Err!(Request(RoomInUse("Room with that custom room ID already exists",)));
}
if body.visibility == room::Visibility::Public
&& services.server.config.lockdown_public_room_directory
&& !services.users.is_admin(sender_user).await
&& body.appservice_info.is_none()
{
warn!(
"Non-admin user {sender_user} tried to publish {room_id} to the room directory \
while \"lockdown_public_room_directory\" is enabled"
);
if services.server.config.admin_room_notices {
services
.admin
.notice(&format!(
"Non-admin user {sender_user} tried to publish {room_id} to the room \
directory while \"lockdown_public_room_directory\" is enabled"
))
.await;
}
return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed")));
}
let _short_id = services
.rooms
.short
.get_or_create_shortroomid(&room_id)
.await;
let state_lock = services.rooms.state.mutex.lock(&room_id).await;
let alias: Option<OwnedRoomAliasId> = match body.room_alias_name.as_ref() {
| Some(alias) =>
Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?),
| _ => None,
};
let room_version = match body.room_version.clone() { let room_version = match body.room_version.clone() {
| Some(room_version) => | Some(room_version) =>
if services.server.supported_room_version(&room_version) { if services.server.supported_room_version(&room_version) {
@ -124,6 +79,51 @@ pub(crate) async fn create_room_route(
}, },
| None => services.server.config.default_room_version.clone(), | None => services.server.config.default_room_version.clone(),
}; };
let room_features = RoomVersion::new(&room_version)?;
let room_id: Option<OwnedRoomId> = match room_features.room_ids_as_hashes {
| true => None,
| false => match &body.room_id {
| Some(custom_room_id) => Some(custom_room_id_check(&services, custom_room_id)?),
| None => Some(RoomId::new(services.globals.server_name())),
},
};
// check if room ID doesn't already exist instead of erroring on auth check
if let Some(ref room_id) = room_id {
if services.rooms.short.get_shortroomid(room_id).await.is_ok() {
return Err!(Request(RoomInUse("Room with that custom room ID already exists",)));
}
}
if body.visibility == room::Visibility::Public
&& services.server.config.lockdown_public_room_directory
&& !services.users.is_admin(sender_user).await
&& body.appservice_info.is_none()
{
warn!(
"Non-admin user {sender_user} tried to publish {room_id:?} to the room directory \
while \"lockdown_public_room_directory\" is enabled"
);
if services.server.config.admin_room_notices {
services
.admin
.notice(&format!(
"Non-admin user {sender_user} tried to publish {room_id:?} to the room \
directory while \"lockdown_public_room_directory\" is enabled"
))
.await;
}
return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed")));
}
let alias: Option<OwnedRoomAliasId> = match body.room_alias_name.as_ref() {
| Some(alias) =>
Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?),
| _ => None,
};
let create_content = match &body.creation_content { let create_content = match &body.creation_content {
| Some(content) => { | Some(content) => {
@ -156,6 +156,10 @@ pub(crate) async fn create_room_route(
.try_into() .try_into()
.map_err(|e| err!(Request(BadJson("Invalid creation content: {e}"))))?, .map_err(|e| err!(Request(BadJson("Invalid creation content: {e}"))))?,
); );
if room_version == V12 {
// TODO(hydra): v12 rooms cannot be federated until they are stable.
content.insert("m.federate".into(), false.into());
}
content content
}, },
| None => { | None => {
@ -164,18 +168,32 @@ pub(crate) async fn create_room_route(
let content = match room_version { let content = match room_version {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
RoomCreateEventContent::new_v1(sender_user.to_owned()), RoomCreateEventContent::new_v1(sender_user.to_owned()),
| _ => RoomCreateEventContent::new_v11(), | V11 => RoomCreateEventContent::new_v11(),
| _ => RoomCreateEventContent::new_v12(),
}; };
let mut content = let mut content =
serde_json::from_str::<CanonicalJsonObject>(to_raw_value(&content)?.get()) serde_json::from_str::<CanonicalJsonObject>(to_raw_value(&content)?.get())?;
.unwrap();
content.insert("room_version".into(), json!(room_version.as_str()).try_into()?); content.insert("room_version".into(), json!(room_version.as_str()).try_into()?);
if room_version == V12 {
// TODO(hydra): v12 rooms cannot be federated until they are stable.
content.insert("m.federate".into(), false.into());
}
content content
}, },
}; };
let state_lock = match room_id.clone() {
| Some(room_id) => services.rooms.state.mutex.lock(&room_id).await,
| None => {
let temp_room_id = RoomId::new(services.globals.server_name());
debug_info!("Locking temporary room state mutex for {temp_room_id}");
services.rooms.state.mutex.lock(&temp_room_id).await
},
};
// 1. The room create event // 1. The room create event
services debug!("Creating room create event for {sender_user} in room {room_id:?}");
let create_event_id = services
.rooms .rooms
.timeline .timeline
.build_and_append_pdu( .build_and_append_pdu(
@ -186,13 +204,26 @@ pub(crate) async fn create_room_route(
..Default::default() ..Default::default()
}, },
sender_user, sender_user,
&room_id, None,
&state_lock, &state_lock,
) )
.boxed() .boxed()
.await?; .await?;
debug!("Created room create event with ID {}", create_event_id);
let room_id = match room_id {
| Some(room_id) => room_id,
| None => {
let as_room_id = create_event_id.as_str().replace('$', "!");
debug_info!("Creating room with v12 room ID {as_room_id}");
RoomId::parse(&as_room_id)?.to_owned()
},
};
drop(state_lock);
debug!("Room created with ID {room_id}");
let state_lock = services.rooms.state.mutex.lock(&room_id).await;
// 2. Let the room creator join // 2. Let the room creator join
debug_info!("Joining {sender_user} to room {room_id}");
services services
.rooms .rooms
.timeline .timeline
@ -205,7 +236,7 @@ pub(crate) async fn create_room_route(
..RoomMemberEventContent::new(MembershipState::Join) ..RoomMemberEventContent::new(MembershipState::Join)
}), }),
sender_user, sender_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -235,10 +266,28 @@ pub(crate) async fn create_room_route(
} }
} }
let mut creators: Vec<OwnedUserId> = vec![sender_user.to_owned()];
if let Some(additional_creators) = create_content.get("additional_creators") {
if let Some(additional_creators) = additional_creators.as_array() {
for creator in additional_creators {
if let Some(creator) = creator.as_str() {
if let Ok(creator) = OwnedUserId::parse(creator) {
creators.push(creator.clone());
users.insert(creator.clone(), int!(100));
}
}
}
}
}
if !(RoomVersion::new(&room_version)?).explicitly_privilege_room_creators {
creators.clear();
}
let power_levels_content = default_power_levels_content( let power_levels_content = default_power_levels_content(
body.power_level_content_override.as_ref(), body.power_level_content_override.as_ref(),
&body.visibility, &body.visibility,
users, users,
creators,
)?; )?;
services services
@ -252,7 +301,7 @@ pub(crate) async fn create_room_route(
..Default::default() ..Default::default()
}, },
sender_user, sender_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -269,7 +318,7 @@ pub(crate) async fn create_room_route(
alt_aliases: vec![], alt_aliases: vec![],
}), }),
sender_user, sender_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -292,7 +341,7 @@ pub(crate) async fn create_room_route(
}), }),
), ),
sender_user, sender_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -308,7 +357,7 @@ pub(crate) async fn create_room_route(
&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared), &RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared),
), ),
sender_user, sender_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -327,7 +376,7 @@ pub(crate) async fn create_room_route(
}), }),
), ),
sender_user, sender_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -363,7 +412,7 @@ pub(crate) async fn create_room_route(
services services
.rooms .rooms
.timeline .timeline
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) .build_and_append_pdu(pdu_builder, sender_user, Some(&room_id), &state_lock)
.boxed() .boxed()
.await?; .await?;
} }
@ -376,7 +425,7 @@ pub(crate) async fn create_room_route(
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(String::new(), &RoomNameEventContent::new(name.clone())), PduBuilder::state(String::new(), &RoomNameEventContent::new(name.clone())),
sender_user, sender_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -390,7 +439,7 @@ pub(crate) async fn create_room_route(
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(String::new(), &RoomTopicEventContent { topic: topic.clone() }), PduBuilder::state(String::new(), &RoomTopicEventContent { topic: topic.clone() }),
sender_user, sender_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -450,6 +499,7 @@ fn default_power_levels_content(
power_level_content_override: Option<&Raw<RoomPowerLevelsEventContent>>, power_level_content_override: Option<&Raw<RoomPowerLevelsEventContent>>,
visibility: &room::Visibility, visibility: &room::Visibility,
users: BTreeMap<OwnedUserId, Int>, users: BTreeMap<OwnedUserId, Int>,
creators: Vec<OwnedUserId>,
) -> Result<serde_json::Value> { ) -> Result<serde_json::Value> {
let mut power_levels_content = let mut power_levels_content =
serde_json::to_value(RoomPowerLevelsEventContent { users, ..Default::default() }) serde_json::to_value(RoomPowerLevelsEventContent { users, ..Default::default() })
@ -499,6 +549,19 @@ fn default_power_levels_content(
} }
} }
if !creators.is_empty() {
// Raise the default power level of tombstone to 150
power_levels_content["events"]["m.room.tombstone"] =
serde_json::to_value(150).expect("150 is valid Value");
for creator in creators {
// Omit creators from the power level list altogether
power_levels_content["users"]
.as_object_mut()
.expect("users is an object")
.remove(creator.as_str());
}
}
Ok(power_levels_content) Ok(power_levels_content)
} }

View file

@ -34,7 +34,7 @@ pub(crate) async fn get_room_event_route(
} }
debug_assert!( debug_assert!(
event.event_id() == event_id && event.room_id() == room_id, event.event_id() == event_id && event.room_id_or_hash() == *room_id,
"Fetched PDU must match requested" "Fetched PDU must match requested"
); );

View file

@ -91,7 +91,7 @@ pub(crate) async fn upgrade_room_route(
replacement_room: replacement_room.clone(), replacement_room: replacement_room.clone(),
}), }),
sender_user, sender_user,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.await?; .await?;
@ -173,7 +173,7 @@ pub(crate) async fn upgrade_room_route(
timestamp: None, timestamp: None,
}, },
sender_user, sender_user,
&replacement_room, Some(&replacement_room),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -204,7 +204,7 @@ pub(crate) async fn upgrade_room_route(
timestamp: None, timestamp: None,
}, },
sender_user, sender_user,
&replacement_room, Some(&replacement_room),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -243,7 +243,7 @@ pub(crate) async fn upgrade_room_route(
..Default::default() ..Default::default()
}, },
sender_user, sender_user,
&replacement_room, Some(&replacement_room),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -302,7 +302,7 @@ pub(crate) async fn upgrade_room_route(
..power_levels_event_content ..power_levels_event_content
}), }),
sender_user, sender_user,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -352,7 +352,7 @@ pub(crate) async fn upgrade_room_route(
..Default::default() ..Default::default()
}, },
sender_user, sender_user,
space_id, Some(space_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -376,7 +376,7 @@ pub(crate) async fn upgrade_room_route(
..Default::default() ..Default::default()
}, },
sender_user, sender_user,
space_id, Some(space_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()

View file

@ -80,7 +80,7 @@ pub(crate) async fn send_message_event_route(
..Default::default() ..Default::default()
}, },
sender_user, sender_user,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -201,7 +201,7 @@ async fn send_state_event_for_key_helper(
..Default::default() ..Default::default()
}, },
sender, sender,
room_id, Some(room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -457,7 +457,7 @@ async fn handle_left_room(
state_key: Some(sender_user.as_str().into()), state_key: Some(sender_user.as_str().into()),
unsigned: None, unsigned: None,
// The following keys are dropped on conversion // The following keys are dropped on conversion
room_id: room_id.clone(), room_id: Some(room_id.clone()),
prev_events: vec![], prev_events: vec![],
depth: uint!(1), depth: uint!(1),
auth_events: vec![], auth_events: vec![],

View file

@ -2,7 +2,7 @@ use std::cmp;
use axum::extract::State; use axum::extract::State;
use conduwuit::{ use conduwuit::{
PduCount, Result, Event, PduCount, Result,
utils::{IterStream, ReadyExt, stream::TryTools}, utils::{IterStream, ReadyExt, stream::TryTools},
}; };
use futures::{FutureExt, StreamExt, TryStreamExt}; use futures::{FutureExt, StreamExt, TryStreamExt};
@ -68,7 +68,7 @@ pub(crate) async fn get_backfill_route(
Ok(services Ok(services
.rooms .rooms
.state_accessor .state_accessor
.server_can_see_event(body.origin(), &pdu.room_id, &pdu.event_id) .server_can_see_event(body.origin(), &pdu.room_id_or_hash(), &pdu.event_id)
.await .await
.then_some(pdu)) .then_some(pdu))
}) })

View file

@ -122,7 +122,7 @@ pub(crate) async fn create_join_event_template_route(
..RoomMemberEventContent::new(MembershipState::Join) ..RoomMemberEventContent::new(MembershipState::Join)
}), }),
&body.user_id, &body.user_id,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -95,7 +95,7 @@ pub(crate) async fn create_knock_event_template_route(
&RoomMemberEventContent::new(MembershipState::Knock), &RoomMemberEventContent::new(MembershipState::Knock),
), ),
&body.user_id, &body.user_id,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -45,7 +45,7 @@ pub(crate) async fn create_leave_event_template_route(
&RoomMemberEventContent::new(MembershipState::Leave), &RoomMemberEventContent::new(MembershipState::Leave),
), ),
&body.user_id, &body.user_id,
&body.room_id, Some(&body.room_id),
&state_lock, &state_lock,
) )
.await?; .await?;

View file

@ -138,6 +138,7 @@ async fn handle(
pdus: impl Stream<Item = Pdu> + Send, pdus: impl Stream<Item = Pdu> + Send,
edus: impl Stream<Item = Edu> + Send, edus: impl Stream<Item = Edu> + Send,
) -> Result<ResolvedMap> { ) -> Result<ResolvedMap> {
// TODO(hydra): Does having no room ID break this?
// group pdus by room // group pdus by room
let pdus = pdus let pdus = pdus
.collect() .collect()
@ -186,6 +187,7 @@ async fn handle_room(
.lock(&room_id) .lock(&room_id)
.await; .await;
// TODO(hydra): We might be missing a room ID
let room_id = &room_id; let room_id = &room_id;
pdus.try_stream() pdus.try_stream()
.and_then(|(_, event_id, value)| async move { .and_then(|(_, event_id, value)| async move {

View file

@ -175,7 +175,11 @@ pub(crate) async fn create_knock_event_v1_route(
.send_pdu_room(&body.room_id, &pdu_id) .send_pdu_room(&body.room_id, &pdu_id)
.await?; .await?;
let knock_room_state = services.rooms.state.summary_stripped(&pdu).await; let knock_room_state = services
.rooms
.state
.summary_stripped(&pdu, &body.room_id)
.await;
Ok(send_knock::v1::Response { knock_room_state }) Ok(send_knock::v1::Response { knock_room_state })
} }

View file

@ -18,7 +18,7 @@ pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[
/// Experimental, partially supported room versions /// Experimental, partially supported room versions
pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] = pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] =
&[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; &[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5, RoomVersionId::V12];
type RoomVersion = (RoomVersionId, RoomVersionStability); type RoomVersion = (RoomVersionId, RoomVersionStability);

View file

@ -10,7 +10,7 @@ mod unsigned;
use std::fmt::Debug; use std::fmt::Debug;
use ruma::{ use ruma::{
CanonicalJsonObject, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, CanonicalJsonObject, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId,
RoomVersionId, UserId, events::TimelineEventType, RoomVersionId, UserId, events::TimelineEventType,
}; };
use serde::Deserialize; use serde::Deserialize;
@ -168,7 +168,12 @@ pub trait Event: Clone + Debug {
fn redacts(&self) -> Option<&EventId>; fn redacts(&self) -> Option<&EventId>;
/// The `RoomId` of this event. /// The `RoomId` of this event.
fn room_id(&self) -> &RoomId; fn room_id(&self) -> Option<&RoomId>;
/// The `RoomId` or hash of this event.
/// This should only be preferred over room_id() if the event is a v12
/// create event.
fn room_id_or_hash(&self) -> OwnedRoomId;
/// The `UserId` of this event. /// The `UserId` of this event.
fn sender(&self) -> &UserId; fn sender(&self) -> &UserId;

View file

@ -32,12 +32,16 @@ impl<E: Event> Matches<E> for &RoomEventFilter {
} }
fn matches_room<E: Event>(event: &E, filter: &RoomEventFilter) -> bool { fn matches_room<E: Event>(event: &E, filter: &RoomEventFilter) -> bool {
if filter.not_rooms.iter().any(is_equal_to!(event.room_id())) { if filter
.not_rooms
.iter()
.any(is_equal_to!(event.room_id().unwrap()))
{
return false; return false;
} }
if let Some(rooms) = filter.rooms.as_ref() { if let Some(rooms) = filter.rooms.as_ref() {
if !rooms.iter().any(is_equal_to!(event.room_id())) { if !rooms.iter().any(is_equal_to!(event.room_id().unwrap())) {
return false; return false;
} }
} }

View file

@ -31,7 +31,7 @@ use crate::Result;
pub struct Pdu { pub struct Pdu {
pub event_id: OwnedEventId, pub event_id: OwnedEventId,
pub room_id: OwnedRoomId, pub room_id: Option<OwnedRoomId>,
pub sender: OwnedUserId, pub sender: OwnedUserId,
@ -110,7 +110,19 @@ impl Event for Pdu {
fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() } fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() }
#[inline] #[inline]
fn room_id(&self) -> &RoomId { &self.room_id } fn room_id(&self) -> Option<&RoomId> { self.room_id.as_deref() }
#[inline]
fn room_id_or_hash(&self) -> OwnedRoomId {
if let Some(room_id) = &self.room_id {
room_id.clone()
} else {
let constructed_hash = "!".to_owned() + &self.event_id.as_str()[1..];
RoomId::parse(&constructed_hash)
.expect("event ID can be indexed")
.to_owned()
}
}
#[inline] #[inline]
fn sender(&self) -> &UserId { &self.sender } fn sender(&self) -> &UserId { &self.sender }
@ -163,7 +175,19 @@ impl Event for &Pdu {
fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() } fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() }
#[inline] #[inline]
fn room_id(&self) -> &RoomId { &self.room_id } fn room_id(&self) -> Option<&RoomId> { self.room_id.as_ref().map(AsRef::as_ref) }
#[inline]
fn room_id_or_hash(&self) -> OwnedRoomId {
if let Some(room_id) = &self.room_id {
room_id.clone()
} else {
let constructed_hash = "!".to_owned() + &self.event_id.as_str()[1..];
RoomId::parse(&constructed_hash)
.expect("event ID can be indexed")
.to_owned()
}
}
#[inline] #[inline]
fn sender(&self) -> &UserId { &self.sender } fn sender(&self) -> &UserId { &self.sender }

View file

@ -406,7 +406,7 @@ where
Pdu { Pdu {
event_id: id.try_into().unwrap(), event_id: id.try_into().unwrap(),
room_id: room_id().to_owned(), room_id: Some(room_id().to_owned()),
sender: sender.to_owned(), sender: sender.to_owned(),
origin_server_ts: ts.try_into().unwrap(), origin_server_ts: ts.try_into().unwrap(),
state_key: state_key.map(Into::into), state_key: state_key.map(Into::into),

View file

@ -2,10 +2,10 @@ use std::{borrow::Borrow, collections::BTreeSet};
use futures::{ use futures::{
Future, Future,
future::{OptionFuture, join3}, future::{OptionFuture, join, join3},
}; };
use ruma::{ use ruma::{
Int, OwnedUserId, RoomVersionId, UserId, Int, OwnedRoomId, OwnedUserId, RoomVersionId, UserId,
events::room::{ events::room::{
create::RoomCreateEventContent, create::RoomCreateEventContent,
join_rules::{JoinRule, RoomJoinRulesEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent},
@ -44,6 +44,15 @@ struct RoomMemberContentFields {
join_authorised_via_users_server: Option<Raw<OwnedUserId>>, join_authorised_via_users_server: Option<Raw<OwnedUserId>>,
} }
#[derive(Deserialize)]
struct RoomCreateContentFields {
room_version: Option<Raw<RoomVersionId>>,
creator: Option<Raw<IgnoredAny>>,
additional_creators: Option<Vec<Raw<OwnedUserId>>>,
#[serde(rename = "m.federate", default = "ruma::serde::default_true")]
federate: bool,
}
/// For the given event `kind` what are the relevant auth events that are needed /// For the given event `kind` what are the relevant auth events that are needed
/// to authenticate this `content`. /// to authenticate this `content`.
/// ///
@ -56,16 +65,24 @@ pub fn auth_types_for_event(
sender: &UserId, sender: &UserId,
state_key: Option<&str>, state_key: Option<&str>,
content: &RawJsonValue, content: &RawJsonValue,
room_version: &RoomVersion,
) -> serde_json::Result<Vec<(StateEventType, StateKey)>> { ) -> serde_json::Result<Vec<(StateEventType, StateKey)>> {
if kind == &TimelineEventType::RoomCreate { if kind == &TimelineEventType::RoomCreate {
return Ok(vec![]); return Ok(vec![]);
} }
let mut auth_types = vec![ let mut auth_types = if room_version.room_ids_as_hashes {
(StateEventType::RoomPowerLevels, StateKey::new()), vec![
(StateEventType::RoomMember, sender.as_str().into()), (StateEventType::RoomPowerLevels, StateKey::new()),
(StateEventType::RoomCreate, StateKey::new()), (StateEventType::RoomMember, sender.as_str().into()),
]; ]
} else {
vec![
(StateEventType::RoomPowerLevels, StateKey::new()),
(StateEventType::RoomMember, sender.as_str().into()),
(StateEventType::RoomCreate, StateKey::new()),
]
};
if kind == &TimelineEventType::RoomMember { if kind == &TimelineEventType::RoomMember {
#[derive(Deserialize)] #[derive(Deserialize)]
@ -141,6 +158,7 @@ pub async fn auth_check<E, F, Fut>(
incoming_event: &E, incoming_event: &E,
current_third_party_invite: Option<&E>, current_third_party_invite: Option<&E>,
fetch_state: F, fetch_state: F,
create_event: &E,
) -> Result<bool, Error> ) -> Result<bool, Error>
where where
F: Fn(&StateEventType, &str) -> Fut + Send, F: Fn(&StateEventType, &str) -> Fut + Send,
@ -169,12 +187,6 @@ where
// //
// 1. If type is m.room.create: // 1. If type is m.room.create:
if *incoming_event.event_type() == TimelineEventType::RoomCreate { if *incoming_event.event_type() == TimelineEventType::RoomCreate {
#[derive(Deserialize)]
struct RoomCreateContentFields {
room_version: Option<Raw<RoomVersionId>>,
creator: Option<Raw<IgnoredAny>>,
}
debug!("start m.room.create check"); debug!("start m.room.create check");
// If it has any previous events, reject // If it has any previous events, reject
@ -184,14 +196,16 @@ where
} }
// If the domain of the room_id does not match the domain of the sender, reject // If the domain of the room_id does not match the domain of the sender, reject
let Some(room_id_server_name) = incoming_event.room_id().server_name() else { if incoming_event.room_id().is_some() {
warn!("room ID has no servername"); let Some(room_id_server_name) = incoming_event.room_id().unwrap().server_name()
return Ok(false); else {
}; warn!("room ID has no servername");
return Ok(false);
if room_id_server_name != sender.server_name() { };
warn!("servername of room ID does not match servername of sender"); if room_id_server_name != sender.server_name() {
return Ok(false); warn!("servername of room ID does not match servername of sender");
return Ok(false);
}
} }
// If content.room_version is present and is not a recognized version, reject // If content.room_version is present and is not a recognized version, reject
@ -204,7 +218,15 @@ where
return Ok(false); return Ok(false);
} }
if !room_version.use_room_create_sender { // TODO(hydra): If the create event has a room_id, reject
if room_version.room_ids_as_hashes && incoming_event.room_id().is_some() {
warn!("this room version does not support room IDs in m.room.create");
return Ok(false);
}
if !room_version.use_room_create_sender
&& !room_version.explicitly_privilege_room_creators
{
// If content has no creator field, reject // If content has no creator field, reject
if content.creator.is_none() { if content.creator.is_none() {
warn!("no creator field found in m.room.create content"); warn!("no creator field found in m.room.create content");
@ -216,6 +238,8 @@ where
return Ok(true); return Ok(true);
} }
// NOTE(hydra): We must have an event ID from this point forward.
/* /*
// TODO: In the past this code was commented as it caused problems with Synapse. This is no // TODO: In the past this code was commented as it caused problems with Synapse. This is no
// longer the case. This needs to be implemented. // longer the case. This needs to be implemented.
@ -242,54 +266,102 @@ where
} }
*/ */
let (room_create_event, power_levels_event, sender_member_event) = join3( let (power_levels_event, sender_member_event) = join(
fetch_state(&StateEventType::RoomCreate, ""), // fetch_state(&StateEventType::RoomCreate, ""),
fetch_state(&StateEventType::RoomPowerLevels, ""), fetch_state(&StateEventType::RoomPowerLevels, ""),
fetch_state(&StateEventType::RoomMember, sender.as_str()), fetch_state(&StateEventType::RoomMember, sender.as_str()),
) )
.await; .await;
let room_create_event = match room_create_event { // TODO(hydra): Re-enable <v12 checks
| None => { // let room_create_event = match room_create_event {
warn!("no m.room.create event in auth chain"); // | None => {
return Ok(false); // // Room was either v11 with no create event, or v12+ room
}, // if incoming_event.room_id().is_some() {
| Some(e) => e, // // invalid v11
// warn!("no m.room.create event found in claimed state");
// return Ok(false);
// }
// // v12 room
// debug!("no m.room.create event found, assuming v12 room");
// create_event.clone()
// },
// | Some(e) => e,
// };
let room_create_event = create_event.clone();
// Get the content of the room create event, used later.
let room_create_content: RoomCreateContentFields =
from_json_str(room_create_event.content().get())?;
if room_create_content
.room_version
.is_some_and(|v| v.deserialize().is_err())
{
warn!("invalid room version found in m.room.create event");
return Ok(false);
}
let expected_room_id = match room_version.room_ids_as_hashes {
// If the room version uses hashes, we replace the create event's event ID leading sigil
// with !
| true => OwnedRoomId::try_from(room_create_event.event_id().as_str().replace('$', "!"))
.expect("Failed to convert event ID to room ID")
.clone(),
| false => room_create_event.room_id().unwrap().to_owned(),
}; };
if incoming_event.room_id() != room_create_event.room_id() { if incoming_event.room_id().unwrap() != expected_room_id {
warn!("room_id of incoming event does not match room_id of m.room.create event"); warn!(
expected = %expected_room_id,
received = %incoming_event.room_id().unwrap(),
"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
incoming_event.room_id().unwrap(),
expected_room_id,
);
return Ok(false);
}
// If the create event is referenced in the event's auth events, and this is a
// v12 room, reject
let claims_create_event = incoming_event
.auth_events()
.any(|id| id == room_create_event.event_id());
if room_version.room_ids_as_hashes && claims_create_event {
warn!("m.room.create event incorrectly found in auth events");
return Ok(false);
} else if !room_version.room_ids_as_hashes && !claims_create_event {
// If the create event is not referenced in the event's auth events, and this is
// a v11 room, reject
warn!("no m.room.create event found in auth events");
return Ok(false); return Ok(false);
} }
if let Some(ref pe) = power_levels_event { if let Some(ref pe) = power_levels_event {
if pe.room_id() != room_create_event.room_id() { if *pe.room_id().unwrap() != expected_room_id {
warn!("room_id of power levels event does not match room_id of m.room.create event"); warn!(
expected = %expected_room_id,
received = %pe.room_id().unwrap(),
"room_id of power levels event does not match room_id of m.room.create event"
);
return Ok(false); return Ok(false);
} }
} }
// 3. If event does not have m.room.create in auth_events reject // 3. If event does not have m.room.create in auth_events reject
if !incoming_event // removed as part of Hydra.
.auth_events() // TODO: reintroduce this for <v12 lol
.any(|id| id == room_create_event.event_id()) // if !incoming_event
{ // .auth_events()
warn!("no m.room.create event in auth events"); // .any(|id| id == room_create_event.event_id())
return Ok(false); // {
} // warn!("no m.room.create event in auth events");
// return Ok(false);
// }
// If the create event content has the field m.federate set to false and the // If the create event content has the field m.federate set to false and the
// sender domain of the event does not match the sender domain of the create // sender domain of the event does not match the sender domain of the create
// event, reject. // event, reject.
#[derive(Deserialize)] if !room_version.room_ids_as_hashes
#[allow(clippy::items_after_statements)] && !room_create_content.federate
struct RoomCreateContentFederate {
#[serde(rename = "m.federate", default = "ruma::serde::default_true")]
federate: bool,
}
let room_create_content: RoomCreateContentFederate =
from_json_str(room_create_event.content().get())?;
if !room_create_content.federate
&& room_create_event.sender().server_name() != incoming_event.sender().server_name() && room_create_event.sender().server_name() != incoming_event.sender().server_name()
{ {
warn!( warn!(
@ -321,7 +393,7 @@ where
debug!("starting m.room.member check"); debug!("starting m.room.member check");
let state_key = match incoming_event.state_key() { let state_key = match incoming_event.state_key() {
| None => { | None => {
warn!("no statekey in member event"); warn!("no state key in member event");
return Ok(false); return Ok(false);
}, },
| Some(s) => s, | Some(s) => s,
@ -377,6 +449,7 @@ where
&user_for_join_auth_membership, &user_for_join_auth_membership,
&room_create_event, &room_create_event,
)? { )? {
warn!("membership change not valid for some reason");
return Ok(false); return Ok(false);
} }
@ -394,8 +467,16 @@ where
}, },
}; };
if sender_member_event.room_id() != room_create_event.room_id() { if sender_member_event
warn!("room_id of incoming event does not match room_id of m.room.create event"); .room_id()
.expect("we have a room ID for non create events")
!= room_create_event.room_id_or_hash()
{
warn!(
"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
sender_member_event.room_id_or_hash(),
room_create_event.room_id_or_hash()
);
return Ok(false); return Ok(false);
} }
@ -417,7 +498,7 @@ where
} }
// If type is m.room.third_party_invite // If type is m.room.third_party_invite
let sender_power_level = match &power_levels_event { let mut sender_power_level = match &power_levels_event {
| Some(pl) => { | Some(pl) => {
let content = let content =
deserialize_power_levels_content_fields(pl.content().get(), room_version)?; deserialize_power_levels_content_fields(pl.content().get(), room_version)?;
@ -439,6 +520,24 @@ where
if is_creator { int!(100) } else { int!(0) } if is_creator { int!(100) } else { int!(0) }
}, },
}; };
if room_version.explicitly_privilege_room_creators {
// If the user sent the create event, or is listed in additional_creators, just
// give them Int::MAX
if sender == room_create_event.sender()
|| room_create_content
.additional_creators
.as_ref()
.is_some_and(|creators| {
creators
.iter()
.any(|c| c.deserialize().is_ok_and(|c| c == *sender))
}) {
trace!("privileging room creator or additional creator");
// This user is the room creator or an additional creator, give them max power
// level
sender_power_level = Int::MAX;
}
}
// Allow if and only if sender's current power level is greater than // Allow if and only if sender's current power level is greater than
// or equal to the invite level // or equal to the invite level
@ -554,6 +653,7 @@ where
struct GetThirdPartyInvite { struct GetThirdPartyInvite {
third_party_invite: Option<Raw<ThirdPartyInvite>>, third_party_invite: Option<Raw<ThirdPartyInvite>>,
} }
let create_content = from_json_str::<RoomCreateContentFields>(create_room.content().get())?;
let content = current_event.content(); let content = current_event.content();
let target_membership = from_json_str::<GetMembership>(content.get())?.membership; let target_membership = from_json_str::<GetMembership>(content.get())?.membership;
@ -576,15 +676,36 @@ where
| None => RoomPowerLevelsEventContent::default(), | None => RoomPowerLevelsEventContent::default(),
}; };
let sender_power = power_levels let mut sender_power = power_levels
.users .users
.get(sender) .get(sender)
.or_else(|| sender_is_joined.then_some(&power_levels.users_default)); .or_else(|| sender_is_joined.then_some(&power_levels.users_default));
let target_power = power_levels.users.get(target_user).or_else(|| { let mut target_power = power_levels.users.get(target_user).or_else(|| {
(target_membership == MembershipState::Join).then_some(&power_levels.users_default) (target_membership == MembershipState::Join).then_some(&power_levels.users_default)
}); });
let mut creators = BTreeSet::new();
creators.insert(create_room.sender().to_owned());
if room_version.explicitly_privilege_room_creators {
// Explicitly privilege room creators
// If the sender sent the create event, or in additional_creators, give them
// Int::MAX. Same case for target.
if let Some(additional_creators) = &create_content.additional_creators {
for c in additional_creators {
if let Ok(c) = c.deserialize() {
creators.insert(c);
}
}
}
if creators.contains(sender) {
sender_power = Some(&Int::MAX);
}
if creators.contains(target_user) {
target_power = Some(&Int::MAX);
}
}
let mut join_rules = JoinRule::Invite; let mut join_rules = JoinRule::Invite;
if let Some(jr) = &join_rules_event { if let Some(jr) = &join_rules_event {
join_rules = from_json_str::<RoomJoinRulesEventContent>(jr.content().get())?.join_rule; join_rules = from_json_str::<RoomJoinRulesEventContent>(jr.content().get())?.join_rule;
@ -597,7 +718,7 @@ where
let user_for_join_auth_is_valid = if let Some(user_for_join_auth) = user_for_join_auth { let user_for_join_auth_is_valid = if let Some(user_for_join_auth) = user_for_join_auth {
// Is the authorised user allowed to invite users into this room // Is the authorised user allowed to invite users into this room
let (auth_user_pl, invite_level) = if let Some(pl) = &power_levels_event { let (mut auth_user_pl, invite_level) = if let Some(pl) = &power_levels_event {
// TODO Refactor all powerlevel parsing // TODO Refactor all powerlevel parsing
let invite = let invite =
deserialize_power_levels_content_invite(pl.content().get(), room_version)?.invite; deserialize_power_levels_content_invite(pl.content().get(), room_version)?.invite;
@ -613,6 +734,9 @@ where
} else { } else {
(int!(0), int!(0)) (int!(0), int!(0))
}; };
if creators.contains(user_for_join_auth) {
auth_user_pl = Int::MAX;
}
(user_for_join_auth_membership == &MembershipState::Join) (user_for_join_auth_membership == &MembershipState::Join)
&& (auth_user_pl >= invite_level) && (auth_user_pl >= invite_level)
} else { } else {
@ -622,6 +746,7 @@ where
Ok(match target_membership { Ok(match target_membership {
| MembershipState::Join => { | MembershipState::Join => {
debug!("starting target_membership=join check");
// 1. If the only previous event is an m.room.create and the state_key is the // 1. If the only previous event is an m.room.create and the state_key is the
// creator, // creator,
// allow // allow
@ -633,7 +758,10 @@ where
let no_more_prev_events = prev_events.next().is_none(); let no_more_prev_events = prev_events.next().is_none();
if prev_event_is_create_event && no_more_prev_events { if prev_event_is_create_event && no_more_prev_events {
let is_creator = if room_version.use_room_create_sender { debug!("checking if sender is a room creator for initial membership event");
let is_creator = if room_version.explicitly_privilege_room_creators {
creators.contains(target_user) && creators.contains(sender)
} else if room_version.use_room_create_sender {
let creator = create_room.sender(); let creator = create_room.sender();
creator == sender && creator == target_user creator == sender && creator == target_user
@ -647,10 +775,15 @@ where
}; };
if is_creator { if is_creator {
debug!("sender is room creator, allowing join");
return Ok(true); return Ok(true);
} }
debug!("sender is not room creator, proceeding with normal auth checks");
} }
let membership_allows_join = matches!(
target_user_current_membership,
MembershipState::Join | MembershipState::Invite
);
if sender != target_user { if sender != target_user {
// If the sender does not match state_key, reject. // If the sender does not match state_key, reject.
warn!("Can't make other user join"); warn!("Can't make other user join");
@ -659,39 +792,48 @@ where
// If the sender is banned, reject. // If the sender is banned, reject.
warn!(?target_user_membership_event_id, "Banned user can't join"); warn!(?target_user_membership_event_id, "Banned user can't join");
false false
} else if (join_rules == JoinRule::Invite
|| room_version.allow_knocking && (join_rules == JoinRule::Knock || matches!(join_rules, JoinRule::KnockRestricted(_))))
// If the join_rule is invite then allow if membership state is invite or join
&& (target_user_current_membership == MembershipState::Join
|| target_user_current_membership == MembershipState::Invite)
{
true
} else if room_version.restricted_join_rules
&& matches!(join_rules, JoinRule::Restricted(_))
|| room_version.knock_restricted_join_rule
&& matches!(join_rules, JoinRule::KnockRestricted(_))
{
// If the join_rule is restricted or knock_restricted
if matches!(
target_user_current_membership,
MembershipState::Invite | MembershipState::Join
) {
// If membership state is join or invite, allow.
true
} else {
// If the join_authorised_via_users_server key in content is not a user with
// sufficient permission to invite other users, reject.
// Otherwise, allow.
user_for_join_auth_is_valid
}
} else { } else {
// If the join_rule is public, allow. match join_rules {
// Otherwise, reject. | JoinRule::Invite if !membership_allows_join => {
join_rules == JoinRule::Public warn!("Join rule is invite but membership does not allow join");
false
},
| JoinRule::Knock if !room_version.allow_knocking => {
warn!("Join rule is knock but room version does not allow knocking");
false
},
| JoinRule::Knock if !membership_allows_join => {
warn!("Join rule is knock but membership does not allow join");
false
},
| JoinRule::KnockRestricted(_) if !room_version.knock_restricted_join_rule =>
{
warn!(
"Join rule is knock_restricted but room version does not support it"
);
false
},
| JoinRule::KnockRestricted(_) if !membership_allows_join => {
warn!("Join rule is knock_restricted but membership does not allow join");
false
},
| JoinRule::Restricted(_) | JoinRule::KnockRestricted(_) =>
if !user_for_join_auth_is_valid {
warn!(
"Join rule is a restricted one but no valid authorising user \
was given"
);
false
} else {
true
},
| _ => true,
}
} }
}, },
| MembershipState::Invite => { | MembershipState::Invite => {
// If content has third_party_invite key // If content has third_party_invite key
debug!("starting target_membership=invite check");
match third_party_invite.and_then(|i| i.deserialize().ok()) { match third_party_invite.and_then(|i| i.deserialize().ok()) {
| Some(tp_id) => | Some(tp_id) =>
if target_user_current_membership == MembershipState::Ban { if target_user_current_membership == MembershipState::Ban {
@ -849,6 +991,7 @@ fn can_send_event(event: &impl Event, ple: Option<&impl Event>, user_level: Int)
required_level = i64::from(event_type_power_level), required_level = i64::from(event_type_power_level),
user_level = i64::from(user_level), user_level = i64::from(user_level),
state_key = ?event.state_key(), state_key = ?event.state_key(),
power_level_event_id = ?ple.map(|e| e.event_id().as_str()),
"permissions factors", "permissions factors",
); );

View file

@ -92,6 +92,11 @@ where
Pdu: Event + Clone + Send + Sync, Pdu: Event + Clone + Send + Sync,
for<'b> &'b Pdu: Event + Send, for<'b> &'b Pdu: Event + Send,
{ {
use RoomVersionId::*;
let stateres_version = match room_version {
| V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 | V11 => 2.0,
| _ => 2.1,
};
debug!("State resolution starting"); debug!("State resolution starting");
// Split non-conflicting and conflicting state // Split non-conflicting and conflicting state
@ -108,13 +113,27 @@ where
debug!(count = conflicting.len(), "conflicting events"); debug!(count = conflicting.len(), "conflicting events");
trace!(map = ?conflicting, "conflicting events"); trace!(map = ?conflicting, "conflicting events");
let conflicted_state_subgraph: HashSet<_> = if stateres_version >= 2.1 {
calculate_conflicted_subgraph(&conflicting, event_fetch)
.await
.ok_or_else(|| {
Error::InvalidPdu("Failed to calculate conflicted subgraph".to_owned())
})?
} else {
HashSet::new()
};
debug!(count = conflicted_state_subgraph.len(), "conflicted subgraph");
trace!(set = ?conflicted_state_subgraph, "conflicted subgraph");
let conflicting_values = conflicting.into_values().flatten().stream(); let conflicting_values = conflicting.into_values().flatten().stream();
// `all_conflicted` contains unique items // `all_conflicted` contains unique items
// synapse says `full_set = {eid for eid in full_conflicted_set if eid in // synapse says `full_set = {eid for eid in full_conflicted_set if eid in
// event_map}` // event_map}`
// Hydra: Also consider the conflicted state subgraph
let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets) let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets)
.chain(conflicting_values) .chain(conflicting_values)
.chain(conflicted_state_subgraph.into_iter().stream())
.broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id)) .broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id))
.collect() .collect()
.await; .await;
@ -150,6 +169,7 @@ where
// Sequentially auth check each control event. // Sequentially auth check each control event.
let resolved_control = iterative_auth_check( let resolved_control = iterative_auth_check(
&room_version, &room_version,
stateres_version,
sorted_control_levels.iter().stream().map(AsRef::as_ref), sorted_control_levels.iter().stream().map(AsRef::as_ref),
clean.clone(), clean.clone(),
&event_fetch, &event_fetch,
@ -183,10 +203,11 @@ where
let sorted_left_events = let sorted_left_events =
mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?; mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?;
trace!(list = ?sorted_left_events, "events left, sorted"); trace!(list = ?sorted_left_events, "events left, sorted, running iterative auth check");
let mut resolved_state = iterative_auth_check( let mut resolved_state = iterative_auth_check(
&room_version, &room_version,
stateres_version,
sorted_left_events.iter().stream().map(AsRef::as_ref), sorted_left_events.iter().stream().map(AsRef::as_ref),
resolved_control, // The control events are added to the final resolved state resolved_control, // The control events are added to the final resolved state
&event_fetch, &event_fetch,
@ -198,6 +219,7 @@ where
resolved_state.extend(clean); resolved_state.extend(clean);
debug!("state resolution finished"); debug!("state resolution finished");
trace!( map = ?resolved_state, "final resolved state" );
Ok(resolved_state) Ok(resolved_state)
} }
@ -250,6 +272,52 @@ where
(unconflicted_state, conflicted_state) (unconflicted_state, conflicted_state)
} }
/// Calculate the conflicted subgraph
async fn calculate_conflicted_subgraph<F, Fut, E>(
conflicted: &StateMap<Vec<OwnedEventId>>,
fetch_event: &F,
) -> Option<HashSet<OwnedEventId>>
where
F: Fn(OwnedEventId) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E: Event + Send + Sync,
{
let conflicted_events: HashSet<_> = conflicted.values().flatten().cloned().collect();
let mut subgraph: HashSet<OwnedEventId> = HashSet::new();
let mut stack: Vec<Vec<OwnedEventId>> =
vec![conflicted_events.iter().cloned().collect::<Vec<_>>()];
let mut path: Vec<OwnedEventId> = Vec::new();
let mut seen: HashSet<OwnedEventId> = HashSet::new();
let next_event = |stack: &mut Vec<Vec<_>>, path: &mut Vec<_>| {
while stack.last().is_some_and(std::vec::Vec::is_empty) {
stack.pop();
path.pop();
}
stack.last_mut().and_then(std::vec::Vec::pop)
};
while let Some(event_id) = next_event(&mut stack, &mut path) {
path.push(event_id.clone());
if subgraph.contains(&event_id) {
if path.len() > 1 {
subgraph.extend(path.iter().cloned());
}
path.pop();
continue;
}
if conflicted_events.contains(&event_id) && path.len() > 1 {
subgraph.extend(path.iter().cloned());
}
if seen.contains(&event_id) {
path.pop();
continue;
}
let evt = fetch_event(event_id.clone()).await?;
stack.push(evt.auth_events().map(ToOwned::to_owned).collect());
seen.insert(event_id);
}
Some(subgraph)
}
/// Returns a Vec of deduped EventIds that appear in some chains but not others. /// Returns a Vec of deduped EventIds that appear in some chains but not others.
#[allow(clippy::arithmetic_side_effects)] #[allow(clippy::arithmetic_side_effects)]
fn get_auth_chain_diff<Id, Hasher>( fn get_auth_chain_diff<Id, Hasher>(
@ -513,8 +581,10 @@ where
/// For each `events_to_check` event we gather the events needed to auth it from /// For each `events_to_check` event we gather the events needed to auth it from
/// the the `fetch_event` closure and verify each event using the /// the the `fetch_event` closure and verify each event using the
/// `event_auth::auth_check` function. /// `event_auth::auth_check` function.
#[tracing::instrument(level = "trace", skip_all)]
async fn iterative_auth_check<'a, E, F, Fut, S>( async fn iterative_auth_check<'a, E, F, Fut, S>(
room_version: &RoomVersion, room_version: &RoomVersion,
stateres_version: f32,
events_to_check: S, events_to_check: S,
unconflicted_state: StateMap<OwnedEventId>, unconflicted_state: StateMap<OwnedEventId>,
fetch_event: &F, fetch_event: &F,
@ -538,12 +608,15 @@ where
.try_collect() .try_collect()
.boxed() .boxed()
.await?; .await?;
trace!(list = ?events_to_check, "events to check");
let auth_event_ids: HashSet<OwnedEventId> = events_to_check let auth_event_ids: HashSet<OwnedEventId> = events_to_check
.iter() .iter()
.flat_map(|event: &E| event.auth_events().map(ToOwned::to_owned)) .flat_map(|event: &E| event.auth_events().map(ToOwned::to_owned))
.collect(); .collect();
trace!(set = ?auth_event_ids, "auth event IDs to fetch");
let auth_events: HashMap<OwnedEventId, E> = auth_event_ids let auth_events: HashMap<OwnedEventId, E> = auth_event_ids
.into_iter() .into_iter()
.stream() .stream()
@ -553,9 +626,15 @@ where
.boxed() .boxed()
.await; .await;
trace!(map = ?auth_events.keys().collect::<Vec<_>>(), "fetched auth events");
let auth_events = &auth_events; let auth_events = &auth_events;
let mut resolved_state = unconflicted_state; let mut resolved_state = match stateres_version {
| 2.1 => StateMap::new(),
| _ => unconflicted_state,
};
for event in events_to_check { for event in events_to_check {
trace!(event_id = event.event_id().as_str(), "checking event");
let state_key = event let state_key = event
.state_key() .state_key()
.ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?; .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?;
@ -565,13 +644,29 @@ where
event.sender(), event.sender(),
Some(state_key), Some(state_key),
event.content(), event.content(),
room_version,
)?; )?;
trace!(list = ?auth_types, event_id = event.event_id().as_str(), "auth types for event");
let mut auth_state = StateMap::new(); let mut auth_state = StateMap::new();
if room_version.room_ids_as_hashes {
trace!("room version uses hashed IDs, manually fetching create event");
let create_event_id_raw = event.room_id_or_hash().as_str().replace('!', "$");
let create_event_id = EventId::parse(&create_event_id_raw).map_err(|e| {
Error::InvalidPdu(format!(
"Failed to parse create event ID from room ID/hash: {e}"
))
})?;
let create_event = fetch_event(create_event_id.into())
.await
.ok_or_else(|| Error::NotFound("Failed to find create event".into()))?;
auth_state.insert(create_event.event_type().with_state_key(""), create_event);
}
for aid in event.auth_events() { for aid in event.auth_events() {
if let Some(ev) = auth_events.get(aid) { if let Some(ev) = auth_events.get(aid) {
//TODO: synapse checks "rejected_reason" which is most likely related to //TODO: synapse checks "rejected_reason" which is most likely related to
// soft-failing // soft-failing
trace!(event_id = aid.as_str(), "found auth event");
auth_state.insert( auth_state.insert(
ev.event_type() ev.event_type()
.with_state_key(ev.state_key().ok_or_else(|| { .with_state_key(ev.state_key().ok_or_else(|| {
@ -600,8 +695,9 @@ where
auth_state.insert(key.to_owned(), event); auth_state.insert(key.to_owned(), event);
}) })
.await; .await;
trace!(map = ?auth_state.keys().collect::<Vec<_>>(), event_id = event.event_id().as_str(), "auth state for event");
debug!("event to check {:?}", event.event_id()); debug!(event_id = event.event_id().as_str(), "Running auth checks");
// The key for this is (eventType + a state_key of the signed token not sender) // The key for this is (eventType + a state_key of the signed token not sender)
// so search for it // so search for it
@ -617,16 +713,29 @@ where
) )
}; };
let auth_result = let auth_result = auth_check(
auth_check(room_version, &event, current_third_party, fetch_state).await; room_version,
&event,
current_third_party,
fetch_state,
&fetch_state(&StateEventType::RoomCreate, "")
.await
.expect("create event must exist"),
)
.await;
match auth_result { match auth_result {
| Ok(true) => { | Ok(true) => {
// add event to resolved state map // add event to resolved state map
trace!(
event_id = event.event_id().as_str(),
"event passed the authentication check, adding to resolved state"
);
resolved_state.insert( resolved_state.insert(
event.event_type().with_state_key(state_key), event.event_type().with_state_key(state_key),
event.event_id().to_owned(), event.event_id().to_owned(),
); );
trace!(map = ?resolved_state, "new resolved state");
}, },
| Ok(false) => { | Ok(false) => {
// synapse passes here on AuthError. We do not add this event to resolved_state. // synapse passes here on AuthError. We do not add this event to resolved_state.
@ -638,7 +747,8 @@ where
}, },
} }
} }
trace!(map = ?resolved_state, "final resolved state from iterative auth check");
debug!("iterative auth check finished");
Ok(resolved_state) Ok(resolved_state)
} }
@ -909,6 +1019,7 @@ mod tests {
let resolved_power = super::iterative_auth_check( let resolved_power = super::iterative_auth_check(
&RoomVersion::V6, &RoomVersion::V6,
2.1,
sorted_power_events.iter().map(AsRef::as_ref).stream(), sorted_power_events.iter().map(AsRef::as_ref).stream(),
HashMap::new(), // unconflicted events HashMap::new(), // unconflicted events
&fetcher, &fetcher,

View file

@ -61,25 +61,34 @@ pub struct RoomVersion {
pub extra_redaction_checks: bool, pub extra_redaction_checks: bool,
/// Allow knocking in event authentication. /// Allow knocking in event authentication.
/// ///
/// See [room v7 specification](https://spec.matrix.org/latest/rooms/v7/) for more information. /// See [room v7 specification](https://spec.matrix.org/latest/rooms/v7/)
pub allow_knocking: bool, pub allow_knocking: bool,
/// Adds support for the restricted join rule. /// Adds support for the restricted join rule.
/// ///
/// See: [MSC3289](https://github.com/matrix-org/matrix-spec-proposals/pull/3289) for more information. /// See: [MSC3289](https://github.com/matrix-org/matrix-spec-proposals/pull/3289)
pub restricted_join_rules: bool, pub restricted_join_rules: bool,
/// Adds support for the knock_restricted join rule. /// Adds support for the knock_restricted join rule.
/// ///
/// See: [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) for more information. /// See: [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787)
pub knock_restricted_join_rule: bool, pub knock_restricted_join_rule: bool,
/// Enforces integer power levels. /// Enforces integer power levels.
/// ///
/// See: [MSC3667](https://github.com/matrix-org/matrix-spec-proposals/pull/3667) for more information. /// See: [MSC3667](https://github.com/matrix-org/matrix-spec-proposals/pull/3667)
pub integer_power_levels: bool, pub integer_power_levels: bool,
/// Determine the room creator using the `m.room.create` event's `sender`, /// Determine the room creator using the `m.room.create` event's `sender`,
/// instead of the event content's `creator` field. /// instead of the event content's `creator` field.
/// ///
/// See: [MSC2175](https://github.com/matrix-org/matrix-spec-proposals/pull/2175) for more information. /// See: [MSC2175](https://github.com/matrix-org/matrix-spec-proposals/pull/2175)
pub use_room_create_sender: bool, pub use_room_create_sender: bool,
/// Whether the room creators are considered superusers.
/// A superuser will always have infinite power levels in the room.
///
/// See: [MSC4289](https://github.com/matrix-org/matrix-spec-proposals/pull/4289)
pub explicitly_privilege_room_creators: bool,
/// Whether the room's m.room.create event ID is itself the room ID.
///
/// See: [MSC4291](https://github.com/matrix-org/matrix-spec-proposals/pull/4291)
pub room_ids_as_hashes: bool,
} }
impl RoomVersion { impl RoomVersion {
@ -97,6 +106,8 @@ impl RoomVersion {
knock_restricted_join_rule: false, knock_restricted_join_rule: false,
integer_power_levels: false, integer_power_levels: false,
use_room_create_sender: false, use_room_create_sender: false,
explicitly_privilege_room_creators: false,
room_ids_as_hashes: false,
}; };
pub const V10: Self = Self { pub const V10: Self = Self {
knock_restricted_join_rule: true, knock_restricted_join_rule: true,
@ -107,6 +118,11 @@ impl RoomVersion {
use_room_create_sender: true, use_room_create_sender: true,
..Self::V10 ..Self::V10
}; };
pub const V12: Self = Self {
explicitly_privilege_room_creators: true,
room_ids_as_hashes: true,
..Self::V11
};
pub const V2: Self = Self { pub const V2: Self = Self {
state_res: StateResolutionVersion::V2, state_res: StateResolutionVersion::V2,
..Self::V1 ..Self::V1
@ -144,6 +160,7 @@ impl RoomVersion {
| RoomVersionId::V9 => Self::V9, | RoomVersionId::V9 => Self::V9,
| RoomVersionId::V10 => Self::V10, | RoomVersionId::V10 => Self::V10,
| RoomVersionId::V11 => Self::V11, | RoomVersionId::V11 => Self::V11,
| RoomVersionId::V12 => Self::V12,
| ver => return Err(Error::Unsupported(format!("found version `{ver}`"))), | ver => return Err(Error::Unsupported(format!("found version `{ver}`"))),
}) })
} }

View file

@ -24,7 +24,7 @@ use serde_json::{
use super::auth_types_for_event; use super::auth_types_for_event;
use crate::{ use crate::{
Result, info, Result, RoomVersion, info,
matrix::{Event, EventTypeExt, Pdu, StateMap, pdu::EventHash}, matrix::{Event, EventTypeExt, Pdu, StateMap, pdu::EventHash},
}; };
@ -154,6 +154,7 @@ pub(crate) async fn do_check(
fake_event.sender(), fake_event.sender(),
fake_event.state_key(), fake_event.state_key(),
fake_event.content(), fake_event.content(),
&RoomVersion::V6,
) )
.unwrap(); .unwrap();
@ -398,7 +399,7 @@ pub(crate) fn to_init_pdu_event(
Pdu { Pdu {
event_id: id.try_into().unwrap(), event_id: id.try_into().unwrap(),
room_id: room_id().to_owned(), room_id: Some(room_id().to_owned()),
sender: sender.to_owned(), sender: sender.to_owned(),
origin_server_ts: ts.try_into().unwrap(), origin_server_ts: ts.try_into().unwrap(),
state_key: state_key.map(Into::into), state_key: state_key.map(Into::into),
@ -446,7 +447,7 @@ where
Pdu { Pdu {
event_id: id.try_into().unwrap(), event_id: id.try_into().unwrap(),
room_id: room_id().to_owned(), room_id: Some(room_id().to_owned()),
sender: sender.to_owned(), sender: sender.to_owned(),
origin_server_ts: ts.try_into().unwrap(), origin_server_ts: ts.try_into().unwrap(),
state_key: state_key.map(Into::into), state_key: state_key.map(Into::into),

View file

@ -19,7 +19,7 @@ where
S: Stream<Item = K> + Send + 'a, S: Stream<Item = K> + Send + 'a,
K: AsRef<[u8]> + Send + Sync + 'a, K: AsRef<[u8]> + Send + Sync + 'a,
{ {
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a; fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
} }
impl<'a, K, S> Get<'a, K, S> for S impl<'a, K, S> Get<'a, K, S> for S
@ -29,7 +29,7 @@ where
K: AsRef<[u8]> + Send + Sync + 'a, K: AsRef<[u8]> + Send + Sync + 'a,
{ {
#[inline] #[inline]
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a { fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
map.get_batch(self) map.get_batch(self)
} }
} }
@ -39,7 +39,7 @@ where
pub(crate) fn get_batch<'a, S, K>( pub(crate) fn get_batch<'a, S, K>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
keys: S, keys: S,
) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a ) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
where where
S: Stream<Item = K> + Send + 'a, S: Stream<Item = K> + Send + 'a,
K: AsRef<[u8]> + Send + Sync + 'a, K: AsRef<[u8]> + Send + Sync + 'a,

View file

@ -10,7 +10,7 @@ use super::stream::is_cached;
use crate::{keyval, keyval::Key, stream}; use crate::{keyval, keyval::Key, stream};
#[implement(super::Map)] #[implement(super::Map)]
pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
where where
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
{ {

View file

@ -15,7 +15,7 @@ use crate::{
pub fn keys_from<'a, K, P>( pub fn keys_from<'a, K, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
from: &P, from: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P> ) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where where
P: Serialize + ?Sized + Debug, P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
@ -40,7 +40,7 @@ where
pub fn keys_raw_from<'a, K, P>( pub fn keys_raw_from<'a, K, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
from: &P, from: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P> ) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync, P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,

View file

@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
pub fn keys_prefix<'a, K, P>( pub fn keys_prefix<'a, K, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &P, prefix: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P> ) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where where
P: Serialize + ?Sized + Debug, P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
@ -37,7 +37,7 @@ where
pub fn keys_raw_prefix<'a, K, P>( pub fn keys_raw_prefix<'a, K, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &'a P, prefix: &'a P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a ) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a, K: Deserialize<'a> + Send + 'a,
@ -50,7 +50,7 @@ where
pub fn raw_keys_prefix<'a, P>( pub fn raw_keys_prefix<'a, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &'a P, prefix: &'a P,
) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a ) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{ {

View file

@ -17,7 +17,7 @@ where
S: Stream<Item = K> + Send + 'a, S: Stream<Item = K> + Send + 'a,
K: Serialize + Debug, K: Serialize + Debug,
{ {
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a; fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
} }
impl<'a, K, S> Qry<'a, K, S> for S impl<'a, K, S> Qry<'a, K, S> for S
@ -27,7 +27,7 @@ where
K: Serialize + Debug + 'a, K: Serialize + Debug + 'a,
{ {
#[inline] #[inline]
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a { fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
map.qry_batch(self) map.qry_batch(self)
} }
} }
@ -37,7 +37,7 @@ where
pub(crate) fn qry_batch<'a, S, K>( pub(crate) fn qry_batch<'a, S, K>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
keys: S, keys: S,
) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a ) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
where where
S: Stream<Item = K> + Send + 'a, S: Stream<Item = K> + Send + 'a,
K: Serialize + Debug + 'a, K: Serialize + Debug + 'a,

View file

@ -10,7 +10,7 @@ use super::rev_stream::is_cached;
use crate::{keyval, keyval::Key, stream}; use crate::{keyval, keyval::Key, stream};
#[implement(super::Map)] #[implement(super::Map)]
pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
where where
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
{ {

View file

@ -15,7 +15,7 @@ use crate::{
pub fn rev_keys_from<'a, K, P>( pub fn rev_keys_from<'a, K, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
from: &P, from: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P> ) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where where
P: Serialize + ?Sized + Debug, P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
@ -41,7 +41,7 @@ where
pub fn rev_keys_raw_from<'a, K, P>( pub fn rev_keys_raw_from<'a, K, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
from: &P, from: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P> ) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync, P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,

View file

@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
pub fn rev_keys_prefix<'a, K, P>( pub fn rev_keys_prefix<'a, K, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &P, prefix: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P> ) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where where
P: Serialize + ?Sized + Debug, P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
@ -37,7 +37,7 @@ where
pub fn rev_keys_raw_prefix<'a, K, P>( pub fn rev_keys_raw_prefix<'a, K, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &'a P, prefix: &'a P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a ) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a, K: Deserialize<'a> + Send + 'a,
@ -50,7 +50,7 @@ where
pub fn rev_raw_keys_prefix<'a, P>( pub fn rev_raw_keys_prefix<'a, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &'a P, prefix: &'a P,
) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a ) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{ {

View file

@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
#[implement(super::Map)] #[implement(super::Map)]
pub fn rev_stream<'a, K, V>( pub fn rev_stream<'a, K, V>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
where where
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
V: Deserialize<'a> + Send, V: Deserialize<'a> + Send,

View file

@ -20,7 +20,7 @@ use crate::{
pub fn rev_stream_from<'a, K, V, P>( pub fn rev_stream_from<'a, K, V, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
from: &P, from: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P> ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where where
P: Serialize + ?Sized + Debug, P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
@ -55,7 +55,7 @@ where
pub fn rev_stream_raw_from<'a, K, V, P>( pub fn rev_stream_raw_from<'a, K, V, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
from: &P, from: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P> ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync, P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,

View file

@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
pub fn rev_stream_prefix<'a, K, V, P>( pub fn rev_stream_prefix<'a, K, V, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &P, prefix: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P> ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where where
P: Serialize + ?Sized + Debug, P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
@ -50,7 +50,7 @@ where
pub fn rev_stream_raw_prefix<'a, K, V, P>( pub fn rev_stream_raw_prefix<'a, K, V, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &'a P, prefix: &'a P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a, K: Deserialize<'a> + Send + 'a,
@ -68,7 +68,7 @@ where
pub fn rev_raw_stream_prefix<'a, P>( pub fn rev_raw_stream_prefix<'a, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &'a P, prefix: &'a P,
) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a ) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{ {

View file

@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
#[implement(super::Map)] #[implement(super::Map)]
pub fn stream<'a, K, V>( pub fn stream<'a, K, V>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
where where
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
V: Deserialize<'a> + Send, V: Deserialize<'a> + Send,

View file

@ -19,7 +19,7 @@ use crate::{
pub fn stream_from<'a, K, V, P>( pub fn stream_from<'a, K, V, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
from: &P, from: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P> ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where where
P: Serialize + ?Sized + Debug, P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
@ -53,7 +53,7 @@ where
pub fn stream_raw_from<'a, K, V, P>( pub fn stream_raw_from<'a, K, V, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
from: &P, from: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P> ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync, P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,

View file

@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
pub fn stream_prefix<'a, K, V, P>( pub fn stream_prefix<'a, K, V, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &P, prefix: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P> ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where where
P: Serialize + ?Sized + Debug, P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send, K: Deserialize<'a> + Send,
@ -50,7 +50,7 @@ where
pub fn stream_raw_prefix<'a, K, V, P>( pub fn stream_raw_prefix<'a, K, V, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &'a P, prefix: &'a P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a ) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a, K: Deserialize<'a> + Send + 'a,
@ -68,7 +68,7 @@ where
pub fn raw_stream_prefix<'a, P>( pub fn raw_stream_prefix<'a, P>(
self: &'a Arc<Self>, self: &'a Arc<Self>,
prefix: &'a P, prefix: &'a P,
) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a ) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
where where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{ {

View file

@ -443,7 +443,7 @@ pub(crate) fn into_send_seek(result: stream::State<'_>) -> stream::State<'static
unsafe { std::mem::transmute(result) } unsafe { std::mem::transmute(result) }
} }
fn into_recv_seek(result: stream::State<'static>) -> stream::State<'_> { fn into_recv_seek(result: stream::State<'static>) -> stream::State<'static> {
// SAFETY: This is to receive the State from the channel; see above. // SAFETY: This is to receive the State from the channel; see above.
unsafe { std::mem::transmute(result) } unsafe { std::mem::transmute(result) }
} }

View file

@ -326,7 +326,7 @@ fn ser_array() {
} }
#[test] #[test]
#[ignore] #[ignore = "arrayvec deserialization is not implemented (separators)"]
fn de_array() { fn de_array() {
let a: u64 = 123_456; let a: u64 = 123_456;
let b: u64 = 987_654; let b: u64 = 987_654;
@ -358,7 +358,7 @@ fn de_array() {
} }
#[test] #[test]
#[ignore] #[ignore = "Nested sequences are not supported"]
fn de_complex() { fn de_complex() {
type Key<'a> = (&'a UserId, ArrayVec<u64, 2>, &'a RoomId); type Key<'a> = (&'a UserId, ArrayVec<u64, 2>, &'a RoomId);

View file

@ -1,6 +1,6 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use conduwuit::{Result, pdu::PduBuilder}; use conduwuit::{Result, info, pdu::PduBuilder};
use futures::FutureExt; use futures::FutureExt;
use ruma::{ use ruma::{
RoomId, RoomVersionId, RoomId, RoomVersionId,
@ -26,7 +26,7 @@ use crate::Services;
/// used to issue admin commands by talking to the server user inside it. /// used to issue admin commands by talking to the server user inside it.
pub async fn create_admin_room(services: &Services) -> Result { pub async fn create_admin_room(services: &Services) -> Result {
let room_id = RoomId::new(services.globals.server_name()); let room_id = RoomId::new(services.globals.server_name());
let room_version = &services.config.default_room_version; let room_version = &RoomVersionId::V11;
let _short_id = services let _short_id = services
.rooms .rooms
@ -45,10 +45,13 @@ pub async fn create_admin_room(services: &Services) -> Result {
match room_version { match room_version {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
RoomCreateEventContent::new_v1(server_user.into()), RoomCreateEventContent::new_v1(server_user.into()),
| _ => RoomCreateEventContent::new_v11(), | V11 => RoomCreateEventContent::new_v11(),
| _ => RoomCreateEventContent::new_v12(),
} }
}; };
info!("Creating admin room {} with version {}", room_id, room_version);
// 1. The room create event // 1. The room create event
services services
.rooms .rooms
@ -61,7 +64,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
..create_content ..create_content
}), }),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -77,7 +80,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&RoomMemberEventContent::new(MembershipState::Join), &RoomMemberEventContent::new(MembershipState::Join),
), ),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -95,7 +98,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
..Default::default() ..Default::default()
}), }),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -108,7 +111,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(String::new(), &RoomJoinRulesEventContent::new(JoinRule::Invite)), PduBuilder::state(String::new(), &RoomJoinRulesEventContent::new(JoinRule::Invite)),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -124,7 +127,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared), &RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared),
), ),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -140,7 +143,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&RoomGuestAccessEventContent::new(GuestAccess::Forbidden), &RoomGuestAccessEventContent::new(GuestAccess::Forbidden),
), ),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -154,7 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(String::new(), &RoomNameEventContent::new(room_name)), PduBuilder::state(String::new(), &RoomNameEventContent::new(room_name)),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -168,7 +171,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://continuwuity.org/", services.config.server_name), topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://continuwuity.org/", services.config.server_name),
}), }),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -186,7 +189,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
alt_aliases: Vec::new(), alt_aliases: Vec::new(),
}), }),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()
@ -204,7 +207,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(String::new(), &RoomPreviewUrlsEventContent { disabled: true }), PduBuilder::state(String::new(), &RoomPreviewUrlsEventContent { disabled: true }),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.boxed() .boxed()

View file

@ -55,7 +55,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
&RoomMemberEventContent::new(MembershipState::Invite), &RoomMemberEventContent::new(MembershipState::Invite),
), ),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.await?; .await?;
@ -69,7 +69,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
&RoomMemberEventContent::new(MembershipState::Join), &RoomMemberEventContent::new(MembershipState::Join),
), ),
user_id, user_id,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.await?; .await?;
@ -83,7 +83,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
&RoomMemberEventContent::new(MembershipState::Invite), &RoomMemberEventContent::new(MembershipState::Invite),
), ),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.await?; .await?;
@ -111,7 +111,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::state(String::new(), &room_power_levels), PduBuilder::state(String::new(), &room_power_levels),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.await?; .await?;
@ -135,7 +135,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::timeline(&RoomMessageEventContent::text_markdown(welcome_message)), PduBuilder::timeline(&RoomMessageEventContent::text_markdown(welcome_message)),
server_user, server_user,
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.await?; .await?;
@ -218,7 +218,7 @@ pub async fn revoke_admin(&self, user_id: &UserId) -> Result {
..event ..event
}), }),
self.services.globals.server_user.as_ref(), self.services.globals.server_user.as_ref(),
&room_id, Some(&room_id),
&state_lock, &state_lock,
) )
.await .await

View file

@ -393,13 +393,13 @@ impl Service {
return Ok(()); return Ok(());
}; };
let response_sender = if self.is_admin_room(pdu.room_id()).await { let response_sender = if self.is_admin_room(pdu.room_id().unwrap()).await {
&self.services.globals.server_user &self.services.globals.server_user
} else { } else {
pdu.sender() pdu.sender()
}; };
self.respond_to_room(content, pdu.room_id(), response_sender) self.respond_to_room(content, pdu.room_id().unwrap(), response_sender)
.boxed() .boxed()
.await .await
} }
@ -419,7 +419,7 @@ impl Service {
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder::timeline(&self.text_or_file(content).await), PduBuilder::timeline(&self.text_or_file(content).await),
user_id, user_id,
room_id, Some(room_id),
&state_lock, &state_lock,
) )
.await .await
@ -447,7 +447,12 @@ impl Service {
self.services self.services
.timeline .timeline
.build_and_append_pdu(PduBuilder::timeline(&content), user_id, room_id, state_lock) .build_and_append_pdu(
PduBuilder::timeline(&content),
user_id,
Some(room_id),
state_lock,
)
.await?; .await?;
Ok(()) Ok(())
@ -484,7 +489,10 @@ impl Service {
} }
// Prevent unescaped !admin from being used outside of the admin room // Prevent unescaped !admin from being used outside of the admin room
if is_public_prefix && !self.is_admin_room(event.room_id()).await { if event.room_id().is_some()
&& is_public_prefix
&& !self.is_admin_room(event.room_id().unwrap()).await
{
return false; return false;
} }
@ -497,7 +505,7 @@ impl Service {
// the administrator can execute commands as the server user // the administrator can execute commands as the server user
let emergency_password_set = self.services.server.config.emergency_password.is_some(); let emergency_password_set = self.services.server.config.emergency_password.is_some();
let from_server = event.sender() == server_user && !emergency_password_set; let from_server = event.sender() == server_user && !emergency_password_set;
if from_server && self.is_admin_room(event.room_id()).await { if from_server && self.is_admin_room(event.room_id().unwrap()).await {
return false; return false;
} }

View file

@ -215,8 +215,8 @@ async fn db_lt_12(services: &Services) -> Result<()> {
for username in &services for username in &services
.users .users
.list_local_users() .list_local_users()
.map(UserId::to_owned) .map(ToOwned::to_owned)
.collect::<Vec<_>>() .collect::<Vec<OwnedUserId>>()
.await .await
{ {
let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name) let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name)
@ -295,8 +295,8 @@ async fn db_lt_13(services: &Services) -> Result<()> {
for username in &services for username in &services
.users .users
.list_local_users() .list_local_users()
.map(UserId::to_owned) .map(ToOwned::to_owned)
.collect::<Vec<_>>() .collect::<Vec<OwnedUserId>>()
.await .await
{ {
let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name) let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name)

View file

@ -183,8 +183,8 @@ impl Service {
.services .services
.users .users
.list_local_users() .list_local_users()
.map(UserId::to_owned) .map(ToOwned::to_owned)
.collect::<Vec<_>>() .collect::<Vec<OwnedUserId>>()
.await .await
{ {
let presence = self.db.get_presence(user_id).await; let presence = self.db.get_presence(user_id).await;

View file

@ -178,7 +178,7 @@ impl Service {
pub fn get_pushkeys<'a>( pub fn get_pushkeys<'a>(
&'a self, &'a self,
sender: &'a UserId, sender: &'a UserId,
) -> impl Stream<Item = &str> + Send + 'a { ) -> impl Stream<Item = &'a str> + Send + 'a {
let prefix = (sender, Interfix); let prefix = (sender, Interfix);
self.db self.db
.senderkey_pusher .senderkey_pusher
@ -287,18 +287,22 @@ impl Service {
{ {
let mut notify = None; let mut notify = None;
let mut tweaks = Vec::new(); let mut tweaks = Vec::new();
if event.room_id().is_none() {
// TODO(hydra): does this matter?
return Ok(());
}
let power_levels: RoomPowerLevelsEventContent = self let power_levels: RoomPowerLevelsEventContent = self
.services .services
.state_accessor .state_accessor
.room_state_get(event.room_id(), &StateEventType::RoomPowerLevels, "") .room_state_get(event.room_id().unwrap(), &StateEventType::RoomPowerLevels, "")
.await .await
.and_then(|event| event.get_content()) .and_then(|event| event.get_content())
.unwrap_or_default(); .unwrap_or_default();
let serialized = event.to_format(); let serialized = event.to_format();
for action in self for action in self
.get_actions(user, &ruleset, &power_levels, &serialized, event.room_id()) .get_actions(user, &ruleset, &power_levels, &serialized, event.room_id().unwrap())
.await .await
{ {
let n = match action { let n = match action {
@ -426,7 +430,7 @@ impl Service {
let mut notifi = Notification::new(d); let mut notifi = Notification::new(d);
notifi.event_id = Some(event.event_id().to_owned()); notifi.event_id = Some(event.event_id().to_owned());
notifi.room_id = Some(event.room_id().to_owned()); notifi.room_id = Some(event.room_id().unwrap().to_owned());
if http if http
.data .data
.get("org.matrix.msc4076.disable_badge_count") .get("org.matrix.msc4076.disable_badge_count")
@ -470,14 +474,14 @@ impl Service {
notifi.room_name = self notifi.room_name = self
.services .services
.state_accessor .state_accessor
.get_name(event.room_id()) .get_name(event.room_id().unwrap())
.await .await
.ok(); .ok();
notifi.room_alias = self notifi.room_alias = self
.services .services
.state_accessor .state_accessor
.get_canonical_alias(event.room_id()) .get_canonical_alias(event.room_id().unwrap())
.await .await
.ok(); .ok();

View file

@ -178,7 +178,7 @@ impl Service {
pub fn local_aliases_for_room<'a>( pub fn local_aliases_for_room<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &RoomAliasId> + Send + 'a { ) -> impl Stream<Item = &'a RoomAliasId> + Send + 'a {
let prefix = (room_id, Interfix); let prefix = (room_id, Interfix);
self.db self.db
.aliasid_alias .aliasid_alias
@ -188,7 +188,9 @@ impl Service {
} }
#[tracing::instrument(skip(self), level = "debug")] #[tracing::instrument(skip(self), level = "debug")]
pub fn all_local_aliases<'a>(&'a self) -> impl Stream<Item = (&RoomId, &str)> + Send + 'a { pub fn all_local_aliases<'a>(
&'a self,
) -> impl Stream<Item = (&'a RoomId, &'a str)> + Send + 'a {
self.db self.db
.alias_roomid .alias_roomid
.stream() .stream()

View file

@ -195,11 +195,11 @@ async fn get_auth_chain_inner(
debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"); debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events");
}, },
| Ok(pdu) => { | Ok(pdu) => {
if pdu.room_id != room_id { if pdu.room_id.is_some() && pdu.room_id != Some(room_id.to_owned()) {
return Err!(Request(Forbidden(error!( return Err!(Request(Forbidden(error!(
?event_id, ?event_id,
?room_id, ?room_id,
wrong_room_id = ?pdu.room_id, wrong_room_id = ?pdu.room_id.unwrap(),
"auth event for incorrect room" "auth event for incorrect room"
)))); ))));
} }

View file

@ -58,6 +58,8 @@ pub async fn handle_incoming_pdu<'a>(
value: BTreeMap<String, CanonicalJsonValue>, value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool, is_timeline_event: bool,
) -> Result<Option<RawPduId>> { ) -> Result<Option<RawPduId>> {
// TODO(hydra): Room IDs should be calculated before this function is called
assert!(!room_id.is_empty(), "room ID cannot be empty");
// 1. Skip the PDU if we already have it as a timeline event // 1. Skip the PDU if we already have it as a timeline event
if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await { if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await {
return Ok(Some(pdu_id)); return Ok(Some(pdu_id));

View file

@ -139,6 +139,7 @@ where
&pdu_event, &pdu_event,
None, // TODO: third party invite None, // TODO: third party invite
state_fetch, state_fetch,
create_event.as_pdu(),
) )
.await .await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;

View file

@ -99,7 +99,10 @@ impl Service {
} }
fn check_room_id<Pdu: Event>(room_id: &RoomId, pdu: &Pdu) -> Result { fn check_room_id<Pdu: Event>(room_id: &RoomId, pdu: &Pdu) -> Result {
if pdu.room_id() != room_id { if pdu
.room_id()
.is_some_and(|claimed_room_id| claimed_room_id != room_id)
{
return Err!(Request(InvalidParam(error!( return Err!(Request(InvalidParam(error!(
pdu_event_id = ?pdu.event_id(), pdu_event_id = ?pdu.event_id(),
pdu_room_id = ?pdu.room_id(), pdu_room_id = ?pdu.room_id(),

View file

@ -102,6 +102,7 @@ where
&incoming_pdu, &incoming_pdu,
None, // TODO: third party invite None, // TODO: third party invite
|ty, sk| state_fetch(ty.clone(), sk.into()), |ty, sk| state_fetch(ty.clone(), sk.into()),
create_event.as_pdu(),
) )
.await .await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
@ -123,6 +124,7 @@ where
incoming_pdu.sender(), incoming_pdu.sender(),
incoming_pdu.state_key(), incoming_pdu.state_key(),
incoming_pdu.content(), incoming_pdu.content(),
&room_version,
) )
.await?; .await?;
@ -140,6 +142,7 @@ where
&incoming_pdu, &incoming_pdu,
None, // third-party invite None, // third-party invite
state_fetch, state_fetch,
create_event.as_pdu(),
) )
.await .await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
@ -156,7 +159,7 @@ where
!self !self
.services .services
.state_accessor .state_accessor
.user_can_redact(&redact_id, incoming_pdu.sender(), incoming_pdu.room_id(), true) .user_can_redact(&redact_id, incoming_pdu.sender(), room_id, true)
.await?, .await?,
}; };
@ -313,6 +316,7 @@ where
state_ids_compressed, state_ids_compressed,
soft_fail, soft_fail,
&state_lock, &state_lock,
room_id,
) )
.await?; .await?;
@ -347,6 +351,7 @@ where
state_ids_compressed, state_ids_compressed,
soft_fail, soft_fail,
&state_lock, &state_lock,
room_id,
) )
.await?; .await?;

View file

@ -60,7 +60,7 @@ impl Data {
target: ShortEventId, target: ShortEventId,
from: PduCount, from: PduCount,
dir: Direction, dir: Direction,
) -> impl Stream<Item = (PduCount, impl Event)> + Send + '_ { ) -> impl Stream<Item = (PduCount, impl Event)> + Send + 'a {
// Query from exact position then filter excludes it (saturating_inc could skip // Query from exact position then filter excludes it (saturating_inc could skip
// events at min/max boundaries) // events at min/max boundaries)
let from_unsigned = from.into_unsigned(); let from_unsigned = from.into_unsigned();

View file

@ -65,7 +65,7 @@ impl Data {
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
since: u64, since: u64,
) -> impl Stream<Item = ReceiptItem<'_>> + Send + 'a { ) -> impl Stream<Item = ReceiptItem<'a>> + Send + 'a {
type Key<'a> = (&'a RoomId, u64, &'a UserId); type Key<'a> = (&'a RoomId, u64, &'a UserId);
type KeyVal<'a> = (Key<'a>, CanonicalJsonObject); type KeyVal<'a> = (Key<'a>, CanonicalJsonObject);

View file

@ -112,7 +112,7 @@ impl Service {
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
since: u64, since: u64,
) -> impl Stream<Item = ReceiptItem<'_>> + Send + 'a { ) -> impl Stream<Item = ReceiptItem<'a>> + Send + 'a {
self.db.readreceipts_since(room_id, since) self.db.readreceipts_since(room_id, since)
} }

View file

@ -104,7 +104,7 @@ pub fn deindex_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_b
pub async fn search_pdus<'a>( pub async fn search_pdus<'a>(
&'a self, &'a self,
query: &'a RoomQuery<'a>, query: &'a RoomQuery<'a>,
) -> Result<(usize, impl Stream<Item = impl Event + use<>> + Send + '_)> { ) -> Result<(usize, impl Stream<Item = impl Event + use<>> + Send + 'a)> {
let pdu_ids: Vec<_> = self.search_pdu_ids(query).await?.collect().await; let pdu_ids: Vec<_> = self.search_pdu_ids(query).await?.collect().await;
let filter = &query.criteria.filter; let filter = &query.criteria.filter;
@ -124,7 +124,7 @@ pub async fn search_pdus<'a>(
.wide_filter_map(move |pdu| async move { .wide_filter_map(move |pdu| async move {
self.services self.services
.state_accessor .state_accessor
.user_can_see_event(query.user_id?, pdu.room_id(), pdu.event_id()) .user_can_see_event(query.user_id?, pdu.room_id().unwrap(), pdu.event_id())
.await .await
.then_some(pdu) .then_some(pdu)
}) })
@ -137,10 +137,10 @@ pub async fn search_pdus<'a>(
// result is modeled as a stream such that callers don't have to be refactored // result is modeled as a stream such that callers don't have to be refactored
// though an additional async/wrap still exists for now // though an additional async/wrap still exists for now
#[implement(Service)] #[implement(Service)]
pub async fn search_pdu_ids( pub async fn search_pdu_ids<'a>(
&self, &'a self,
query: &RoomQuery<'_>, query: &'a RoomQuery<'_>,
) -> Result<impl Stream<Item = RawPduId> + Send + '_ + use<'_>> { ) -> Result<impl Stream<Item = RawPduId> + Send + 'a + use<'a>> {
let shortroomid = self.services.short.get_shortroomid(query.room_id).await?; let shortroomid = self.services.short.get_shortroomid(query.room_id).await?;
let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await; let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await;
@ -173,7 +173,7 @@ fn search_pdu_ids_query_words<'a>(
&'a self, &'a self,
shortroomid: ShortRoomId, shortroomid: ShortRoomId,
word: &'a str, word: &'a str,
) -> impl Stream<Item = RawPduId> + Send + '_ { ) -> impl Stream<Item = RawPduId> + Send + 'a {
self.search_pdu_ids_query_word(shortroomid, word) self.search_pdu_ids_query_word(shortroomid, word)
.map(move |key| -> RawPduId { .map(move |key| -> RawPduId {
let key = &key[prefix_len(word)..]; let key = &key[prefix_len(word)..];
@ -183,11 +183,11 @@ fn search_pdu_ids_query_words<'a>(
/// Iterate over raw database results for a word /// Iterate over raw database results for a word
#[implement(Service)] #[implement(Service)]
fn search_pdu_ids_query_word( fn search_pdu_ids_query_word<'a>(
&self, &'a self,
shortroomid: ShortRoomId, shortroomid: ShortRoomId,
word: &str, word: &'a str,
) -> impl Stream<Item = Val<'_>> + Send + '_ + use<'_> { ) -> impl Stream<Item = Val<'a>> + Send + 'a + use<'a> {
// rustc says const'ing this not yet stable // rustc says const'ing this not yet stable
let end_id: RawPduId = PduId { let end_id: RawPduId = PduId {
shortroomid, shortroomid,

View file

@ -62,7 +62,7 @@ pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEvent
pub fn multi_get_or_create_shorteventid<'a, I>( pub fn multi_get_or_create_shorteventid<'a, I>(
&'a self, &'a self,
event_ids: I, event_ids: I,
) -> impl Stream<Item = ShortEventId> + Send + '_ ) -> impl Stream<Item = ShortEventId> + Send + 'a
where where
I: Iterator<Item = &'a EventId> + Clone + Debug + Send + 'a, I: Iterator<Item = &'a EventId> + Clone + Debug + Send + 'a,
{ {

View file

@ -1,6 +1,7 @@
use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc};
use async_trait::async_trait; use async_trait::async_trait;
use conduwuit::{RoomVersion, debug};
use conduwuit_core::{ use conduwuit_core::{
Event, PduEvent, Result, err, Event, PduEvent, Result, err,
result::FlatOk, result::FlatOk,
@ -148,7 +149,7 @@ impl Service {
.roomid_spacehierarchy_cache .roomid_spacehierarchy_cache
.lock() .lock()
.await .await
.remove(&pdu.room_id); .remove(room_id);
}, },
| _ => continue, | _ => continue,
} }
@ -239,7 +240,7 @@ impl Service {
/// This adds all current state events (not including the incoming event) /// This adds all current state events (not including the incoming event)
/// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`.
#[tracing::instrument(skip(self, new_pdu), level = "debug")] #[tracing::instrument(skip(self, new_pdu), level = "debug")]
pub async fn append_to_state(&self, new_pdu: &PduEvent) -> Result<u64> { pub async fn append_to_state(&self, new_pdu: &PduEvent, room_id: &RoomId) -> Result<u64> {
const BUFSIZE: usize = size_of::<u64>(); const BUFSIZE: usize = size_of::<u64>();
let shorteventid = self let shorteventid = self
@ -248,7 +249,7 @@ impl Service {
.get_or_create_shorteventid(&new_pdu.event_id) .get_or_create_shorteventid(&new_pdu.event_id)
.await; .await;
let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id).await; let previous_shortstatehash = self.get_room_shortstatehash(room_id).await;
if let Ok(p) = previous_shortstatehash { if let Ok(p) = previous_shortstatehash {
self.db self.db
@ -319,7 +320,11 @@ impl Service {
} }
#[tracing::instrument(skip_all, level = "debug")] #[tracing::instrument(skip_all, level = "debug")]
pub async fn summary_stripped<'a, E>(&self, event: &'a E) -> Vec<Raw<AnyStrippedStateEvent>> pub async fn summary_stripped<'a, E>(
&self,
event: &'a E,
room_id: &RoomId,
) -> Vec<Raw<AnyStrippedStateEvent>>
where where
E: Event + Send + Sync, E: Event + Send + Sync,
&'a E: Event + Send, &'a E: Event + Send,
@ -338,7 +343,7 @@ impl Service {
let fetches = cells.into_iter().map(|(event_type, state_key)| { let fetches = cells.into_iter().map(|(event_type, state_key)| {
self.services self.services
.state_accessor .state_accessor
.room_state_get(event.room_id(), event_type, state_key) .room_state_get(room_id, event_type, state_key)
}); });
join_all(fetches) join_all(fetches)
@ -388,7 +393,7 @@ impl Service {
pub fn get_forward_extremities<'a>( pub fn get_forward_extremities<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &EventId> + Send + '_ { ) -> impl Stream<Item = &'a EventId> + Send + 'a {
let prefix = (room_id, Interfix); let prefix = (room_id, Interfix);
self.db self.db
@ -421,7 +426,7 @@ impl Service {
} }
/// This fetches auth events from the current state. /// This fetches auth events from the current state.
#[tracing::instrument(skip(self, content), level = "debug")] #[tracing::instrument(skip(self, content, room_version), level = "trace")]
pub async fn get_auth_events( pub async fn get_auth_events(
&self, &self,
room_id: &RoomId, room_id: &RoomId,
@ -429,13 +434,15 @@ impl Service {
sender: &UserId, sender: &UserId,
state_key: Option<&str>, state_key: Option<&str>,
content: &serde_json::value::RawValue, content: &serde_json::value::RawValue,
room_version: &RoomVersion,
) -> Result<StateMap<PduEvent>> { ) -> Result<StateMap<PduEvent>> {
let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else { let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else {
return Ok(HashMap::new()); return Ok(HashMap::new());
}; };
let auth_types = state_res::auth_types_for_event(kind, sender, state_key, content)?; let auth_types =
state_res::auth_types_for_event(kind, sender, state_key, content, room_version)?;
debug!(?auth_types, "Auth types for event");
let sauthevents: HashMap<_, _> = auth_types let sauthevents: HashMap<_, _> = auth_types
.iter() .iter()
.stream() .stream()
@ -448,6 +455,7 @@ impl Service {
}) })
.collect() .collect()
.await; .await;
debug!(?sauthevents, "Auth events to fetch");
let (state_keys, event_ids): (Vec<_>, Vec<_>) = self let (state_keys, event_ids): (Vec<_>, Vec<_>) = self
.services .services
@ -461,7 +469,7 @@ impl Service {
}) })
.unzip() .unzip()
.await; .await;
debug!(?state_keys, ?event_ids, "Auth events found in state");
self.services self.services
.short .short
.multi_get_eventid_from_short(event_ids.into_iter().stream()) .multi_get_eventid_from_short(event_ids.into_iter().stream())
@ -473,6 +481,7 @@ impl Service {
.get_pdu(&event_id) .get_pdu(&event_id)
.await .await
.map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu)) .map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu))
.inspect_err(|e| warn!("Failed to get auth event {event_id}: {e:?}"))
.ok() .ok()
}) })
.collect() .collect()

View file

@ -161,7 +161,7 @@ pub async fn user_can_invite(
&RoomMemberEventContent::new(MembershipState::Invite), &RoomMemberEventContent::new(MembershipState::Invite),
), ),
sender, sender,
room_id, Some(room_id),
state_lock, state_lock,
) )
.await .await

View file

@ -144,7 +144,7 @@ pub fn clear_appservice_in_room_cache(&self) { self.appservice_in_room_cache.wri
pub fn room_servers<'a>( pub fn room_servers<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &ServerName> + Send + 'a { ) -> impl Stream<Item = &'a ServerName> + Send + 'a {
let prefix = (room_id, Interfix); let prefix = (room_id, Interfix);
self.db self.db
.roomserverids .roomserverids
@ -167,7 +167,7 @@ pub async fn server_in_room<'a>(&'a self, server: &'a ServerName, room_id: &'a R
pub fn server_rooms<'a>( pub fn server_rooms<'a>(
&'a self, &'a self,
server: &'a ServerName, server: &'a ServerName,
) -> impl Stream<Item = &RoomId> + Send + 'a { ) -> impl Stream<Item = &'a RoomId> + Send + 'a {
let prefix = (server, Interfix); let prefix = (server, Interfix);
self.db self.db
.serverroomids .serverroomids
@ -202,7 +202,7 @@ pub fn get_shared_rooms<'a>(
&'a self, &'a self,
user_a: &'a UserId, user_a: &'a UserId,
user_b: &'a UserId, user_b: &'a UserId,
) -> impl Stream<Item = &RoomId> + Send + 'a { ) -> impl Stream<Item = &'a RoomId> + Send + 'a {
use conduwuit::utils::set; use conduwuit::utils::set;
let a = self.rooms_joined(user_a); let a = self.rooms_joined(user_a);
@ -216,7 +216,7 @@ pub fn get_shared_rooms<'a>(
pub fn room_members<'a>( pub fn room_members<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a { ) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix); let prefix = (room_id, Interfix);
self.db self.db
.roomuserid_joined .roomuserid_joined
@ -239,7 +239,7 @@ pub async fn room_joined_count(&self, room_id: &RoomId) -> Result<u64> {
pub fn local_users_in_room<'a>( pub fn local_users_in_room<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a { ) -> impl Stream<Item = &'a UserId> + Send + 'a {
self.room_members(room_id) self.room_members(room_id)
.ready_filter(|user| self.services.globals.user_is_local(user)) .ready_filter(|user| self.services.globals.user_is_local(user))
} }
@ -251,7 +251,7 @@ pub fn local_users_in_room<'a>(
pub fn active_local_users_in_room<'a>( pub fn active_local_users_in_room<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a { ) -> impl Stream<Item = &'a UserId> + Send + 'a {
self.local_users_in_room(room_id) self.local_users_in_room(room_id)
.filter(|user| self.services.users.is_active(user)) .filter(|user| self.services.users.is_active(user))
} }
@ -273,7 +273,7 @@ pub async fn room_invited_count(&self, room_id: &RoomId) -> Result<u64> {
pub fn room_useroncejoined<'a>( pub fn room_useroncejoined<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a { ) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix); let prefix = (room_id, Interfix);
self.db self.db
.roomuseroncejoinedids .roomuseroncejoinedids
@ -288,7 +288,7 @@ pub fn room_useroncejoined<'a>(
pub fn room_members_invited<'a>( pub fn room_members_invited<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a { ) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix); let prefix = (room_id, Interfix);
self.db self.db
.roomuserid_invitecount .roomuserid_invitecount
@ -303,7 +303,7 @@ pub fn room_members_invited<'a>(
pub fn room_members_knocked<'a>( pub fn room_members_knocked<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a { ) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix); let prefix = (room_id, Interfix);
self.db self.db
.roomuserid_knockedcount .roomuserid_knockedcount
@ -347,7 +347,7 @@ pub async fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result
pub fn rooms_joined<'a>( pub fn rooms_joined<'a>(
&'a self, &'a self,
user_id: &'a UserId, user_id: &'a UserId,
) -> impl Stream<Item = &RoomId> + Send + 'a { ) -> impl Stream<Item = &'a RoomId> + Send + 'a {
self.db self.db
.userroomid_joined .userroomid_joined
.keys_raw_prefix(user_id) .keys_raw_prefix(user_id)

View file

@ -81,7 +81,7 @@ pub async fn servers_route_via(&self, room_id: &RoomId) -> Result<Vec<OwnedServe
pub fn servers_invite_via<'a>( pub fn servers_invite_via<'a>(
&'a self, &'a self,
room_id: &'a RoomId, room_id: &'a RoomId,
) -> impl Stream<Item = &ServerName> + Send + 'a { ) -> impl Stream<Item = &'a ServerName> + Send + 'a {
type KeyVal<'a> = (Ignore, Vec<&'a ServerName>); type KeyVal<'a> = (Ignore, Vec<&'a ServerName>);
self.db self.db

View file

@ -42,6 +42,7 @@ pub async fn append_incoming_pdu<'a, Leaves>(
state_ids_compressed: Arc<CompressedState>, state_ids_compressed: Arc<CompressedState>,
soft_fail: bool, soft_fail: bool,
state_lock: &'a RoomMutexGuard, state_lock: &'a RoomMutexGuard,
room_id: &'a ruma::RoomId,
) -> Result<Option<RawPduId>> ) -> Result<Option<RawPduId>>
where where
Leaves: Iterator<Item = &'a EventId> + Send + 'a, Leaves: Iterator<Item = &'a EventId> + Send + 'a,
@ -51,24 +52,24 @@ where
// fail. // fail.
self.services self.services
.state .state
.set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed) .set_event_state(&pdu.event_id, room_id, state_ids_compressed)
.await?; .await?;
if soft_fail { if soft_fail {
self.services self.services
.pdu_metadata .pdu_metadata
.mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); .mark_as_referenced(room_id, pdu.prev_events.iter().map(AsRef::as_ref));
self.services self.services
.state .state
.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) .set_forward_extremities(room_id, new_room_leaves, state_lock)
.await; .await;
return Ok(None); return Ok(None);
} }
let pdu_id = self let pdu_id = self
.append_pdu(pdu, pdu_json, new_room_leaves, state_lock) .append_pdu(pdu, pdu_json, new_room_leaves, state_lock, room_id)
.await?; .await?;
Ok(Some(pdu_id)) Ok(Some(pdu_id))
@ -88,6 +89,7 @@ pub async fn append_pdu<'a, Leaves>(
mut pdu_json: CanonicalJsonObject, mut pdu_json: CanonicalJsonObject,
leaves: Leaves, leaves: Leaves,
state_lock: &'a RoomMutexGuard, state_lock: &'a RoomMutexGuard,
room_id: &'a ruma::RoomId,
) -> Result<RawPduId> ) -> Result<RawPduId>
where where
Leaves: Iterator<Item = &'a EventId> + Send + 'a, Leaves: Iterator<Item = &'a EventId> + Send + 'a,
@ -98,7 +100,7 @@ where
let shortroomid = self let shortroomid = self
.services .services
.short .short
.get_shortroomid(pdu.room_id()) .get_shortroomid(room_id)
.await .await
.map_err(|_| err!(Database("Room does not exist")))?; .map_err(|_| err!(Database("Room does not exist")))?;
@ -151,14 +153,14 @@ where
// We must keep track of all events that have been referenced. // We must keep track of all events that have been referenced.
self.services self.services
.pdu_metadata .pdu_metadata
.mark_as_referenced(pdu.room_id(), pdu.prev_events().map(AsRef::as_ref)); .mark_as_referenced(room_id, pdu.prev_events().map(AsRef::as_ref));
self.services self.services
.state .state
.set_forward_extremities(pdu.room_id(), leaves, state_lock) .set_forward_extremities(room_id, leaves, state_lock)
.await; .await;
let insert_lock = self.mutex_insert.lock(pdu.room_id()).await; let insert_lock = self.mutex_insert.lock(room_id).await;
let count1 = self.services.globals.next_count().unwrap(); let count1 = self.services.globals.next_count().unwrap();
@ -166,11 +168,11 @@ where
// appending fails // appending fails
self.services self.services
.read_receipt .read_receipt
.private_read_set(pdu.room_id(), pdu.sender(), count1); .private_read_set(room_id, pdu.sender(), count1);
self.services self.services
.user .user
.reset_notification_counts(pdu.sender(), pdu.room_id()); .reset_notification_counts(pdu.sender(), room_id);
let count2 = PduCount::Normal(self.services.globals.next_count().unwrap()); let count2 = PduCount::Normal(self.services.globals.next_count().unwrap());
let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count2 }.into(); let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count2 }.into();
@ -184,14 +186,14 @@ where
let power_levels: RoomPowerLevelsEventContent = self let power_levels: RoomPowerLevelsEventContent = self
.services .services
.state_accessor .state_accessor
.room_state_get_content(pdu.room_id(), &StateEventType::RoomPowerLevels, "") .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "")
.await .await
.unwrap_or_default(); .unwrap_or_default();
let mut push_target: HashSet<_> = self let mut push_target: HashSet<_> = self
.services .services
.state_cache .state_cache
.active_local_users_in_room(pdu.room_id()) .active_local_users_in_room(room_id)
.map(ToOwned::to_owned) .map(ToOwned::to_owned)
// Don't notify the sender of their own events, and dont send from ignored users // Don't notify the sender of their own events, and dont send from ignored users
.ready_filter(|user| *user != pdu.sender()) .ready_filter(|user| *user != pdu.sender())
@ -230,7 +232,7 @@ where
for action in self for action in self
.services .services
.pusher .pusher
.get_actions(user, &rules_for_user, &power_levels, &serialized, pdu.room_id()) .get_actions(user, &rules_for_user, &power_levels, &serialized, room_id)
.await .await
{ {
match action { match action {
@ -268,20 +270,20 @@ where
} }
self.db self.db
.increment_notification_counts(pdu.room_id(), notifies, highlights); .increment_notification_counts(room_id, notifies, highlights);
match *pdu.kind() { match *pdu.kind() {
| TimelineEventType::RoomRedaction => { | TimelineEventType::RoomRedaction => {
use RoomVersionId::*; use RoomVersionId::*;
let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?; let room_version_id = self.services.state.get_room_version(room_id).await?;
match room_version_id { match room_version_id {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
if let Some(redact_id) = pdu.redacts() { if let Some(redact_id) = pdu.redacts() {
if self if self
.services .services
.state_accessor .state_accessor
.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) .user_can_redact(redact_id, pdu.sender(), room_id, false)
.await? .await?
{ {
self.redact_pdu(redact_id, pdu, shortroomid).await?; self.redact_pdu(redact_id, pdu, shortroomid).await?;
@ -294,7 +296,7 @@ where
if self if self
.services .services
.state_accessor .state_accessor
.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) .user_can_redact(redact_id, pdu.sender(), room_id, false)
.await? .await?
{ {
self.redact_pdu(redact_id, pdu, shortroomid).await?; self.redact_pdu(redact_id, pdu, shortroomid).await?;
@ -310,7 +312,7 @@ where
.roomid_spacehierarchy_cache .roomid_spacehierarchy_cache
.lock() .lock()
.await .await
.remove(pdu.room_id()); .remove(room_id);
}, },
| TimelineEventType::RoomMember => { | TimelineEventType::RoomMember => {
if let Some(state_key) = pdu.state_key() { if let Some(state_key) = pdu.state_key() {
@ -320,8 +322,12 @@ where
let content: RoomMemberEventContent = pdu.get_content()?; let content: RoomMemberEventContent = pdu.get_content()?;
let stripped_state = match content.membership { let stripped_state = match content.membership {
| MembershipState::Invite | MembershipState::Knock => | MembershipState::Invite | MembershipState::Knock => self
self.services.state.summary_stripped(pdu).await.into(), .services
.state
.summary_stripped(pdu, room_id)
.await
.into(),
| _ => None, | _ => None,
}; };
@ -331,7 +337,7 @@ where
self.services self.services
.state_cache .state_cache
.update_membership( .update_membership(
pdu.room_id(), room_id,
target_user_id, target_user_id,
content, content,
pdu.sender(), pdu.sender(),
@ -392,7 +398,7 @@ where
if self if self
.services .services
.state_cache .state_cache
.appservice_in_room(pdu.room_id(), appservice) .appservice_in_room(room_id, appservice)
.await .await
{ {
self.services self.services
@ -430,12 +436,12 @@ where
let matching_aliases = |aliases: NamespaceRegex| { let matching_aliases = |aliases: NamespaceRegex| {
self.services self.services
.alias .alias
.local_aliases_for_room(pdu.room_id()) .local_aliases_for_room(room_id)
.ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) .ready_any(move |room_alias| aliases.is_match(room_alias.as_str()))
}; };
if matching_aliases(appservice.aliases.clone()).await if matching_aliases(appservice.aliases.clone()).await
|| appservice.rooms.is_match(pdu.room_id().as_str()) || appservice.rooms.is_match(room_id.as_str())
|| matching_users(&appservice.users) || matching_users(&appservice.users)
{ {
self.services self.services

View file

@ -1,5 +1,6 @@
use std::{collections::HashSet, iter::once}; use std::{collections::HashSet, iter::once};
use conduwuit::{RoomVersion, debug_warn, trace};
use conduwuit_core::{ use conduwuit_core::{
Err, Result, implement, Err, Result, implement,
matrix::{event::Event, pdu::PduBuilder}, matrix::{event::Event, pdu::PduBuilder},
@ -11,6 +12,7 @@ use ruma::{
events::{ events::{
TimelineEventType, TimelineEventType,
room::{ room::{
create::RoomCreateEventContent,
member::{MembershipState, RoomMemberEventContent}, member::{MembershipState, RoomMemberEventContent},
redaction::RoomRedactionEventContent, redaction::RoomRedactionEventContent,
}, },
@ -23,32 +25,36 @@ use super::RoomMutexGuard;
/// takes a roomid_mutex_state, meaning that only this function is able to /// takes a roomid_mutex_state, meaning that only this function is able to
/// mutate the room state. /// mutate the room state.
#[implement(super::Service)] #[implement(super::Service)]
#[tracing::instrument(skip(self, state_lock), level = "debug")] #[tracing::instrument(skip(self, state_lock), level = "trace")]
pub async fn build_and_append_pdu( pub async fn build_and_append_pdu(
&self, &self,
pdu_builder: PduBuilder, pdu_builder: PduBuilder,
sender: &UserId, sender: &UserId,
room_id: &RoomId, room_id: Option<&RoomId>,
state_lock: &RoomMutexGuard, state_lock: &RoomMutexGuard,
) -> Result<OwnedEventId> { ) -> Result<OwnedEventId> {
let (pdu, pdu_json) = self let (pdu, pdu_json) = self
.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)
.await?; .await?;
if self.services.admin.is_admin_room(pdu.room_id()).await { let room_id = pdu.room_id_or_hash();
trace!("Checking if room {room_id} is an admin room");
if self.services.admin.is_admin_room(&room_id).await {
trace!("Room {room_id} is an admin room, checking PDU for admin room restrictions");
self.check_pdu_for_admin_room(&pdu, sender).boxed().await?; self.check_pdu_for_admin_room(&pdu, sender).boxed().await?;
} }
// If redaction event is not authorized, do not append it to the timeline // If redaction event is not authorized, do not append it to the timeline
if *pdu.kind() == TimelineEventType::RoomRedaction { if *pdu.kind() == TimelineEventType::RoomRedaction {
use RoomVersionId::*; use RoomVersionId::*;
match self.services.state.get_room_version(pdu.room_id()).await? { trace!("Running redaction checks for room {room_id}");
match self.services.state.get_room_version(&room_id).await? {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
if let Some(redact_id) = pdu.redacts() { if let Some(redact_id) = pdu.redacts() {
if !self if !self
.services .services
.state_accessor .state_accessor
.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) .user_can_redact(redact_id, pdu.sender(), &room_id, false)
.await? .await?
{ {
return Err!(Request(Forbidden("User cannot redact this event."))); return Err!(Request(Forbidden("User cannot redact this event.")));
@ -61,7 +67,7 @@ pub async fn build_and_append_pdu(
if !self if !self
.services .services
.state_accessor .state_accessor
.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) .user_can_redact(redact_id, pdu.sender(), &room_id, false)
.await? .await?
{ {
return Err!(Request(Forbidden("User cannot redact this event."))); return Err!(Request(Forbidden("User cannot redact this event.")));
@ -72,6 +78,7 @@ pub async fn build_and_append_pdu(
} }
if *pdu.kind() == TimelineEventType::RoomMember { if *pdu.kind() == TimelineEventType::RoomMember {
trace!("Running room member checks for room {room_id}");
let content: RoomMemberEventContent = pdu.get_content()?; let content: RoomMemberEventContent = pdu.get_content()?;
if content.join_authorized_via_users_server.is_some() if content.join_authorized_via_users_server.is_some()
@ -93,12 +100,27 @@ pub async fn build_and_append_pdu(
))); )));
} }
} }
if *pdu.kind() == TimelineEventType::RoomCreate {
trace!("Running room create checks for room {room_id}");
let content: RoomCreateEventContent = pdu.get_content()?;
let room_features = RoomVersion::new(&content.room_version)?;
if room_features.room_ids_as_hashes {
// bootstrap shortid for room
debug_warn!(%room_id, "Bootstrapping shortid for room");
self.services
.short
.get_or_create_shortroomid(&room_id)
.await;
}
}
// We append to state before appending the pdu, so we don't have a moment in // We append to state before appending the pdu, so we don't have a moment in
// time with the pdu without it's state. This is okay because append_pdu can't // time with the pdu without it's state. This is okay because append_pdu can't
// fail. // fail.
let statehashid = self.services.state.append_to_state(&pdu).await?; trace!("Appending {} state for room {room_id}", pdu.event_id());
let statehashid = self.services.state.append_to_state(&pdu, &room_id).await?;
trace!("Generating raw ID for PDU {}", pdu.event_id());
let pdu_id = self let pdu_id = self
.append_pdu( .append_pdu(
&pdu, &pdu,
@ -107,20 +129,22 @@ pub async fn build_and_append_pdu(
// of the room // of the room
once(pdu.event_id()), once(pdu.event_id()),
state_lock, state_lock,
&room_id,
) )
.boxed() .boxed()
.await?; .await?;
// We set the room state after inserting the pdu, so that we never have a moment // We set the room state after inserting the pdu, so that we never have a moment
// in time where events in the current room state do not exist // in time where events in the current room state do not exist
trace!("Setting room state for room {room_id}");
self.services self.services
.state .state
.set_room_state(pdu.room_id(), statehashid, state_lock); .set_room_state(&room_id, statehashid, state_lock);
let mut servers: HashSet<OwnedServerName> = self let mut servers: HashSet<OwnedServerName> = self
.services .services
.state_cache .state_cache
.room_servers(pdu.room_id()) .room_servers(&room_id)
.map(ToOwned::to_owned) .map(ToOwned::to_owned)
.collect() .collect()
.await; .await;
@ -141,11 +165,13 @@ pub async fn build_and_append_pdu(
// room_servers() and/or the if statement above // room_servers() and/or the if statement above
servers.remove(self.services.globals.server_name()); servers.remove(self.services.globals.server_name());
trace!("Sending PDU {} to {} servers", pdu.event_id(), servers.len());
self.services self.services
.sending .sending
.send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id) .send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id)
.await?; .await?;
trace!("Event {} in room {:?} has been appended", pdu.event_id(), room_id);
Ok(pdu.event_id().to_owned()) Ok(pdu.event_id().to_owned())
} }
@ -179,7 +205,7 @@ where
let count = self let count = self
.services .services
.state_cache .state_cache
.room_members(pdu.room_id()) .room_members(&pdu.room_id_or_hash())
.ready_filter(|user| self.services.globals.user_is_local(user)) .ready_filter(|user| self.services.globals.user_is_local(user))
.ready_filter(|user| *user != target) .ready_filter(|user| *user != target)
.boxed() .boxed()
@ -203,7 +229,7 @@ where
let count = self let count = self
.services .services
.state_cache .state_cache
.room_members(pdu.room_id()) .room_members(&pdu.room_id_or_hash())
.ready_filter(|user| self.services.globals.user_is_local(user)) .ready_filter(|user| self.services.globals.user_is_local(user))
.ready_filter(|user| *user != target) .ready_filter(|user| *user != target)
.boxed() .boxed()

View file

@ -1,5 +1,6 @@
use std::cmp; use std::{cmp, collections::HashMap};
use conduwuit::{smallstr::SmallString, trace};
use conduwuit_core::{ use conduwuit_core::{
Err, Error, Result, err, implement, Err, Error, Result, err, implement,
matrix::{ matrix::{
@ -11,12 +12,13 @@ use conduwuit_core::{
}; };
use futures::{StreamExt, TryStreamExt, future, future::ready}; use futures::{StreamExt, TryStreamExt, future, future::ready};
use ruma::{ use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomId, RoomVersionId, UserId, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId,
UserId,
canonical_json::to_canonical_value, canonical_json::to_canonical_value,
events::{StateEventType, TimelineEventType, room::create::RoomCreateEventContent}, events::{StateEventType, TimelineEventType, room::create::RoomCreateEventContent},
uint, uint,
}; };
use serde_json::value::to_raw_value; use serde_json::value::{RawValue, to_raw_value};
use tracing::warn; use tracing::warn;
use super::RoomMutexGuard; use super::RoomMutexGuard;
@ -26,10 +28,25 @@ pub async fn create_hash_and_sign_event(
&self, &self,
pdu_builder: PduBuilder, pdu_builder: PduBuilder,
sender: &UserId, sender: &UserId,
room_id: &RoomId, room_id: Option<&RoomId>,
_mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room _mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room
* state mutex */ * state mutex */
) -> Result<(PduEvent, CanonicalJsonObject)> { ) -> Result<(PduEvent, CanonicalJsonObject)> {
fn from_evt(
room_id: OwnedRoomId,
event_type: TimelineEventType,
content: Box<RawValue>,
) -> Result<RoomVersionId> {
if event_type == TimelineEventType::RoomCreate {
let content: RoomCreateEventContent = serde_json::from_str(content.get())?;
Ok(content.room_version)
} else {
Err(Error::InconsistentRoomState(
"non-create event for room of unknown version",
room_id,
))
}
}
let PduBuilder { let PduBuilder {
event_type, event_type,
content, content,
@ -38,67 +55,84 @@ pub async fn create_hash_and_sign_event(
redacts, redacts,
timestamp, timestamp,
} = pdu_builder; } = pdu_builder;
let prev_events: Vec<OwnedEventId> = self
.services
.state
.get_forward_extremities(room_id)
.take(20)
.map(Into::into)
.collect()
.await;
// If there was no create event yet, assume we are creating a room // If there was no create event yet, assume we are creating a room
let room_version_id = self let room_version_id = match room_id {
.services | Some(room_id) => self
.state .services
.get_room_version(room_id) .state
.await .get_room_version(room_id)
.or_else(|_| { .await
if event_type == TimelineEventType::RoomCreate { .or_else(|_| from_evt(room_id.to_owned(), event_type.clone(), content.clone()))?,
let content: RoomCreateEventContent = serde_json::from_str(content.get())?; | None => from_evt(
Ok(content.room_version) RoomId::new(self.services.globals.server_name()),
} else { event_type.clone(),
Err(Error::InconsistentRoomState( content.clone(),
"non-create event for room of unknown version", )?,
room_id.to_owned(), };
))
}
})?;
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
// TODO(hydra): Only create events can lack a room ID.
let auth_events = self let prev_events: Vec<OwnedEventId> = match room_id {
.services | Some(room_id) =>
.state self.services
.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content) .state
.await?; .get_forward_extremities(room_id)
.take(20)
.map(Into::into)
.collect()
.await,
| None => Vec::new(),
};
let auth_events: HashMap<(StateEventType, SmallString<[u8; 48]>), PduEvent> = match room_id {
| Some(room_id) =>
self.services
.state
.get_auth_events(
room_id,
&event_type,
sender,
state_key.as_deref(),
&content,
&room_version,
)
.await?,
| None => HashMap::new(),
};
// Our depth is the maximum depth of prev_events + 1 // Our depth is the maximum depth of prev_events + 1
let depth = prev_events let depth = match room_id {
.iter() | Some(_) => prev_events
.stream() .iter()
.map(Ok) .stream()
.and_then(|event_id| self.get_pdu(event_id)) .map(Ok)
.and_then(|pdu| future::ok(pdu.depth)) .and_then(|event_id| self.get_pdu(event_id))
.ignore_err() .and_then(|pdu| future::ok(pdu.depth))
.ready_fold(uint!(0), cmp::max) .ignore_err()
.await .ready_fold(uint!(0), cmp::max)
.saturating_add(uint!(1)); .await
.saturating_add(uint!(1)),
| None => uint!(1),
};
let mut unsigned = unsigned.unwrap_or_default(); let mut unsigned = unsigned.unwrap_or_default();
if let Some(state_key) = &state_key { if let Some(room_id) = room_id {
if let Ok(prev_pdu) = self if let Some(state_key) = &state_key {
.services if let Ok(prev_pdu) = self
.state_accessor .services
.room_state_get(room_id, &event_type.to_string().into(), state_key) .state_accessor
.await .room_state_get(room_id, &event_type.clone().to_string().into(), state_key)
{ .await
unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value()); {
unsigned.insert("prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender())?); unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value());
unsigned unsigned
.insert("replaces_state".to_owned(), serde_json::to_value(prev_pdu.event_id())?); .insert("prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender())?);
unsigned.insert(
"replaces_state".to_owned(),
serde_json::to_value(prev_pdu.event_id())?,
);
}
} }
} }
@ -109,15 +143,15 @@ pub async fn create_hash_and_sign_event(
// The first two events in a room are always m.room.create and m.room.member, // The first two events in a room are always m.room.create and m.room.member,
// so any other events with that same depth are illegal. // so any other events with that same depth are illegal.
warn!( warn!(
"Had unsafe depth {depth} when creating non-state event in {room_id}. Cowardly \ "Had unsafe depth {depth} when creating non-state event in {}. Cowardly aborting",
aborting" room_id.expect("room_id is Some here").as_str()
); );
return Err!(Request(Unknown("Unsafe depth for non-state event."))); return Err!(Request(Unknown("Unsafe depth for non-state event.")));
} }
let mut pdu = PduEvent { let mut pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(), event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: room_id.to_owned(), room_id: room_id.map(ToOwned::to_owned),
sender: sender.to_owned(), sender: sender.to_owned(),
origin: None, origin: None,
origin_server_ts: timestamp.map_or_else( origin_server_ts: timestamp.map_or_else(
@ -152,11 +186,30 @@ pub async fn create_hash_and_sign_event(
ready(auth_events.get(&key).map(ToOwned::to_owned)) ready(auth_events.get(&key).map(ToOwned::to_owned))
}; };
let room_id_or_hash = pdu.room_id_or_hash();
let create_pdu = match &pdu.kind {
| TimelineEventType::RoomCreate => None,
| _ => Some(
self.services
.state_accessor
.room_state_get(&room_id_or_hash, &StateEventType::RoomCreate, "")
.await
.map_err(|e| {
err!(Request(Forbidden(warn!("Failed to fetch room create event: {e}"))))
})?,
),
};
let create_event = match &pdu.kind {
| TimelineEventType::RoomCreate => &pdu,
| _ => create_pdu.as_ref().unwrap().as_pdu(),
};
let auth_check = state_res::auth_check( let auth_check = state_res::auth_check(
&room_version, &room_version,
&pdu, &pdu,
None, // TODO: third_party_invite None, // TODO: third_party_invite
auth_fetch, auth_fetch,
create_event,
) )
.await .await
.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
@ -164,6 +217,11 @@ pub async fn create_hash_and_sign_event(
if !auth_check { if !auth_check {
return Err!(Request(Forbidden("Event is not authorized."))); return Err!(Request(Forbidden("Event is not authorized.")));
} }
trace!(
"Event {} in room {} is authorized",
pdu.event_id,
pdu.room_id.as_ref().map_or("None", |id| id.as_str())
);
// Hash and sign // Hash and sign
let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| { let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| {
@ -178,13 +236,13 @@ pub async fn create_hash_and_sign_event(
}, },
} }
// Add origin because synapse likes that (and it's required in the spec)
pdu_json.insert( pdu_json.insert(
"origin".to_owned(), "origin".to_owned(),
to_canonical_value(self.services.globals.server_name()) to_canonical_value(self.services.globals.server_name())
.expect("server name is a valid CanonicalJsonValue"), .expect("server name is a valid CanonicalJsonValue"),
); );
trace!("hashing and signing event {}", pdu.event_id);
if let Err(e) = self if let Err(e) = self
.services .services
.server_keys .server_keys
@ -204,30 +262,45 @@ pub async fn create_hash_and_sign_event(
pdu_json.insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into())); pdu_json.insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into()));
// Check with the policy server // Check with the policy server
match self // TODO(hydra): Skip this check for create events (why didnt we do this
.services // already?)
.event_handler if room_id.is_some() {
.ask_policy_server(&pdu, room_id) trace!(
.await "Checking event {} in room {} with policy server",
{ pdu.event_id,
| Ok(true) => {}, pdu.room_id.as_ref().map_or("None", |id| id.as_str())
| Ok(false) => { );
return Err!(Request(Forbidden(debug_warn!( match self
"Policy server marked this event as spam" .services
)))); .event_handler
}, .ask_policy_server(&pdu, &pdu.room_id_or_hash())
| Err(e) => { .await
// fail open {
warn!("Failed to check event with policy server: {e}"); | Ok(true) => {},
}, | Ok(false) => {
return Err!(Request(Forbidden(debug_warn!(
"Policy server marked this event as spam"
))));
},
| Err(e) => {
// fail open
warn!("Failed to check event with policy server: {e}");
},
}
} }
// Generate short event id // Generate short event id
trace!(
"Generating short event ID for {} in room {}",
pdu.event_id,
pdu.room_id.as_ref().map_or("None", |id| id.as_str())
);
let _shorteventid = self let _shorteventid = self
.services .services
.short .short
.get_or_create_shorteventid(&pdu.event_id) .get_or_create_shorteventid(&pdu.event_id)
.await; .await;
trace!("New PDU created: {pdu:?}");
Ok((pdu, pdu_json)) Ok((pdu, pdu_json))
} }

View file

@ -39,7 +39,11 @@ pub async fn redact_pdu<Pdu: Event + Send + Sync>(
} }
} }
let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?; let room_version_id = self
.services
.state
.get_room_version(&pdu.room_id_or_hash())
.await?;
pdu.redact(&room_version_id, reason.to_value())?; pdu.redact(&room_version_id, reason.to_value())?;

View file

@ -798,7 +798,7 @@ impl Service {
let unread: UInt = self let unread: UInt = self
.services .services
.user .user
.notification_count(&user_id, pdu.room_id()) .notification_count(&user_id, &pdu.room_id_or_hash())
.await .await
.try_into() .try_into()
.expect("notification count can't go that high"); .expect("notification count can't go that high");

View file

@ -1,6 +1,6 @@
use std::borrow::Borrow; use std::borrow::Borrow;
use conduwuit::{Err, Result, implement}; use conduwuit::{Err, Result, debug, debug_error, implement};
use ruma::{ use ruma::{
CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId, CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId,
api::federation::discovery::VerifyKey, api::federation::discovery::VerifyKey,
@ -19,9 +19,11 @@ pub async fn get_event_keys(
let required = match required_keys(object, version) { let required = match required_keys(object, version) {
| Ok(required) => required, | Ok(required) => required,
| Err(e) => { | Err(e) => {
debug_error!("Failed to determine keys required to verify: {e}");
return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")); return Err!(BadServerResponse("Failed to determine keys required to verify: {e}"));
}, },
}; };
debug!(?required, "Keys required to verify event");
let batch = required let batch = required
.iter() .iter()
@ -61,6 +63,7 @@ where
} }
#[implement(super::Service)] #[implement(super::Service)]
#[tracing::instrument(skip(self))]
pub async fn get_verify_key( pub async fn get_verify_key(
&self, &self,
origin: &ServerName, origin: &ServerName,
@ -70,6 +73,7 @@ pub async fn get_verify_key(
let notary_only = self.services.server.config.only_query_trusted_key_servers; let notary_only = self.services.server.config.only_query_trusted_key_servers;
if let Some(result) = self.verify_keys_for(origin).await.remove(key_id) { if let Some(result) = self.verify_keys_for(origin).await.remove(key_id) {
debug!("Found key in cache");
return Ok(result); return Ok(result);
} }

View file

@ -8,7 +8,7 @@ mod verify;
use std::{collections::BTreeMap, sync::Arc, time::Duration}; use std::{collections::BTreeMap, sync::Arc, time::Duration};
use conduwuit::{ use conduwuit::{
Result, Server, implement, Result, Server, debug, debug_error, debug_warn, implement,
utils::{IterStream, timepoint_from_now}, utils::{IterStream, timepoint_from_now},
}; };
use database::{Deserialized, Json, Map}; use database::{Deserialized, Json, Map};
@ -112,6 +112,7 @@ async fn add_signing_keys(&self, new_keys: ServerSigningKeys) {
} }
#[implement(Service)] #[implement(Service)]
#[tracing::instrument(skip(self, object))]
pub async fn required_keys_exist( pub async fn required_keys_exist(
&self, &self,
object: &CanonicalJsonObject, object: &CanonicalJsonObject,
@ -119,10 +120,12 @@ pub async fn required_keys_exist(
) -> bool { ) -> bool {
use ruma::signatures::required_keys; use ruma::signatures::required_keys;
debug!(?object, "Checking required keys exist");
let Ok(required_keys) = required_keys(object, version) else { let Ok(required_keys) = required_keys(object, version) else {
debug_error!("Failed to determine required keys");
return false; return false;
}; };
debug!(?required_keys, "Required keys to verify event");
required_keys required_keys
.iter() .iter()
.flat_map(|(server, key_ids)| key_ids.iter().map(move |key_id| (server, key_id))) .flat_map(|(server, key_ids)| key_ids.iter().map(move |key_id| (server, key_id)))
@ -132,6 +135,7 @@ pub async fn required_keys_exist(
} }
#[implement(Service)] #[implement(Service)]
#[tracing::instrument(skip(self))]
pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool { pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool {
type KeysMap<'a> = BTreeMap<&'a ServerSigningKeyId, &'a RawJsonValue>; type KeysMap<'a> = BTreeMap<&'a ServerSigningKeyId, &'a RawJsonValue>;
@ -142,6 +146,7 @@ pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSignin
.await .await
.deserialized::<Raw<ServerSigningKeys>>() .deserialized::<Raw<ServerSigningKeys>>()
else { else {
debug_warn!("No known signing keys found for {origin}");
return false; return false;
}; };
@ -157,6 +162,7 @@ pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSignin
} }
} }
debug_warn!("Key {key_id} not found for {origin}");
false false
} }

View file

@ -1,4 +1,6 @@
use conduwuit::{Err, Result, implement, matrix::event::gen_event_id_canonical_json}; use conduwuit::{
Err, Result, debug, debug_warn, implement, matrix::event::gen_event_id_canonical_json,
};
use ruma::{ use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified,
}; };
@ -28,18 +30,25 @@ pub async fn validate_and_add_event_id_no_fetch(
pdu: &RawJsonValue, pdu: &RawJsonValue,
room_version: &RoomVersionId, room_version: &RoomVersionId,
) -> Result<(OwnedEventId, CanonicalJsonObject)> { ) -> Result<(OwnedEventId, CanonicalJsonObject)> {
debug!(?pdu, "Validating PDU without fetching keys");
let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?; let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?;
debug!(event_id = event_id.as_str(), "Generated event ID, checking required keys");
if !self.required_keys_exist(&value, room_version).await { if !self.required_keys_exist(&value, room_version).await {
debug_warn!(
"Event {event_id} is missing required keys, cannot verify without fetching keys"
);
return Err!(BadServerResponse(debug_warn!( return Err!(BadServerResponse(debug_warn!(
"Event {event_id} cannot be verified: missing keys." "Event {event_id} cannot be verified: missing keys."
))); )));
} }
debug!("All required keys exist, verifying event");
if let Err(e) = self.verify_event(&value, Some(room_version)).await { if let Err(e) = self.verify_event(&value, Some(room_version)).await {
debug_warn!("Event verification failed");
return Err!(BadServerResponse(debug_error!( return Err!(BadServerResponse(debug_error!(
"Event {event_id} failed verification: {e:?}" "Event {event_id} failed verification: {e:?}"
))); )));
} }
debug!("Event verified successfully");
value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into())); value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into()));
@ -52,7 +61,7 @@ pub async fn verify_event(
event: &CanonicalJsonObject, event: &CanonicalJsonObject,
room_version: Option<&RoomVersionId>, room_version: Option<&RoomVersionId>,
) -> Result<Verified> { ) -> Result<Verified> {
let room_version = room_version.unwrap_or(&RoomVersionId::V11); let room_version = room_version.unwrap_or(&RoomVersionId::V12);
let keys = self.get_event_keys(event, room_version).await?; let keys = self.get_event_keys(event, room_version).await?;
ruma::signatures::verify_event(&keys, event, room_version).map_err(Into::into) ruma::signatures::verify_event(&keys, event, room_version).map_err(Into::into)
} }
@ -63,7 +72,7 @@ pub async fn verify_json(
event: &CanonicalJsonObject, event: &CanonicalJsonObject,
room_version: Option<&RoomVersionId>, room_version: Option<&RoomVersionId>,
) -> Result { ) -> Result {
let room_version = room_version.unwrap_or(&RoomVersionId::V11); let room_version = room_version.unwrap_or(&RoomVersionId::V12);
let keys = self.get_event_keys(event, room_version).await?; let keys = self.get_event_keys(event, room_version).await?;
ruma::signatures::verify_json(&keys, event.clone()).map_err(Into::into) ruma::signatures::verify_json(&keys, event.clone()).map_err(Into::into)
} }

View file

@ -422,7 +422,7 @@ impl Service {
pub fn all_device_ids<'a>( pub fn all_device_ids<'a>(
&'a self, &'a self,
user_id: &'a UserId, user_id: &'a UserId,
) -> impl Stream<Item = &DeviceId> + Send + 'a { ) -> impl Stream<Item = &'a DeviceId> + Send + 'a {
let prefix = (user_id, Interfix); let prefix = (user_id, Interfix);
self.db self.db
.userdeviceid_metadata .userdeviceid_metadata
@ -770,7 +770,7 @@ impl Service {
user_id: &'a UserId, user_id: &'a UserId,
from: u64, from: u64,
to: Option<u64>, to: Option<u64>,
) -> impl Stream<Item = &UserId> + Send + 'a { ) -> impl Stream<Item = &'a UserId> + Send + 'a {
self.keys_changed_user_or_room(user_id.as_str(), from, to) self.keys_changed_user_or_room(user_id.as_str(), from, to)
.map(|(user_id, ..)| user_id) .map(|(user_id, ..)| user_id)
} }
@ -781,7 +781,7 @@ impl Service {
room_id: &'a RoomId, room_id: &'a RoomId,
from: u64, from: u64,
to: Option<u64>, to: Option<u64>,
) -> impl Stream<Item = (&UserId, u64)> + Send + 'a { ) -> impl Stream<Item = (&'a UserId, u64)> + Send + 'a {
self.keys_changed_user_or_room(room_id.as_str(), from, to) self.keys_changed_user_or_room(room_id.as_str(), from, to)
} }
@ -790,7 +790,7 @@ impl Service {
user_or_room_id: &'a str, user_or_room_id: &'a str,
from: u64, from: u64,
to: Option<u64>, to: Option<u64>,
) -> impl Stream<Item = (&UserId, u64)> + Send + 'a { ) -> impl Stream<Item = (&'a UserId, u64)> + Send + 'a {
type KeyVal<'a> = ((&'a str, u64), &'a UserId); type KeyVal<'a> = ((&'a str, u64), &'a UserId);
let to = to.unwrap_or(u64::MAX); let to = to.unwrap_or(u64::MAX);