Compare commits

...

11 commits

Author SHA1 Message Date
nexy7574
5e71470131
fix(hydra): Fix ruma dependency 2025-08-30 17:01:12 +01:00
nexy7574
cfd68efb99
style: Reformat and whatnot 2025-08-30 17:00:31 +01:00
nexy7574
327fa02cd9
feat(hydra): Initial public commit for v12 support
# Conflicts:
#	src/core/info/room_version.rs
#	src/service/rooms/timeline/create.rs
2025-08-30 16:55:21 +01:00
Tom Foster
609e239436 fix(fedora): Correct linting issues in RPM spec file
The Fedora RPM packaging files added in PR #950 weren't passing pre-commit
checks, causing CI failures for any branches rebased after that merge. This
applies prek linting fixes (typo correction, trailing whitespace removal,
and EOF newline) to ensure CI passes for all contributors.
2025-08-30 16:10:41 +01:00
Ginger
34417c96ae Update URL to point at the landing page 2025-08-28 21:10:46 +00:00
Ginger
f33f281edb Update long description to match deb package 2025-08-28 21:10:46 +00:00
Ginger
ddbca59193 Add spec and service files for creating an RPM package 2025-08-28 21:10:46 +00:00
Tom Foster
b5a2e49ae4 fix: Resolve Clippy CI failures from elided lifetime warnings
The latest Rust nightly compiler (2025-08-27) introduced the
elided-named-lifetimes lint, which causes Clippy CI checks to fail
when an elided lifetime ('_) resolves to a named lifetime that is
already in scope.

This commit fixes the Clippy warnings by:
- Making lifetime relationships explicit where 'a is already in scope
- Keeping elided lifetimes ('_) in functions without explicit
  lifetime parameters
- Ensuring proper lifetime handling in the database pool module

Affected files (17 total):
- Database map modules: Handle, Key, and KeyVal references in get,
  qry, keys, and stream operations
- Database pool module: into_recv_seek function

This change resolves the CI build failures without changing any
functionality, ensuring the codebase remains compatible with the
latest nightly Clippy checks.
2025-08-28 21:13:19 +01:00
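A minimal sketch (not taken from this diff; the function names are illustrative) of the pattern this lint flags and the fix the commit describes:

// On the nightly lint described above, this signature is flagged:
// the elided '_ silently resolves to the named lifetime 'a.
fn first_word<'a>(s: &'a str) -> &'_ str {
    s.split(' ').next().unwrap_or(s)
}

// Fix: name the lifetime explicitly wherever a named lifetime is
// already in scope; plain '_ remains fine in functions that declare
// no lifetime parameters of their own.
fn first_word_fixed<'a>(s: &'a str) -> &'a str {
    s.split(' ').next().unwrap_or(s)
}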
Jade Ellis
37248a4f68
chore: Add reasons for test skips 2025-08-28 20:10:05 +01:00
Tom Foster
dd22325ea2 refactor(ci): Consolidate Rust checks with optimised toolchain setup
Merge rust-checks.yml into prek-checks.yml for a unified workflow that
runs formatting and clippy/test checks in parallel jobs.

Add reusable composite actions:
- setup-rust: Smart Rust toolchain management with caching
  * Uses cargo-binstall for pre-built binary downloads
  * Integrates Mozilla sccache-action for compilation caching
  * Workspace-relative paths for better cache control
  * GitHub token support for improved rate limits
- setup-llvm-with-apt: LLVM installation with native dependencies
- detect-runner-os: Consistent OS detection for cache keys

Key improvements:
- Install prek via cargo-binstall --git (crates.io outdated at v0.0.1)
- Download timelord-cli from cargo-quickinstall
- Set BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10 to avoid rate limit delays
- Default Rust version 1.87.0 with override support
- Remove redundant sccache stats (handled by Mozilla action)

Significantly reduces CI runtime through binary downloads instead of
compilation while maintaining all existing quality checks.
2025-08-28 19:20:14 +01:00
nex
30a56d5cb9
Update renovate.json 2025-08-28 17:15:32 +00:00
93 changed files with 1669 additions and 617 deletions

View file

@ -0,0 +1,39 @@
name: detect-runner-os
description: |
  Detect the actual OS name and version of the runner.
  Provides separate outputs for name, version, and a combined slug.

outputs:
  name:
    description: 'OS name (e.g. Ubuntu, Debian)'
    value: ${{ steps.detect.outputs.name }}
  version:
    description: 'OS version (e.g. 22.04, 11)'
    value: ${{ steps.detect.outputs.version }}
  slug:
    description: 'Combined OS slug (e.g. Ubuntu-22.04)'
    value: ${{ steps.detect.outputs.slug }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: detect
      shell: bash
      run: |
        # Detect OS version (try lsb_release first, fall back to /etc/os-release)
        OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)

        # Detect OS name and capitalise (try lsb_release first, fall back to /etc/os-release)
        OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"' | sed 's/\b\(.\)/\u\1/g')

        # Create combined slug
        OS_SLUG="${OS_NAME}-${OS_VERSION}"

        # Set outputs
        echo "name=${OS_NAME}" >> $GITHUB_OUTPUT
        echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT
        echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT

        # Log detection results
        echo "🔍 Detected Runner OS: ${OS_NAME} ${OS_VERSION}"

View file

@ -0,0 +1,167 @@
name: setup-llvm-with-apt
description: |
  Set up LLVM toolchain with APT package management and smart caching.
  Supports cross-compilation architectures and additional package installation.
  Creates symlinks in /usr/bin: clang, clang++, lld, llvm-ar, llvm-ranlib

inputs:
  dpkg-arch:
    description: 'Debian architecture for cross-compilation (e.g. arm64)'
    required: false
    default: ''
  extra-packages:
    description: 'Additional APT packages to install (space-separated)'
    required: false
    default: ''
  llvm-version:
    description: 'LLVM version to install'
    required: false
    default: '20'

outputs:
  llvm-version:
    description: 'Installed LLVM version'
    value: ${{ steps.configure.outputs.version }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: runner-os
      uses: ./.forgejo/actions/detect-runner-os

    - name: Configure cross-compilation architecture
      if: inputs.dpkg-arch != ''
      shell: bash
      run: |
        echo "🏗️ Adding ${{ inputs.dpkg-arch }} architecture"
        sudo dpkg --add-architecture ${{ inputs.dpkg-arch }}

        # Restrict default sources to amd64
        sudo sed -i 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list
        sudo sed -i 's/^deb https/deb [arch=amd64] https/g' /etc/apt/sources.list

        # Add ports sources for foreign architecture
        sudo tee /etc/apt/sources.list.d/${{ inputs.dpkg-arch }}.list > /dev/null <<EOF
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
        EOF

        echo "✅ Architecture ${{ inputs.dpkg-arch }} configured"

    - name: Start LLVM cache group
      shell: bash
      run: echo "::group::📦 Restoring LLVM cache"

    - name: Check for LLVM cache
      id: cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          /usr/bin/clang-*
          /usr/bin/clang++-*
          /usr/bin/lld-*
          /usr/bin/llvm-*
          /usr/lib/llvm-*/
          /usr/lib/x86_64-linux-gnu/libLLVM*.so*
          /usr/lib/x86_64-linux-gnu/libclang*.so*
          /etc/apt/sources.list.d/archive_uri-*
          /etc/apt/trusted.gpg.d/apt.llvm.org.asc
        key: llvm-${{ steps.runner-os.outputs.slug }}-v${{ inputs.llvm-version }}-v3-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}

    - name: End LLVM cache group
      shell: bash
      run: echo "::endgroup::"

    - name: Check and install LLVM if needed
      id: llvm-setup
      shell: bash
      run: |
        echo "🔍 Checking for LLVM ${{ inputs.llvm-version }}..."

        # Check both binaries and libraries exist
        if [ -f "/usr/bin/clang-${{ inputs.llvm-version }}" ] && \
           [ -f "/usr/bin/clang++-${{ inputs.llvm-version }}" ] && \
           [ -f "/usr/bin/lld-${{ inputs.llvm-version }}" ] && \
           ([ -f "/usr/lib/x86_64-linux-gnu/libLLVM.so.${{ inputs.llvm-version }}.1" ] || \
            [ -f "/usr/lib/x86_64-linux-gnu/libLLVM-${{ inputs.llvm-version }}.so.1" ] || \
            [ -f "/usr/lib/llvm-${{ inputs.llvm-version }}/lib/libLLVM.so" ]); then
          echo "✅ LLVM ${{ inputs.llvm-version }} found and verified"
          echo "needs-install=false" >> $GITHUB_OUTPUT
        else
          echo "📦 LLVM ${{ inputs.llvm-version }} not found or incomplete - installing..."

          echo "::group::🔧 Installing LLVM ${{ inputs.llvm-version }}"
          wget -O - https://apt.llvm.org/llvm.sh | bash -s -- ${{ inputs.llvm-version }}
          echo "::endgroup::"

          if [ ! -f "/usr/bin/clang-${{ inputs.llvm-version }}" ]; then
            echo "❌ Failed to install LLVM ${{ inputs.llvm-version }}"
            exit 1
          fi

          echo "✅ Installed LLVM ${{ inputs.llvm-version }}"
          echo "needs-install=true" >> $GITHUB_OUTPUT
        fi

    - name: Prepare for additional packages
      if: inputs.extra-packages != ''
      shell: bash
      run: |
        # Update APT if LLVM was cached (installer script already does apt-get update)
        if [[ "${{ steps.llvm-setup.outputs.needs-install }}" != "true" ]]; then
          echo "::group::📦 Running apt-get update (LLVM cached, extra packages needed)"
          sudo apt-get update
          echo "::endgroup::"
        fi
        echo "::group::📦 Installing additional packages"

    - name: Install additional packages
      if: inputs.extra-packages != ''
      uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
      with:
        packages: ${{ inputs.extra-packages }}
        version: 1.0

    - name: End package installation group
      if: inputs.extra-packages != ''
      shell: bash
      run: echo "::endgroup::"

    - name: Configure LLVM environment
      id: configure
      shell: bash
      run: |
        echo "::group::🔧 Configuring LLVM ${{ inputs.llvm-version }} environment"

        # Create symlinks
        sudo ln -sf "/usr/bin/clang-${{ inputs.llvm-version }}" /usr/bin/clang
        sudo ln -sf "/usr/bin/clang++-${{ inputs.llvm-version }}" /usr/bin/clang++
        sudo ln -sf "/usr/bin/lld-${{ inputs.llvm-version }}" /usr/bin/lld
        sudo ln -sf "/usr/bin/llvm-ar-${{ inputs.llvm-version }}" /usr/bin/llvm-ar
        sudo ln -sf "/usr/bin/llvm-ranlib-${{ inputs.llvm-version }}" /usr/bin/llvm-ranlib
        echo "  ✓ Created symlinks"

        # Setup library paths
        LLVM_LIB_PATH="/usr/lib/llvm-${{ inputs.llvm-version }}/lib"
        if [ -d "$LLVM_LIB_PATH" ]; then
          echo "LD_LIBRARY_PATH=${LLVM_LIB_PATH}:${LD_LIBRARY_PATH:-}" >> $GITHUB_ENV
          echo "LIBCLANG_PATH=${LLVM_LIB_PATH}" >> $GITHUB_ENV
          echo "$LLVM_LIB_PATH" | sudo tee "/etc/ld.so.conf.d/llvm-${{ inputs.llvm-version }}.conf" > /dev/null
          sudo ldconfig
          echo "  ✓ Configured library paths"
        else
          # Fallback to standard library location
          if [ -d "/usr/lib/x86_64-linux-gnu" ]; then
            echo "LIBCLANG_PATH=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV
            echo "  ✓ Using fallback library path"
          fi
        fi

        # Set output
        echo "version=${{ inputs.llvm-version }}" >> $GITHUB_OUTPUT

        echo "::endgroup::"
        echo "✅ LLVM ready: $(clang --version | head -1)"

View file

@ -0,0 +1,236 @@
name: setup-rust
description: |
  Set up Rust toolchain with sccache for compilation caching.
  Respects rust-toolchain.toml by default or accepts explicit version override.

inputs:
  cache-key-suffix:
    description: 'Optional suffix for cache keys (e.g. platform identifier)'
    required: false
    default: ''
  rust-components:
    description: 'Additional Rust components to install (space-separated)'
    required: false
    default: ''
  rust-target:
    description: 'Rust target triple (e.g. x86_64-unknown-linux-gnu)'
    required: false
    default: ''
  rust-version:
    description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
    required: false
    default: '1.87.0'
  sccache-cache-limit:
    description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
    required: false
    default: '2G'
  github-token:
    description: 'GitHub token for downloading sccache from GitHub releases'
    required: false
    default: ''

outputs:
  rust-version:
    description: 'Installed Rust version'
    value: ${{ steps.rust-setup.outputs.version }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: runner-os
      uses: ./.forgejo/actions/detect-runner-os

    - name: Configure Cargo environment
      shell: bash
      run: |
        # Use workspace-relative paths for better control and consistency
        echo "CARGO_HOME=${{ github.workspace }}/.cargo" >> $GITHUB_ENV
        echo "CARGO_TARGET_DIR=${{ github.workspace }}/target" >> $GITHUB_ENV
        echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> $GITHUB_ENV
        echo "RUSTUP_HOME=${{ github.workspace }}/.rustup" >> $GITHUB_ENV

        # Limit binstall resolution timeout to avoid GitHub rate limit delays
        echo "BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10" >> $GITHUB_ENV

        # Ensure directories exist for first run
        mkdir -p "${{ github.workspace }}/.cargo"
        mkdir -p "${{ github.workspace }}/.sccache"
        mkdir -p "${{ github.workspace }}/target"
        mkdir -p "${{ github.workspace }}/.rustup"

    - name: Start cache restore group
      shell: bash
      run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"

    - name: Cache Cargo registry and git
      id: registry-cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          .cargo/registry/index
          .cargo/registry/cache
          .cargo/git/db
        # Registry cache saved per workflow, restored from any workflow's cache
        # Each workflow maintains its own registry that accumulates its needed crates
        key: cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ github.workflow }}
        restore-keys: |
          cargo-registry-${{ steps.runner-os.outputs.slug }}-

    - name: Cache toolchain binaries
      id: toolchain-cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          .cargo/bin
          .rustup/toolchains
          .rustup/update-hashes
        # Shared toolchain cache across all Rust versions
        key: toolchain-${{ steps.runner-os.outputs.slug }}

    - name: Debug GitHub token availability
      shell: bash
      run: |
        if [ -z "${{ inputs.github-token }}" ]; then
          echo "⚠️ No GitHub token provided - sccache will use fallback download method"
        else
          echo "✅ GitHub token provided for sccache"
        fi

    - name: Setup sccache
      uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
      with:
        token: ${{ inputs.github-token }}

    - name: Cache build artifacts
      id: build-cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          target/**/deps
          !target/**/deps/*.rlib
          target/**/build
          target/**/.fingerprint
          target/**/incremental
          target/**/*.d
          /timelord/
        # Build artifacts - cache per code change, restore from deps when code changes
        key: >-
          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
        restore-keys: |
          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-

    - name: End cache restore group
      shell: bash
      run: echo "::endgroup::"

    - name: Setup Rust toolchain
      shell: bash
      run: |
        # Install rustup if not already cached
        if ! command -v rustup &> /dev/null; then
          echo "::group::📦 Installing rustup"
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
          source "$CARGO_HOME/env"
          echo "::endgroup::"
        else
          echo "✅ rustup already available"
        fi

        # Setup the appropriate Rust version
        if [[ -n "${{ inputs.rust-version }}" ]]; then
          echo "::group::📦 Setting up Rust ${{ inputs.rust-version }}"
          # Set override first to prevent rust-toolchain.toml from auto-installing
          rustup override set ${{ inputs.rust-version }} 2>/dev/null || true
          # Check if we need to install/update the toolchain
          if rustup toolchain list | grep -q "^${{ inputs.rust-version }}-"; then
            rustup update ${{ inputs.rust-version }}
          else
            rustup toolchain install ${{ inputs.rust-version }} --profile minimal -c cargo,clippy,rustfmt
          fi
        else
          echo "::group::📦 Setting up Rust from rust-toolchain.toml"
          rustup show
        fi
        echo "::endgroup::"

    - name: Configure PATH and install tools
      shell: bash
      env:
        GITHUB_TOKEN: ${{ inputs.github-token }}
      run: |
        # Add .cargo/bin to PATH permanently for all subsequent steps
        echo "${{ github.workspace }}/.cargo/bin" >> $GITHUB_PATH

        # For this step only, we need to add it to PATH since GITHUB_PATH takes effect in the next step
        export PATH="${{ github.workspace }}/.cargo/bin:$PATH"

        # Install cargo-binstall for fast binary installations
        if command -v cargo-binstall &> /dev/null; then
          echo "✅ cargo-binstall already available"
        else
          echo "::group::📦 Installing cargo-binstall"
          curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
          echo "::endgroup::"
        fi

        if command -v prek &> /dev/null; then
          echo "✅ prek already available"
        else
          echo "::group::📦 Installing prek"
          # prek isn't regularly published to crates.io, so we use git source
          cargo-binstall -y --no-symlinks --git https://github.com/j178/prek prek
          echo "::endgroup::"
        fi

        if command -v timelord &> /dev/null; then
          echo "✅ timelord already available"
        else
          echo "::group::📦 Installing timelord"
          cargo-binstall -y --no-symlinks timelord-cli
          echo "::endgroup::"
        fi

    - name: Configure sccache environment
      shell: bash
      run: |
        echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
        echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV

        # Configure incremental compilation GC
        # If we restored from old cache (partial hit), clean up aggressively
        if [[ "${{ steps.build-cache.outputs.cache-hit }}" != "true" ]]; then
          echo "♻️ Partial cache hit - enabling cache cleanup"
          echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
        fi

    - name: Install Rust components
      if: inputs.rust-components != ''
      shell: bash
      run: |
        echo "📦 Installing components: ${{ inputs.rust-components }}"
        rustup component add ${{ inputs.rust-components }}

    - name: Install Rust target
      if: inputs.rust-target != ''
      shell: bash
      run: |
        echo "📦 Installing target: ${{ inputs.rust-target }}"
        rustup target add ${{ inputs.rust-target }}

    - name: Output version and summary
      id: rust-setup
      shell: bash
      run: |
        RUST_VERSION=$(rustc --version | cut -d' ' -f2)
        echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT

        echo "📋 Setup complete:"
        echo "  Rust: $(rustc --version)"
        echo "  Cargo: $(cargo --version)"
        echo "  prek: $(prek --version 2>/dev/null || echo 'installed')"
        echo "  timelord: $(timelord --version 2>/dev/null || echo 'installed')"

View file

@ -2,7 +2,6 @@ name: Checks / Prek
on:
push:
pull_request:
permissions:
contents: read
@ -17,18 +16,64 @@ jobs:
with:
persist-credentials: false
- name: Install uv
uses: https://github.com/astral-sh/setup-uv@v5
- name: Setup Rust nightly
uses: ./.forgejo/actions/setup-rust
with:
enable-cache: true
ignore-nothing-to-cache: true
cache-dependency-glob: ''
rust-version: nightly
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Run prek
run: |
uvx prek run \
prek run \
--all-files \
--hook-stage manual \
--show-diff-on-failure \
--color=always \
-v
- name: Check Rust formatting
run: |
cargo +nightly fmt --all -- --check && \
echo "✅ Formatting check passed" || \
exit 1
clippy-and-tests:
name: Clippy and Cargo Tests
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup LLVM
uses: ./.forgejo/actions/setup-llvm-with-apt
with:
extra-packages: liburing-dev liburing2
- name: Setup Rust with caching
uses: ./.forgejo/actions/setup-rust
with:
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Run Clippy lints
run: |
cargo clippy \
--workspace \
--features full \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Run Cargo tests
run: |
cargo test \
--workspace \
--features full \
--locked \
--profile test \
--all-targets \
--no-fail-fast

View file

@ -1,144 +0,0 @@
name: Checks / Rust
on:
push:
jobs:
format:
name: Format
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
with:
toolchain: "nightly"
components: "rustfmt"
- name: Check formatting
run: |
cargo +nightly fmt --all -- --check
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Clippy
run: |
cargo clippy \
--workspace \
--features full \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Show sccache stats
if: always()
run: sccache --show-stats
cargo-test:
name: Cargo Test
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Cargo Test
run: |
cargo test \
--workspace \
--features full \
--locked \
--profile test \
--all-targets \
--no-fail-fast
- name: Show sccache stats
if: always()
run: sccache --show-stats

Cargo.lock (generated, 22 lines changed)
View file

@ -4058,7 +4058,7 @@ checksum = "88f8660c1ff60292143c98d08fc6e2f654d722db50410e3f3797d40baaf9d8f3"
[[package]]
name = "ruma"
version = "0.10.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"assign",
"js_int",
@ -4078,7 +4078,7 @@ dependencies = [
[[package]]
name = "ruma-appservice-api"
version = "0.10.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"js_int",
"ruma-common",
@ -4090,7 +4090,7 @@ dependencies = [
[[package]]
name = "ruma-client-api"
version = "0.18.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"as_variant",
"assign",
@ -4113,7 +4113,7 @@ dependencies = [
[[package]]
name = "ruma-common"
version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"as_variant",
"base64 0.22.1",
@ -4145,7 +4145,7 @@ dependencies = [
[[package]]
name = "ruma-events"
version = "0.28.1"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"as_variant",
"indexmap 2.10.0",
@ -4170,7 +4170,7 @@ dependencies = [
[[package]]
name = "ruma-federation-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"bytes",
"headers",
@ -4192,7 +4192,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-validation"
version = "0.9.5"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"js_int",
"thiserror 2.0.12",
@ -4201,7 +4201,7 @@ dependencies = [
[[package]]
name = "ruma-identity-service-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"js_int",
"ruma-common",
@ -4211,7 +4211,7 @@ dependencies = [
[[package]]
name = "ruma-macros"
version = "0.13.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"cfg-if",
"proc-macro-crate",
@ -4226,7 +4226,7 @@ dependencies = [
[[package]]
name = "ruma-push-gateway-api"
version = "0.9.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"js_int",
"ruma-common",
@ -4238,7 +4238,7 @@ dependencies = [
[[package]]
name = "ruma-signatures"
version = "0.15.0"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=b753738047d1f443aca870896ef27ecaacf027da#b753738047d1f443aca870896ef27ecaacf027da"
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=191e5c541e8339080bebb5ac6855a682330bb886#191e5c541e8339080bebb5ac6855a682330bb886"
dependencies = [
"base64 0.22.1",
"ed25519-dalek",

View file

@ -352,7 +352,7 @@ version = "0.1.2"
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
#branch = "conduwuit-changes"
rev = "b753738047d1f443aca870896ef27ecaacf027da"
rev = "191e5c541e8339080bebb5ac6855a682330bb886"
features = [
"compat",
"rand",

fedora/conduwuit.service (new file, 68 lines)
View file

@ -0,0 +1,68 @@
[Unit]
Description=Continuwuity - Matrix homeserver
Documentation=https://continuwuity.org/
Wants=network-online.target
After=network-online.target
Alias=matrix-conduwuit.service
[Service]
DynamicUser=yes
User=conduwuit
Group=conduwuit
Type=notify
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
ExecStart=/usr/bin/conduwuit
AmbientCapabilities=
CapabilityBoundingSet=
DevicePolicy=closed
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
#ProcSubset=pid
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectProc=invisible
ProtectSystem=strict
PrivateDevices=yes
PrivateMounts=yes
PrivateTmp=yes
PrivateUsers=yes
PrivateIPC=yes
RemoveIPC=yes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
StateDirectory=conduwuit
ConfigurationDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750
Restart=on-failure
RestartSec=5
TimeoutStopSec=2m
TimeoutStartSec=2m
StartLimitInterval=1m
StartLimitBurst=5
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,80 @@
# This should be run using rpkg-util: https://docs.pagure.org/rpkg-util
# it requires Internet access and is not suitable for Fedora main repos
# TODO: rpkg-util is no longer maintained, find a replacement
Name: continuwuity
Version: {{{ git_repo_version }}}
Release: 1%{?dist}
Summary: Very cool Matrix chat homeserver written in Rust
License: Apache-2.0 AND MIT
URL: https://continuwuity.org
VCS: {{{ git_repo_vcs }}}
Source: {{{ git_repo_pack }}}
BuildRequires: cargo-rpm-macros >= 25
BuildRequires: systemd-rpm-macros
# Needed to build rust-librocksdb-sys
BuildRequires: clang
BuildRequires: liburing-devel
Requires: liburing
Requires: glibc
Requires: libstdc++
%global _description %{expand:
A cool hard fork of Conduit, a Matrix homeserver written in Rust}
%description %{_description}
%prep
{{{ git_repo_setup_macro }}}
%cargo_prep -N
# Perform an online build so Git dependencies can be retrieved
sed -i 's/^offline = true$//' .cargo/config.toml
%build
%cargo_build
# Here's the one legally required mystery incantation in this file.
# Some of our dependencies have source files which are (for some reason) marked as executable.
# Files in .cargo/registry/ are copied into /usr/src/ by the debuginfo machinery
# at the end of the build step, and then the BRP shebang mangling script checks
# the entire buildroot to find executable files, and fails the build because
# it thinks Rust's file attributes are shebangs because they start with `#!`.
# So we have to clear the executable bit on all of them before that happens.
find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
# TODO: this fails currently because it's forced to run in offline mode
# {cargo_license -- --no-dev} > LICENSE.dependencies
%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 fedora/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
%files
%license LICENSE
%license src/core/matrix/state_res/LICENSE
%doc CODE_OF_CONDUCT.md
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config %{_sysconfdir}/conduwuit/conduwuit.toml
%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service
# Do not create /var/lib/conduwuit, systemd will create it if necessary
%post
%systemd_post conduwuit.service
%preun
%systemd_preun conduwuit.service
%postun
%systemd_postun_with_restart conduwuit.service
%changelog
{{{ git_repo_changelog }}}

View file

@ -13,8 +13,8 @@
"enabled": true
},
"labels": [
"dependencies",
"github_actions"
"Dependencies",
"Dependencies/Renovate"
],
"ignoreDeps": [
"tikv-jemallocator",

View file

@ -756,7 +756,7 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
.build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content),
&user_id,
&room_id,
Some(&room_id),
&state_lock,
)
.await?;
@ -901,7 +901,13 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
);
let redaction_event_id = {
let state_lock = self.services.rooms.state.mutex.lock(event.room_id()).await;
let state_lock = self
.services
.rooms
.state
.mutex
.lock(&event.room_id_or_hash())
.await;
self.services
.rooms
@ -915,7 +921,7 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
})
},
event.sender(),
event.room_id(),
Some(&event.room_id_or_hash()),
&state_lock,
)
.await?

View file

@ -929,7 +929,7 @@ pub async fn full_user_deactivate(
.build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content),
user_id,
room_id,
Some(room_id),
&state_lock,
)
.await

View file

@ -69,7 +69,7 @@ pub(crate) async fn get_context_route(
let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?;
if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id {
if base_pdu.room_id_or_hash() != *room_id || base_pdu.event_id != *event_id {
return Err!(Request(NotFound("Base event not found.")));
}

View file

@ -49,7 +49,7 @@ pub(crate) async fn ban_user_route(
..current_member_content
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

View file

@ -128,12 +128,12 @@ pub(crate) async fn invite_helper(
.create_hash_and_sign_event(
PduBuilder::state(user_id.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await?;
let invite_room_state = services.rooms.state.summary_stripped(&pdu).await;
let invite_room_state = services.rooms.state.summary_stripped(&pdu, room_id).await;
drop(state_lock);
@ -227,7 +227,7 @@ pub(crate) async fn invite_helper(
.build_and_append_pdu(
PduBuilder::state(user_id.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await?;

View file

@ -18,7 +18,7 @@ use conduwuit::{
},
warn,
};
use futures::{FutureExt, StreamExt};
use futures::{FutureExt, StreamExt, TryFutureExt};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
RoomVersionId, UserId,
@ -550,12 +550,20 @@ async fn join_room_by_id_helper_remote(
.iter()
.stream()
.then(|pdu| {
debug!(?pdu, "Validating send_join response room_state event");
services
.server_keys
.validate_and_add_event_id_no_fetch(pdu, &room_version_id)
.inspect_err(|e| {
debug_warn!(
"Could not validate send_join response room_state event: {e:?}"
);
})
.inspect(|_| debug!("Completed validating send_join response room_state event"))
})
.ready_filter_map(Result::ok)
.fold(HashMap::new(), |mut state, (event_id, value)| async move {
debug!(?event_id, "Processing send_join response room_state event");
let pdu = match PduEvent::from_id_val(&event_id, value.clone()) {
| Ok(pdu) => pdu,
| Err(e) => {
@ -563,9 +571,10 @@ async fn join_room_by_id_helper_remote(
return state;
},
};
debug!(event_id = ?event_id.clone(), "Adding PDU outlier for send_join response room_state event");
services.rooms.outlier.add_pdu_outlier(&event_id, &value);
if let Some(state_key) = &pdu.state_key {
debug!(?state_key, "Creating shortstatekey for state event in send_join response");
let shortstatekey = services
.rooms
.short
@ -574,7 +583,7 @@ async fn join_room_by_id_helper_remote(
state.insert(shortstatekey, pdu.event_id.clone());
}
debug!("Completed send_join response");
state
})
.await;
@ -615,6 +624,9 @@ async fn join_room_by_id_helper_remote(
&parsed_join_pdu,
None, // TODO: third party invite
|k, s| state_fetch(k.clone(), s.into()),
&state_fetch(StateEventType::RoomCreate, "".into())
.await
.expect("create event is missing from send_join auth"),
)
.await
.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
@ -662,7 +674,7 @@ async fn join_room_by_id_helper_remote(
let statehash_after_join = services
.rooms
.state
.append_to_state(&parsed_join_pdu)
.append_to_state(&parsed_join_pdu, room_id)
.await?;
info!("Appending new room join event");
@ -674,6 +686,7 @@ async fn join_room_by_id_helper_remote(
join_event,
once(parsed_join_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;
@ -773,7 +786,7 @@ async fn join_room_by_id_helper_local(
.build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await

View file

@ -54,7 +54,7 @@ pub(crate) async fn kick_user_route(
..event
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

View file

@ -373,7 +373,7 @@ async fn knock_room_helper_local(
.build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await
@ -502,6 +502,7 @@ async fn knock_room_helper_local(
knock_event,
once(parsed_knock_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;
@ -672,7 +673,7 @@ async fn knock_room_helper_remote(
let statehash_after_knock = services
.rooms
.state
.append_to_state(&parsed_knock_pdu)
.append_to_state(&parsed_knock_pdu, room_id)
.await?;
info!("Updating membership locally to knock state with provided stripped state events");
@ -701,6 +702,7 @@ async fn knock_room_helper_remote(
knock_event,
once(parsed_knock_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;

View file

@ -206,7 +206,7 @@ pub async fn leave_room(
..event
}),
user_id,
room_id,
Some(room_id),
&state_lock,
)
.await?;

View file

@ -70,9 +70,10 @@ pub(crate) async fn banned_room_check(
if let Some(room_id) = room_id {
if services.rooms.metadata.is_banned(room_id).await
|| services
|| (room_id.server_name().is_some()
&& services
.moderation
.is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid"))
.is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")))
{
warn!(
"User {user_id} who is not an admin attempted to send an invite for or \

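An illustrative sketch (not part of the diff) of why the extra guard above is needed, assuming the relaxed room ID validation used by this branch's ruma fork; both IDs below are hypothetical:

use ruma::RoomId;

fn main() {
    // A classic room ID carries its origin server after the colon.
    let legacy = RoomId::parse("!abcdef:example.org").unwrap();
    assert_eq!(legacy.server_name().map(|s| s.as_str()), Some("example.org"));

    // A v12-style hash-form ID has no server part, so server_name()
    // returns None and the remote-server moderation check must be skipped.
    let hashed = RoomId::parse("!31hnA8KZVhJTYSBGtMdPHo1S0HsNOipCyD9BVHibFLA").unwrap();
    assert!(hashed.server_name().is_none());
}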
View file

@ -47,7 +47,7 @@ pub(crate) async fn unban_user_route(
..current_member_content
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

View file

@ -310,7 +310,7 @@ pub(crate) async fn visibility_filter(
services
.rooms
.state_accessor
.user_can_see_event(user_id, pdu.room_id(), pdu.event_id())
.user_can_see_event(user_id, &pdu.room_id_or_hash(), pdu.event_id())
.await
.then_some(item)
}

View file

@ -423,7 +423,7 @@ pub async fn update_all_rooms(
if let Err(e) = services
.rooms
.timeline
.build_and_append_pdu(pdu_builder, user_id, room_id, &state_lock)
.build_and_append_pdu(pdu_builder, user_id, Some(room_id), &state_lock)
.await
{
warn!(%user_id, %room_id, "Failed to update/send new profile join membership update in room: {e}");

View file

@ -36,7 +36,7 @@ pub(crate) async fn redact_event_route(
})
},
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

View file

@ -223,7 +223,7 @@ async fn visibility_filter<Pdu: Event + Send + Sync>(
services
.rooms
.state_accessor
.user_can_see_event(sender_user, pdu.room_id(), pdu.event_id())
.user_can_see_event(sender_user, &pdu.room_id_or_hash(), pdu.event_id())
.await
.then_some(item)
}

View file

@ -2,7 +2,7 @@ use std::{fmt::Write as _, ops::Mul, time::Duration};
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{Err, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
use conduwuit::{Err, Event, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
use conduwuit_service::Services;
use rand::Rng;
use ruma::{
@ -200,7 +200,7 @@ async fn is_event_report_valid(
valid"
);
if room_id != pdu.room_id {
if room_id != pdu.room_id_or_hash() {
return Err!(Request(NotFound("Event ID does not belong to the reported room",)));
}

View file

@ -2,7 +2,7 @@ use std::collections::BTreeMap;
use axum::extract::State;
use conduwuit::{
Err, Result, debug_info, debug_warn, err, info,
Err, Result, RoomVersion, debug, debug_info, debug_warn, err, info,
matrix::{StateKey, pdu::PduBuilder},
warn,
};
@ -68,51 +68,6 @@ pub(crate) async fn create_room_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
let room_id: OwnedRoomId = match &body.room_id {
| Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?,
| _ => RoomId::new(&services.server.name),
};
// check if room ID doesn't already exist instead of erroring on auth check
if services.rooms.short.get_shortroomid(&room_id).await.is_ok() {
return Err!(Request(RoomInUse("Room with that custom room ID already exists",)));
}
if body.visibility == room::Visibility::Public
&& services.server.config.lockdown_public_room_directory
&& !services.users.is_admin(sender_user).await
&& body.appservice_info.is_none()
{
warn!(
"Non-admin user {sender_user} tried to publish {room_id} to the room directory \
while \"lockdown_public_room_directory\" is enabled"
);
if services.server.config.admin_room_notices {
services
.admin
.notice(&format!(
"Non-admin user {sender_user} tried to publish {room_id} to the room \
directory while \"lockdown_public_room_directory\" is enabled"
))
.await;
}
return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed")));
}
let _short_id = services
.rooms
.short
.get_or_create_shortroomid(&room_id)
.await;
let state_lock = services.rooms.state.mutex.lock(&room_id).await;
let alias: Option<OwnedRoomAliasId> = match body.room_alias_name.as_ref() {
| Some(alias) =>
Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?),
| _ => None,
};
let room_version = match body.room_version.clone() {
| Some(room_version) =>
if services.server.supported_room_version(&room_version) {
@ -124,6 +79,51 @@ pub(crate) async fn create_room_route(
},
| None => services.server.config.default_room_version.clone(),
};
let room_features = RoomVersion::new(&room_version)?;
let room_id: Option<OwnedRoomId> = match room_features.room_ids_as_hashes {
| true => None,
| false => match &body.room_id {
| Some(custom_room_id) => Some(custom_room_id_check(&services, custom_room_id)?),
| None => Some(RoomId::new(services.globals.server_name())),
},
};
// check if room ID doesn't already exist instead of erroring on auth check
if let Some(ref room_id) = room_id {
if services.rooms.short.get_shortroomid(room_id).await.is_ok() {
return Err!(Request(RoomInUse("Room with that custom room ID already exists",)));
}
}
if body.visibility == room::Visibility::Public
&& services.server.config.lockdown_public_room_directory
&& !services.users.is_admin(sender_user).await
&& body.appservice_info.is_none()
{
warn!(
"Non-admin user {sender_user} tried to publish {room_id:?} to the room directory \
while \"lockdown_public_room_directory\" is enabled"
);
if services.server.config.admin_room_notices {
services
.admin
.notice(&format!(
"Non-admin user {sender_user} tried to publish {room_id:?} to the room \
directory while \"lockdown_public_room_directory\" is enabled"
))
.await;
}
return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed")));
}
let alias: Option<OwnedRoomAliasId> = match body.room_alias_name.as_ref() {
| Some(alias) =>
Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?),
| _ => None,
};
let create_content = match &body.creation_content {
| Some(content) => {
@ -156,6 +156,10 @@ pub(crate) async fn create_room_route(
.try_into()
.map_err(|e| err!(Request(BadJson("Invalid creation content: {e}"))))?,
);
if room_version == V12 {
// TODO(hydra): v12 rooms cannot be federated until they are stable.
content.insert("m.federate".into(), false.into());
}
content
},
| None => {
@ -164,18 +168,32 @@ pub(crate) async fn create_room_route(
let content = match room_version {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
RoomCreateEventContent::new_v1(sender_user.to_owned()),
| _ => RoomCreateEventContent::new_v11(),
| V11 => RoomCreateEventContent::new_v11(),
| _ => RoomCreateEventContent::new_v12(),
};
let mut content =
serde_json::from_str::<CanonicalJsonObject>(to_raw_value(&content)?.get())
.unwrap();
serde_json::from_str::<CanonicalJsonObject>(to_raw_value(&content)?.get())?;
content.insert("room_version".into(), json!(room_version.as_str()).try_into()?);
if room_version == V12 {
// TODO(hydra): v12 rooms cannot be federated until they are stable.
content.insert("m.federate".into(), false.into());
}
content
},
};
let state_lock = match room_id.clone() {
| Some(room_id) => services.rooms.state.mutex.lock(&room_id).await,
| None => {
let temp_room_id = RoomId::new(services.globals.server_name());
debug_info!("Locking temporary room state mutex for {temp_room_id}");
services.rooms.state.mutex.lock(&temp_room_id).await
},
};
// 1. The room create event
services
debug!("Creating room create event for {sender_user} in room {room_id:?}");
let create_event_id = services
.rooms
.timeline
.build_and_append_pdu(
@ -186,13 +204,26 @@ pub(crate) async fn create_room_route(
..Default::default()
},
sender_user,
&room_id,
None,
&state_lock,
)
.boxed()
.await?;
debug!("Created room create event with ID {}", create_event_id);
let room_id = match room_id {
| Some(room_id) => room_id,
| None => {
let as_room_id = create_event_id.as_str().replace('$', "!");
debug_info!("Creating room with v12 room ID {as_room_id}");
RoomId::parse(&as_room_id)?.to_owned()
},
};
drop(state_lock);
debug!("Room created with ID {room_id}");
let state_lock = services.rooms.state.mutex.lock(&room_id).await;
// 2. Let the room creator join
debug_info!("Joining {sender_user} to room {room_id}");
services
.rooms
.timeline
@ -205,7 +236,7 @@ pub(crate) async fn create_room_route(
..RoomMemberEventContent::new(MembershipState::Join)
}),
sender_user,
&room_id,
Some(&room_id),
&state_lock,
)
.boxed()
@ -235,10 +266,28 @@ pub(crate) async fn create_room_route(
}
}
let mut creators: Vec<OwnedUserId> = vec![sender_user.to_owned()];
if let Some(additional_creators) = create_content.get("additional_creators") {
if let Some(additional_creators) = additional_creators.as_array() {
for creator in additional_creators {
if let Some(creator) = creator.as_str() {
if let Ok(creator) = OwnedUserId::parse(creator) {
creators.push(creator.clone());
users.insert(creator.clone(), int!(100));
}
}
}
}
}
if !(RoomVersion::new(&room_version)?).explicitly_privilege_room_creators {
creators.clear();
}
let power_levels_content = default_power_levels_content(
body.power_level_content_override.as_ref(),
&body.visibility,
users,
creators,
)?;
services
@ -252,7 +301,7 @@ pub(crate) async fn create_room_route(
..Default::default()
},
sender_user,
&room_id,
Some(&room_id),
&state_lock,
)
.boxed()
@ -269,7 +318,7 @@ pub(crate) async fn create_room_route(
alt_aliases: vec![],
}),
sender_user,
&room_id,
Some(&room_id),
&state_lock,
)
.boxed()
@ -292,7 +341,7 @@ pub(crate) async fn create_room_route(
}),
),
sender_user,
&room_id,
Some(&room_id),
&state_lock,
)
.boxed()
@ -308,7 +357,7 @@ pub(crate) async fn create_room_route(
&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared),
),
sender_user,
&room_id,
Some(&room_id),
&state_lock,
)
.boxed()
@ -327,7 +376,7 @@ pub(crate) async fn create_room_route(
}),
),
sender_user,
&room_id,
Some(&room_id),
&state_lock,
)
.boxed()
@ -363,7 +412,7 @@ pub(crate) async fn create_room_route(
services
.rooms
.timeline
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
.build_and_append_pdu(pdu_builder, sender_user, Some(&room_id), &state_lock)
.boxed()
.await?;
}
@ -376,7 +425,7 @@ pub(crate) async fn create_room_route(
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomNameEventContent::new(name.clone())),
sender_user,
&room_id,
Some(&room_id),
&state_lock,
)
.boxed()
@ -390,7 +439,7 @@ pub(crate) async fn create_room_route(
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomTopicEventContent { topic: topic.clone() }),
sender_user,
&room_id,
Some(&room_id),
&state_lock,
)
.boxed()
@ -450,6 +499,7 @@ fn default_power_levels_content(
power_level_content_override: Option<&Raw<RoomPowerLevelsEventContent>>,
visibility: &room::Visibility,
users: BTreeMap<OwnedUserId, Int>,
creators: Vec<OwnedUserId>,
) -> Result<serde_json::Value> {
let mut power_levels_content =
serde_json::to_value(RoomPowerLevelsEventContent { users, ..Default::default() })
@ -499,6 +549,19 @@ fn default_power_levels_content(
}
}
if !creators.is_empty() {
// Raise the default power level of tombstone to 150
power_levels_content["events"]["m.room.tombstone"] =
serde_json::to_value(150).expect("150 is valid Value");
for creator in creators {
// Omit creators from the power level list altogether
power_levels_content["users"]
.as_object_mut()
.expect("users is an object")
.remove(creator.as_str());
}
}
Ok(power_levels_content)
}
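Condensed from the hunks above, a sketch of the room-ID derivation this flow relies on: a v12 room is created without a room ID, then the ID is derived from the create event's ID by swapping the leading sigil. The event ID below is hypothetical:

use ruma::{EventId, OwnedRoomId, RoomId};

/// Derive the v12 room ID from the m.room.create event ID, as the
/// create_room_route hunk above does.
fn room_id_from_create_event(create_event_id: &EventId) -> OwnedRoomId {
    let as_room_id = create_event_id.as_str().replace('$', "!");
    RoomId::parse(&as_room_id)
        .expect("a valid event ID maps to a valid room ID")
        .to_owned()
}

fn main() {
    let event_id = EventId::parse("$31hnA8KZVhJTYSBGtMdPHo1S0HsNOipCyD9BVHibFLA").unwrap();
    assert_eq!(
        room_id_from_create_event(&event_id).as_str(),
        "!31hnA8KZVhJTYSBGtMdPHo1S0HsNOipCyD9BVHibFLA"
    );
}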

View file

@ -34,7 +34,7 @@ pub(crate) async fn get_room_event_route(
}
debug_assert!(
event.event_id() == event_id && event.room_id() == room_id,
event.event_id() == event_id && event.room_id_or_hash() == *room_id,
"Fetched PDU must match requested"
);

View file

@ -91,7 +91,7 @@ pub(crate) async fn upgrade_room_route(
replacement_room: replacement_room.clone(),
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;
@ -173,7 +173,7 @@ pub(crate) async fn upgrade_room_route(
timestamp: None,
},
sender_user,
&replacement_room,
Some(&replacement_room),
&state_lock,
)
.boxed()
@ -204,7 +204,7 @@ pub(crate) async fn upgrade_room_route(
timestamp: None,
},
sender_user,
&replacement_room,
Some(&replacement_room),
&state_lock,
)
.boxed()
@ -243,7 +243,7 @@ pub(crate) async fn upgrade_room_route(
..Default::default()
},
sender_user,
&replacement_room,
Some(&replacement_room),
&state_lock,
)
.boxed()
@ -302,7 +302,7 @@ pub(crate) async fn upgrade_room_route(
..power_levels_event_content
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.boxed()
@ -352,7 +352,7 @@ pub(crate) async fn upgrade_room_route(
..Default::default()
},
sender_user,
space_id,
Some(space_id),
&state_lock,
)
.boxed()
@ -376,7 +376,7 @@ pub(crate) async fn upgrade_room_route(
..Default::default()
},
sender_user,
space_id,
Some(space_id),
&state_lock,
)
.boxed()

View file

@ -80,7 +80,7 @@ pub(crate) async fn send_message_event_route(
..Default::default()
},
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

View file

@ -201,7 +201,7 @@ async fn send_state_event_for_key_helper(
..Default::default()
},
sender,
room_id,
Some(room_id),
&state_lock,
)
.await?;

View file

@ -457,7 +457,7 @@ async fn handle_left_room(
state_key: Some(sender_user.as_str().into()),
unsigned: None,
// The following keys are dropped on conversion
room_id: room_id.clone(),
room_id: Some(room_id.clone()),
prev_events: vec![],
depth: uint!(1),
auth_events: vec![],

View file

@ -2,7 +2,7 @@ use std::cmp;
use axum::extract::State;
use conduwuit::{
PduCount, Result,
Event, PduCount, Result,
utils::{IterStream, ReadyExt, stream::TryTools},
};
use futures::{FutureExt, StreamExt, TryStreamExt};
@ -68,7 +68,7 @@ pub(crate) async fn get_backfill_route(
Ok(services
.rooms
.state_accessor
.server_can_see_event(body.origin(), &pdu.room_id, &pdu.event_id)
.server_can_see_event(body.origin(), &pdu.room_id_or_hash(), &pdu.event_id)
.await
.then_some(pdu))
})

View file

@ -122,7 +122,7 @@ pub(crate) async fn create_join_event_template_route(
..RoomMemberEventContent::new(MembershipState::Join)
}),
&body.user_id,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

View file

@ -95,7 +95,7 @@ pub(crate) async fn create_knock_event_template_route(
&RoomMemberEventContent::new(MembershipState::Knock),
),
&body.user_id,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

View file

@ -45,7 +45,7 @@ pub(crate) async fn create_leave_event_template_route(
&RoomMemberEventContent::new(MembershipState::Leave),
),
&body.user_id,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

View file

@ -138,6 +138,7 @@ async fn handle(
pdus: impl Stream<Item = Pdu> + Send,
edus: impl Stream<Item = Edu> + Send,
) -> Result<ResolvedMap> {
// TODO(hydra): Does having no room ID break this?
// group pdus by room
let pdus = pdus
.collect()
@ -186,6 +187,7 @@ async fn handle_room(
.lock(&room_id)
.await;
// TODO(hydra): We might be missing a room ID
let room_id = &room_id;
pdus.try_stream()
.and_then(|(_, event_id, value)| async move {

View file

@ -175,7 +175,11 @@ pub(crate) async fn create_knock_event_v1_route(
.send_pdu_room(&body.room_id, &pdu_id)
.await?;
let knock_room_state = services.rooms.state.summary_stripped(&pdu).await;
let knock_room_state = services
.rooms
.state
.summary_stripped(&pdu, &body.room_id)
.await;
Ok(send_knock::v1::Response { knock_room_state })
}

View file

@ -18,7 +18,7 @@ pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[
/// Experimental, partially supported room versions
pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] =
&[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
&[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5, RoomVersionId::V12];
type RoomVersion = (RoomVersionId, RoomVersionStability);

View file

@ -10,7 +10,7 @@ mod unsigned;
use std::fmt::Debug;
use ruma::{
CanonicalJsonObject, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId,
CanonicalJsonObject, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId,
RoomVersionId, UserId, events::TimelineEventType,
};
use serde::Deserialize;
@ -168,7 +168,12 @@ pub trait Event: Clone + Debug {
fn redacts(&self) -> Option<&EventId>;
/// The `RoomId` of this event.
fn room_id(&self) -> &RoomId;
fn room_id(&self) -> Option<&RoomId>;
/// The `RoomId` or hash of this event.
/// This should only be preferred over room_id() if the event is a v12
/// create event.
fn room_id_or_hash(&self) -> OwnedRoomId;
/// The `UserId` of this event.
fn sender(&self) -> &UserId;
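
A brief sketch (hypothetical, not from the diff) of how callers are meant to use the split accessors above: code that must always have some room identifier, such as the state-mutex and visibility-check call sites in other hunks of this compare, moves from room_id() to room_id_or_hash():

use ruma::{OwnedRoomId, RoomId};

// Simplified stand-in for the trait change above.
trait EventLike {
    // Now optional: v12 create events carry no room ID on the wire.
    fn room_id(&self) -> Option<&RoomId>;
    // Always yields an identifier, falling back to the hash-derived one.
    fn room_id_or_hash(&self) -> OwnedRoomId;
}

// Callers that previously borrowed pdu.room_id() directly switch to
// room_id_or_hash() when an identifier is unconditionally required.
fn state_mutex_key<E: EventLike>(event: &E) -> OwnedRoomId {
    event.room_id_or_hash()
}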

View file

@ -32,12 +32,16 @@ impl<E: Event> Matches<E> for &RoomEventFilter {
}
fn matches_room<E: Event>(event: &E, filter: &RoomEventFilter) -> bool {
if filter.not_rooms.iter().any(is_equal_to!(event.room_id())) {
if filter
.not_rooms
.iter()
.any(is_equal_to!(event.room_id().unwrap()))
{
return false;
}
if let Some(rooms) = filter.rooms.as_ref() {
if !rooms.iter().any(is_equal_to!(event.room_id())) {
if !rooms.iter().any(is_equal_to!(event.room_id().unwrap())) {
return false;
}
}

View file

@ -31,7 +31,7 @@ use crate::Result;
pub struct Pdu {
pub event_id: OwnedEventId,
pub room_id: OwnedRoomId,
pub room_id: Option<OwnedRoomId>,
pub sender: OwnedUserId,
@ -110,7 +110,19 @@ impl Event for Pdu {
fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() }
#[inline]
fn room_id(&self) -> &RoomId { &self.room_id }
fn room_id(&self) -> Option<&RoomId> { self.room_id.as_deref() }
#[inline]
fn room_id_or_hash(&self) -> OwnedRoomId {
if let Some(room_id) = &self.room_id {
room_id.clone()
} else {
let constructed_hash = "!".to_owned() + &self.event_id.as_str()[1..];
RoomId::parse(&constructed_hash)
.expect("event ID can be indexed")
.to_owned()
}
}
#[inline]
fn sender(&self) -> &UserId { &self.sender }
@ -163,7 +175,19 @@ impl Event for &Pdu {
fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() }
#[inline]
fn room_id(&self) -> &RoomId { &self.room_id }
fn room_id(&self) -> Option<&RoomId> { self.room_id.as_ref().map(AsRef::as_ref) }
#[inline]
fn room_id_or_hash(&self) -> OwnedRoomId {
if let Some(room_id) = &self.room_id {
room_id.clone()
} else {
let constructed_hash = "!".to_owned() + &self.event_id.as_str()[1..];
RoomId::parse(&constructed_hash)
.expect("event ID can be indexed")
.to_owned()
}
}
#[inline]
fn sender(&self) -> &UserId { &self.sender }

View file

@ -406,7 +406,7 @@ where
Pdu {
event_id: id.try_into().unwrap(),
room_id: room_id().to_owned(),
room_id: Some(room_id().to_owned()),
sender: sender.to_owned(),
origin_server_ts: ts.try_into().unwrap(),
state_key: state_key.map(Into::into),

View file

@ -2,10 +2,10 @@ use std::{borrow::Borrow, collections::BTreeSet};
use futures::{
Future,
future::{OptionFuture, join3},
future::{OptionFuture, join, join3},
};
use ruma::{
Int, OwnedUserId, RoomVersionId, UserId,
Int, OwnedRoomId, OwnedUserId, RoomVersionId, UserId,
events::room::{
create::RoomCreateEventContent,
join_rules::{JoinRule, RoomJoinRulesEventContent},
@ -44,6 +44,15 @@ struct RoomMemberContentFields {
join_authorised_via_users_server: Option<Raw<OwnedUserId>>,
}
#[derive(Deserialize)]
struct RoomCreateContentFields {
room_version: Option<Raw<RoomVersionId>>,
creator: Option<Raw<IgnoredAny>>,
additional_creators: Option<Vec<Raw<OwnedUserId>>>,
#[serde(rename = "m.federate", default = "ruma::serde::default_true")]
federate: bool,
}
/// For the given event `kind` what are the relevant auth events that are needed
/// to authenticate this `content`.
///
@ -56,16 +65,24 @@ pub fn auth_types_for_event(
sender: &UserId,
state_key: Option<&str>,
content: &RawJsonValue,
room_version: &RoomVersion,
) -> serde_json::Result<Vec<(StateEventType, StateKey)>> {
if kind == &TimelineEventType::RoomCreate {
return Ok(vec![]);
}
let mut auth_types = vec![
let mut auth_types = if room_version.room_ids_as_hashes {
vec![
(StateEventType::RoomPowerLevels, StateKey::new()),
(StateEventType::RoomMember, sender.as_str().into()),
]
} else {
vec![
(StateEventType::RoomPowerLevels, StateKey::new()),
(StateEventType::RoomMember, sender.as_str().into()),
(StateEventType::RoomCreate, StateKey::new()),
];
]
};
if kind == &TimelineEventType::RoomMember {
#[derive(Deserialize)]
@ -141,6 +158,7 @@ pub async fn auth_check<E, F, Fut>(
incoming_event: &E,
current_third_party_invite: Option<&E>,
fetch_state: F,
create_event: &E,
) -> Result<bool, Error>
where
F: Fn(&StateEventType, &str) -> Fut + Send,
@ -169,12 +187,6 @@ where
//
// 1. If type is m.room.create:
if *incoming_event.event_type() == TimelineEventType::RoomCreate {
#[derive(Deserialize)]
struct RoomCreateContentFields {
room_version: Option<Raw<RoomVersionId>>,
creator: Option<Raw<IgnoredAny>>,
}
debug!("start m.room.create check");
// If it has any previous events, reject
@ -184,15 +196,17 @@ where
}
// If the domain of the room_id does not match the domain of the sender, reject
-let Some(room_id_server_name) = incoming_event.room_id().server_name() else {
+if incoming_event.room_id().is_some() {
+let Some(room_id_server_name) = incoming_event.room_id().unwrap().server_name()
+else {
warn!("room ID has no servername");
return Ok(false);
};
if room_id_server_name != sender.server_name() {
warn!("servername of room ID does not match servername of sender");
return Ok(false);
}
+}
// If content.room_version is present and is not a recognized version, reject
let content: RoomCreateContentFields = from_json_str(incoming_event.content().get())?;
@ -204,7 +218,15 @@ where
return Ok(false);
}
if !room_version.use_room_create_sender {
// TODO(hydra): If the create event has a room_id, reject
if room_version.room_ids_as_hashes && incoming_event.room_id().is_some() {
warn!("this room version does not support room IDs in m.room.create");
return Ok(false);
}
if !room_version.use_room_create_sender
&& !room_version.explicitly_privilege_room_creators
{
// If content has no creator field, reject
if content.creator.is_none() {
warn!("no creator field found in m.room.create content");
@ -216,6 +238,8 @@ where
return Ok(true);
}
// NOTE(hydra): We must have an event ID from this point forward.
/*
// TODO: In the past this code was commented as it caused problems with Synapse. This is no
// longer the case. This needs to be implemented.
@ -242,54 +266,102 @@ where
}
*/
-let (room_create_event, power_levels_event, sender_member_event) = join3(
-fetch_state(&StateEventType::RoomCreate, ""),
+let (power_levels_event, sender_member_event) = join(
+// fetch_state(&StateEventType::RoomCreate, ""),
fetch_state(&StateEventType::RoomPowerLevels, ""),
fetch_state(&StateEventType::RoomMember, sender.as_str()),
)
.await;
-let room_create_event = match room_create_event {
-| None => {
-warn!("no m.room.create event in auth chain");
+// TODO(hydra): Re-enable <v12 checks
+// let room_create_event = match room_create_event {
+// | None => {
+// // Room was either v11 with no create event, or v12+ room
+// if incoming_event.room_id().is_some() {
+// // invalid v11
+// warn!("no m.room.create event found in claimed state");
+// return Ok(false);
+// }
+// // v12 room
+// debug!("no m.room.create event found, assuming v12 room");
+// create_event.clone()
+// },
+// | Some(e) => e,
+// };
+let room_create_event = create_event.clone();
+// Get the content of the room create event, used later.
+let room_create_content: RoomCreateContentFields =
+from_json_str(room_create_event.content().get())?;
+if room_create_content
+.room_version
+.is_some_and(|v| v.deserialize().is_err())
+{
+warn!("invalid room version found in m.room.create event");
+return Ok(false);
-},
-| Some(e) => e,
}
let expected_room_id = match room_version.room_ids_as_hashes {
// If the room version uses hashes, we replace the create event's event ID leading sigil
// with !
| true => OwnedRoomId::try_from(room_create_event.event_id().as_str().replace('$', "!"))
.expect("Failed to convert event ID to room ID")
.clone(),
| false => room_create_event.room_id().unwrap().to_owned(),
};
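// NOTE: `replace('$', "!")` is the same sigil swap as `room_id_or_hash`; both
// assume no other '$' occurs in the event ID, which holds for the
// `$<base64 hash>` IDs of modern room versions.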
-if incoming_event.room_id() != room_create_event.room_id() {
-warn!("room_id of incoming event does not match room_id of m.room.create event");
+if incoming_event.room_id().unwrap() != expected_room_id {
+warn!(
+expected = %expected_room_id,
+received = %incoming_event.room_id().unwrap(),
+"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
+incoming_event.room_id().unwrap(),
+expected_room_id,
+);
return Ok(false);
}
// If the create event is referenced in the event's auth events, and this is a
// v12 room, reject
let claims_create_event = incoming_event
.auth_events()
.any(|id| id == room_create_event.event_id());
if room_version.room_ids_as_hashes && claims_create_event {
warn!("m.room.create event incorrectly found in auth events");
return Ok(false);
} else if !room_version.room_ids_as_hashes && !claims_create_event {
// If the create event is not referenced in the event's auth events, and this is
// a v11 room, reject
warn!("no m.room.create event found in auth events");
return Ok(false);
}
if let Some(ref pe) = power_levels_event {
-if pe.room_id() != room_create_event.room_id() {
-warn!("room_id of power levels event does not match room_id of m.room.create event");
+if *pe.room_id().unwrap() != expected_room_id {
+warn!(
+expected = %expected_room_id,
+received = %pe.room_id().unwrap(),
+"room_id of power levels event does not match room_id of m.room.create event"
+);
return Ok(false);
}
}
-// 3. If event does not have m.room.create in auth_events reject
-if !incoming_event
-.auth_events()
-.any(|id| id == room_create_event.event_id())
-{
-warn!("no m.room.create event in auth events");
-return Ok(false);
-}
+// removed as part of Hydra.
+// TODO: reintroduce this for <v12 lol
+// if !incoming_event
+// .auth_events()
+// .any(|id| id == room_create_event.event_id())
+// {
+// warn!("no m.room.create event in auth events");
+// return Ok(false);
+// }
// If the create event content has the field m.federate set to false and the
// sender domain of the event does not match the sender domain of the create
// event, reject.
#[derive(Deserialize)]
#[allow(clippy::items_after_statements)]
struct RoomCreateContentFederate {
#[serde(rename = "m.federate", default = "ruma::serde::default_true")]
federate: bool,
}
let room_create_content: RoomCreateContentFederate =
from_json_str(room_create_event.content().get())?;
-if !room_create_content.federate
+if !room_version.room_ids_as_hashes
+&& !room_create_content.federate
&& room_create_event.sender().server_name() != incoming_event.sender().server_name()
{
warn!(
@ -321,7 +393,7 @@ where
debug!("starting m.room.member check");
let state_key = match incoming_event.state_key() {
| None => {
warn!("no statekey in member event");
warn!("no state key in member event");
return Ok(false);
},
| Some(s) => s,
@ -377,6 +449,7 @@ where
&user_for_join_auth_membership,
&room_create_event,
)? {
warn!("membership change not valid for some reason");
return Ok(false);
}
@ -394,8 +467,16 @@ where
},
};
-if sender_member_event.room_id() != room_create_event.room_id() {
-warn!("room_id of incoming event does not match room_id of m.room.create event");
+if sender_member_event
+.room_id()
+.expect("we have a room ID for non create events")
+!= room_create_event.room_id_or_hash()
+{
+warn!(
+"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
+sender_member_event.room_id_or_hash(),
+room_create_event.room_id_or_hash()
+);
return Ok(false);
}
@ -417,7 +498,7 @@ where
}
// If type is m.room.third_party_invite
-let sender_power_level = match &power_levels_event {
+let mut sender_power_level = match &power_levels_event {
| Some(pl) => {
let content =
deserialize_power_levels_content_fields(pl.content().get(), room_version)?;
@ -439,6 +520,24 @@ where
if is_creator { int!(100) } else { int!(0) }
},
};
if room_version.explicitly_privilege_room_creators {
// If the user sent the create event, or is listed in additional_creators, just
// give them Int::MAX
if sender == room_create_event.sender()
|| room_create_content
.additional_creators
.as_ref()
.is_some_and(|creators| {
creators
.iter()
.any(|c| c.deserialize().is_ok_and(|c| c == *sender))
}) {
trace!("privileging room creator or additional creator");
// This user is the room creator or an additional creator, give them max power
// level
sender_power_level = Int::MAX;
}
}
// Allow if and only if sender's current power level is greater than
// or equal to the invite level
@ -554,6 +653,7 @@ where
struct GetThirdPartyInvite {
third_party_invite: Option<Raw<ThirdPartyInvite>>,
}
let create_content = from_json_str::<RoomCreateContentFields>(create_room.content().get())?;
let content = current_event.content();
let target_membership = from_json_str::<GetMembership>(content.get())?.membership;
@ -576,15 +676,36 @@ where
| None => RoomPowerLevelsEventContent::default(),
};
-let sender_power = power_levels
+let mut sender_power = power_levels
.users
.get(sender)
.or_else(|| sender_is_joined.then_some(&power_levels.users_default));
-let target_power = power_levels.users.get(target_user).or_else(|| {
+let mut target_power = power_levels.users.get(target_user).or_else(|| {
(target_membership == MembershipState::Join).then_some(&power_levels.users_default)
});
let mut creators = BTreeSet::new();
creators.insert(create_room.sender().to_owned());
if room_version.explicitly_privilege_room_creators {
// Explicitly privilege room creators
// If the sender sent the create event, or in additional_creators, give them
// Int::MAX. Same case for target.
if let Some(additional_creators) = &create_content.additional_creators {
for c in additional_creators {
if let Ok(c) = c.deserialize() {
creators.insert(c);
}
}
}
if creators.contains(sender) {
sender_power = Some(&Int::MAX);
}
if creators.contains(target_user) {
target_power = Some(&Int::MAX);
}
}
let mut join_rules = JoinRule::Invite;
if let Some(jr) = &join_rules_event {
join_rules = from_json_str::<RoomJoinRulesEventContent>(jr.content().get())?.join_rule;
@ -597,7 +718,7 @@ where
let user_for_join_auth_is_valid = if let Some(user_for_join_auth) = user_for_join_auth {
// Is the authorised user allowed to invite users into this room
-let (auth_user_pl, invite_level) = if let Some(pl) = &power_levels_event {
+let (mut auth_user_pl, invite_level) = if let Some(pl) = &power_levels_event {
// TODO Refactor all powerlevel parsing
let invite =
deserialize_power_levels_content_invite(pl.content().get(), room_version)?.invite;
@ -613,6 +734,9 @@ where
} else {
(int!(0), int!(0))
};
if creators.contains(user_for_join_auth) {
auth_user_pl = Int::MAX;
}
(user_for_join_auth_membership == &MembershipState::Join)
&& (auth_user_pl >= invite_level)
} else {
@ -622,6 +746,7 @@ where
Ok(match target_membership {
| MembershipState::Join => {
debug!("starting target_membership=join check");
// 1. If the only previous event is an m.room.create and the state_key is the
// creator,
// allow
@ -633,7 +758,10 @@ where
let no_more_prev_events = prev_events.next().is_none();
if prev_event_is_create_event && no_more_prev_events {
-let is_creator = if room_version.use_room_create_sender {
+debug!("checking if sender is a room creator for initial membership event");
+let is_creator = if room_version.explicitly_privilege_room_creators {
+creators.contains(target_user) && creators.contains(sender)
+} else if room_version.use_room_create_sender {
let creator = create_room.sender();
creator == sender && creator == target_user
@ -647,10 +775,15 @@ where
};
if is_creator {
debug!("sender is room creator, allowing join");
return Ok(true);
}
debug!("sender is not room creator, proceeding with normal auth checks");
}
let membership_allows_join = matches!(
target_user_current_membership,
MembershipState::Join | MembershipState::Invite
);
if sender != target_user {
// If the sender does not match state_key, reject.
warn!("Can't make other user join");
@ -659,39 +792,48 @@ where
// If the sender is banned, reject.
warn!(?target_user_membership_event_id, "Banned user can't join");
false
-} else if (join_rules == JoinRule::Invite
-|| room_version.allow_knocking && (join_rules == JoinRule::Knock || matches!(join_rules, JoinRule::KnockRestricted(_))))
-// If the join_rule is invite then allow if membership state is invite or join
-&& (target_user_current_membership == MembershipState::Join
-|| target_user_current_membership == MembershipState::Invite)
-{
-true
-} else if room_version.restricted_join_rules
-&& matches!(join_rules, JoinRule::Restricted(_))
-|| room_version.knock_restricted_join_rule
-&& matches!(join_rules, JoinRule::KnockRestricted(_))
-{
-// If the join_rule is restricted or knock_restricted
-if matches!(
-target_user_current_membership,
-MembershipState::Invite | MembershipState::Join
-) {
-// If membership state is join or invite, allow.
-true
-} else {
-// If the join_authorised_via_users_server key in content is not a user with
-// sufficient permission to invite other users, reject.
-// Otherwise, allow.
-user_for_join_auth_is_valid
+match join_rules {
+| JoinRule::Invite if !membership_allows_join => {
+warn!("Join rule is invite but membership does not allow join");
+false
+},
+| JoinRule::Knock if !room_version.allow_knocking => {
+warn!("Join rule is knock but room version does not allow knocking");
+false
+},
+| JoinRule::Knock if !membership_allows_join => {
+warn!("Join rule is knock but membership does not allow join");
+false
+},
+| JoinRule::KnockRestricted(_) if !room_version.knock_restricted_join_rule =>
+{
+warn!(
+"Join rule is knock_restricted but room version does not support it"
+);
+false
+},
+| JoinRule::KnockRestricted(_) if !membership_allows_join => {
+warn!("Join rule is knock_restricted but membership does not allow join");
+false
+},
+| JoinRule::Restricted(_) | JoinRule::KnockRestricted(_) =>
+if !user_for_join_auth_is_valid {
+warn!(
+"Join rule is a restricted one but no valid authorising user \
+was given"
+);
+false
+} else {
+true
+},
+| _ => true,
+}
} else {
// If the join_rule is public, allow.
// Otherwise, reject.
join_rules == JoinRule::Public
}
},
| MembershipState::Invite => {
// If content has third_party_invite key
debug!("starting target_membership=invite check");
match third_party_invite.and_then(|i| i.deserialize().ok()) {
| Some(tp_id) =>
if target_user_current_membership == MembershipState::Ban {
@ -849,6 +991,7 @@ fn can_send_event(event: &impl Event, ple: Option<&impl Event>, user_level: Int)
required_level = i64::from(event_type_power_level),
user_level = i64::from(user_level),
state_key = ?event.state_key(),
power_level_event_id = ?ple.map(|e| e.event_id().as_str()),
"permissions factors",
);

View file

@ -92,6 +92,11 @@ where
Pdu: Event + Clone + Send + Sync,
for<'b> &'b Pdu: Event + Send,
{
use RoomVersionId::*;
let stateres_version = match room_version {
| V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 | V11 => 2.0,
| _ => 2.1,
};
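// NOTE (assumption): 2.0/2.1 are internal state-resolution markers, not spec
// numbers: rooms up to v11 take the classic path, while hashed-room-ID rooms
// additionally walk the conflicted subgraph computed below.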
debug!("State resolution starting");
// Split non-conflicting and conflicting state
@ -108,13 +113,27 @@ where
debug!(count = conflicting.len(), "conflicting events");
trace!(map = ?conflicting, "conflicting events");
let conflicted_state_subgraph: HashSet<_> = if stateres_version >= 2.1 {
calculate_conflicted_subgraph(&conflicting, event_fetch)
.await
.ok_or_else(|| {
Error::InvalidPdu("Failed to calculate conflicted subgraph".to_owned())
})?
} else {
HashSet::new()
};
debug!(count = conflicted_state_subgraph.len(), "conflicted subgraph");
trace!(set = ?conflicted_state_subgraph, "conflicted subgraph");
let conflicting_values = conflicting.into_values().flatten().stream();
// `all_conflicted` contains unique items
// synapse says `full_set = {eid for eid in full_conflicted_set if eid in
// event_map}`
// Hydra: Also consider the conflicted state subgraph
let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets)
.chain(conflicting_values)
.chain(conflicted_state_subgraph.into_iter().stream())
.broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id))
.collect()
.await;
@ -150,6 +169,7 @@ where
// Sequentially auth check each control event.
let resolved_control = iterative_auth_check(
&room_version,
stateres_version,
sorted_control_levels.iter().stream().map(AsRef::as_ref),
clean.clone(),
&event_fetch,
@ -183,10 +203,11 @@ where
let sorted_left_events =
mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?;
trace!(list = ?sorted_left_events, "events left, sorted");
trace!(list = ?sorted_left_events, "events left, sorted, running iterative auth check");
let mut resolved_state = iterative_auth_check(
&room_version,
stateres_version,
sorted_left_events.iter().stream().map(AsRef::as_ref),
resolved_control, // The control events are added to the final resolved state
&event_fetch,
@ -198,6 +219,7 @@ where
resolved_state.extend(clean);
debug!("state resolution finished");
trace!( map = ?resolved_state, "final resolved state" );
Ok(resolved_state)
}
@ -250,6 +272,52 @@ where
(unconflicted_state, conflicted_state)
}
/// Calculate the conflicted subgraph
async fn calculate_conflicted_subgraph<F, Fut, E>(
conflicted: &StateMap<Vec<OwnedEventId>>,
fetch_event: &F,
) -> Option<HashSet<OwnedEventId>>
where
F: Fn(OwnedEventId) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E: Event + Send + Sync,
{
let conflicted_events: HashSet<_> = conflicted.values().flatten().cloned().collect();
let mut subgraph: HashSet<OwnedEventId> = HashSet::new();
let mut stack: Vec<Vec<OwnedEventId>> =
vec![conflicted_events.iter().cloned().collect::<Vec<_>>()];
let mut path: Vec<OwnedEventId> = Vec::new();
let mut seen: HashSet<OwnedEventId> = HashSet::new();
let next_event = |stack: &mut Vec<Vec<_>>, path: &mut Vec<_>| {
while stack.last().is_some_and(std::vec::Vec::is_empty) {
stack.pop();
path.pop();
}
stack.last_mut().and_then(std::vec::Vec::pop)
};
while let Some(event_id) = next_event(&mut stack, &mut path) {
path.push(event_id.clone());
if subgraph.contains(&event_id) {
if path.len() > 1 {
subgraph.extend(path.iter().cloned());
}
path.pop();
continue;
}
if conflicted_events.contains(&event_id) && path.len() > 1 {
subgraph.extend(path.iter().cloned());
}
if seen.contains(&event_id) {
path.pop();
continue;
}
let evt = fetch_event(event_id.clone()).await?;
stack.push(evt.auth_events().map(ToOwned::to_owned).collect());
seen.insert(event_id);
}
Some(subgraph)
}
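The loop above is an iterative depth-first search: it flags every event that lies on a path between two conflicted events through their auth references, and with the `seen` set it visits each edge at most once, so the cost stays linear in the size of the auth graph. A self-contained sketch of the same idea, using plain `&str` IDs and recursion in place of the explicit stack (illustrative only, not the real `Event` trait):

use std::collections::{HashMap, HashSet};

fn walk<'a>(
    id: &'a str,
    path: &mut Vec<&'a str>,
    conflicted: &HashSet<&'a str>,
    auth: &HashMap<&'a str, Vec<&'a str>>,
    seen: &mut HashSet<&'a str>,
    subgraph: &mut HashSet<&'a str>,
) {
    path.push(id);
    let already_flagged = subgraph.contains(id);
    // Hitting a conflicted event (or one already flagged) marks the whole
    // path leading to it as part of the conflicted subgraph.
    if path.len() > 1 && (already_flagged || conflicted.contains(id)) {
        subgraph.extend(path.iter().copied());
    }
    // Descend into auth events only on the first visit.
    if !already_flagged && seen.insert(id) {
        for &next in auth.get(id).into_iter().flatten() {
            walk(next, path, conflicted, auth, seen, subgraph);
        }
    }
    path.pop();
}

fn conflicted_subgraph<'a>(
    conflicted: &HashSet<&'a str>,
    auth: &HashMap<&'a str, Vec<&'a str>>,
) -> HashSet<&'a str> {
    let (mut subgraph, mut seen, mut path) = (HashSet::new(), HashSet::new(), Vec::new());
    for &start in conflicted {
        walk(start, &mut path, conflicted, auth, &mut seen, &mut subgraph);
    }
    subgraph
}

// "C1" and "C2" conflict; "B" sits on the auth path between them, so all
// three end up in the subgraph.
fn main() {
    let conflicted = HashSet::from(["C1", "C2"]);
    let auth = HashMap::from([
        ("C1", vec!["B"]),
        ("B", vec!["C2"]),
        ("C2", vec![]),
    ]);
    assert!(conflicted_subgraph(&conflicted, &auth).contains("B"));
}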
/// Returns a Vec of deduped EventIds that appear in some chains but not others.
#[allow(clippy::arithmetic_side_effects)]
fn get_auth_chain_diff<Id, Hasher>(
@ -513,8 +581,10 @@ where
/// For each `events_to_check` event we gather the events needed to auth it from
/// the `fetch_event` closure and verify each event using the
/// `event_auth::auth_check` function.
#[tracing::instrument(level = "trace", skip_all)]
async fn iterative_auth_check<'a, E, F, Fut, S>(
room_version: &RoomVersion,
stateres_version: f32,
events_to_check: S,
unconflicted_state: StateMap<OwnedEventId>,
fetch_event: &F,
@ -538,12 +608,15 @@ where
.try_collect()
.boxed()
.await?;
trace!(list = ?events_to_check, "events to check");
let auth_event_ids: HashSet<OwnedEventId> = events_to_check
.iter()
.flat_map(|event: &E| event.auth_events().map(ToOwned::to_owned))
.collect();
trace!(set = ?auth_event_ids, "auth event IDs to fetch");
let auth_events: HashMap<OwnedEventId, E> = auth_event_ids
.into_iter()
.stream()
@ -553,9 +626,15 @@ where
.boxed()
.await;
trace!(map = ?auth_events.keys().collect::<Vec<_>>(), "fetched auth events");
let auth_events = &auth_events;
-let mut resolved_state = unconflicted_state;
+let mut resolved_state = match stateres_version {
+| 2.1 => StateMap::new(),
+| _ => unconflicted_state,
+};
for event in events_to_check {
trace!(event_id = event.event_id().as_str(), "checking event");
let state_key = event
.state_key()
.ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?;
@ -565,13 +644,29 @@ where
event.sender(),
Some(state_key),
event.content(),
room_version,
)?;
trace!(list = ?auth_types, event_id = event.event_id().as_str(), "auth types for event");
let mut auth_state = StateMap::new();
if room_version.room_ids_as_hashes {
trace!("room version uses hashed IDs, manually fetching create event");
let create_event_id_raw = event.room_id_or_hash().as_str().replace('!', "$");
let create_event_id = EventId::parse(&create_event_id_raw).map_err(|e| {
Error::InvalidPdu(format!(
"Failed to parse create event ID from room ID/hash: {e}"
))
})?;
let create_event = fetch_event(create_event_id.into())
.await
.ok_or_else(|| Error::NotFound("Failed to find create event".into()))?;
auth_state.insert(create_event.event_type().with_state_key(""), create_event);
}
for aid in event.auth_events() {
if let Some(ev) = auth_events.get(aid) {
//TODO: synapse checks "rejected_reason" which is most likely related to
// soft-failing
trace!(event_id = aid.as_str(), "found auth event");
auth_state.insert(
ev.event_type()
.with_state_key(ev.state_key().ok_or_else(|| {
@ -600,8 +695,9 @@ where
auth_state.insert(key.to_owned(), event);
})
.await;
trace!(map = ?auth_state.keys().collect::<Vec<_>>(), event_id = event.event_id().as_str(), "auth state for event");
debug!("event to check {:?}", event.event_id());
debug!(event_id = event.event_id().as_str(), "Running auth checks");
// The key for this is (eventType + a state_key of the signed token not sender)
// so search for it
@ -617,16 +713,29 @@ where
)
};
-let auth_result =
-auth_check(room_version, &event, current_third_party, fetch_state).await;
+let auth_result = auth_check(
+room_version,
+&event,
+current_third_party,
+fetch_state,
+&fetch_state(&StateEventType::RoomCreate, "")
+.await
+.expect("create event must exist"),
+)
+.await;
match auth_result {
| Ok(true) => {
// add event to resolved state map
trace!(
event_id = event.event_id().as_str(),
"event passed the authentication check, adding to resolved state"
);
resolved_state.insert(
event.event_type().with_state_key(state_key),
event.event_id().to_owned(),
);
trace!(map = ?resolved_state, "new resolved state");
},
| Ok(false) => {
// synapse passes here on AuthError. We do not add this event to resolved_state.
@ -638,7 +747,8 @@ where
},
}
}
trace!(map = ?resolved_state, "final resolved state from iterative auth check");
debug!("iterative auth check finished");
Ok(resolved_state)
}
@ -909,6 +1019,7 @@ mod tests {
let resolved_power = super::iterative_auth_check(
&RoomVersion::V6,
2.1,
sorted_power_events.iter().map(AsRef::as_ref).stream(),
HashMap::new(), // unconflicted events
&fetcher,

View file

@ -61,25 +61,34 @@ pub struct RoomVersion {
pub extra_redaction_checks: bool,
/// Allow knocking in event authentication.
///
-/// See [room v7 specification](https://spec.matrix.org/latest/rooms/v7/) for more information.
+/// See [room v7 specification](https://spec.matrix.org/latest/rooms/v7/)
pub allow_knocking: bool,
/// Adds support for the restricted join rule.
///
-/// See: [MSC3289](https://github.com/matrix-org/matrix-spec-proposals/pull/3289) for more information.
+/// See: [MSC3289](https://github.com/matrix-org/matrix-spec-proposals/pull/3289)
pub restricted_join_rules: bool,
/// Adds support for the knock_restricted join rule.
///
-/// See: [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) for more information.
+/// See: [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787)
pub knock_restricted_join_rule: bool,
/// Enforces integer power levels.
///
-/// See: [MSC3667](https://github.com/matrix-org/matrix-spec-proposals/pull/3667) for more information.
+/// See: [MSC3667](https://github.com/matrix-org/matrix-spec-proposals/pull/3667)
pub integer_power_levels: bool,
/// Determine the room creator using the `m.room.create` event's `sender`,
/// instead of the event content's `creator` field.
///
-/// See: [MSC2175](https://github.com/matrix-org/matrix-spec-proposals/pull/2175) for more information.
+/// See: [MSC2175](https://github.com/matrix-org/matrix-spec-proposals/pull/2175)
pub use_room_create_sender: bool,
/// Whether the room creators are considered superusers.
/// A superuser will always have infinite power levels in the room.
///
/// See: [MSC4289](https://github.com/matrix-org/matrix-spec-proposals/pull/4289)
pub explicitly_privilege_room_creators: bool,
/// Whether the room's m.room.create event ID is itself the room ID.
///
/// See: [MSC4291](https://github.com/matrix-org/matrix-spec-proposals/pull/4291)
pub room_ids_as_hashes: bool,
}
impl RoomVersion {
@ -97,6 +106,8 @@ impl RoomVersion {
knock_restricted_join_rule: false,
integer_power_levels: false,
use_room_create_sender: false,
explicitly_privilege_room_creators: false,
room_ids_as_hashes: false,
};
pub const V10: Self = Self {
knock_restricted_join_rule: true,
@ -107,6 +118,11 @@ impl RoomVersion {
use_room_create_sender: true,
..Self::V10
};
pub const V12: Self = Self {
explicitly_privilege_room_creators: true,
room_ids_as_hashes: true,
..Self::V11
};
pub const V2: Self = Self {
state_res: StateResolutionVersion::V2,
..Self::V1
@ -144,6 +160,7 @@ impl RoomVersion {
| RoomVersionId::V9 => Self::V9,
| RoomVersionId::V10 => Self::V10,
| RoomVersionId::V11 => Self::V11,
| RoomVersionId::V12 => Self::V12,
| ver => return Err(Error::Unsupported(format!("found version `{ver}`"))),
})
}
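As a quick check of how the struct-update chaining above composes (a sketch assuming the `RoomVersion` type is in scope; flag values as declared in the constants):

fn main() {
    let v12 = RoomVersion::V12;
    assert!(v12.use_room_create_sender);             // inherited via ..Self::V11
    assert!(v12.explicitly_privilege_room_creators); // MSC4289
    assert!(v12.room_ids_as_hashes);                 // MSC4291
}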

View file

@ -24,7 +24,7 @@ use serde_json::{
use super::auth_types_for_event;
use crate::{
-Result, info,
+Result, RoomVersion, info,
matrix::{Event, EventTypeExt, Pdu, StateMap, pdu::EventHash},
};
@ -154,6 +154,7 @@ pub(crate) async fn do_check(
fake_event.sender(),
fake_event.state_key(),
fake_event.content(),
&RoomVersion::V6,
)
.unwrap();
@ -398,7 +399,7 @@ pub(crate) fn to_init_pdu_event(
Pdu {
event_id: id.try_into().unwrap(),
-room_id: room_id().to_owned(),
+room_id: Some(room_id().to_owned()),
sender: sender.to_owned(),
origin_server_ts: ts.try_into().unwrap(),
state_key: state_key.map(Into::into),
@ -446,7 +447,7 @@ where
Pdu {
event_id: id.try_into().unwrap(),
-room_id: room_id().to_owned(),
+room_id: Some(room_id().to_owned()),
sender: sender.to_owned(),
origin_server_ts: ts.try_into().unwrap(),
state_key: state_key.map(Into::into),

View file

@ -19,7 +19,7 @@ where
S: Stream<Item = K> + Send + 'a,
K: AsRef<[u8]> + Send + Sync + 'a,
{
-fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a;
+fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
}
impl<'a, K, S> Get<'a, K, S> for S
@ -29,7 +29,7 @@ where
K: AsRef<[u8]> + Send + Sync + 'a,
{
#[inline]
-fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a {
+fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
map.get_batch(self)
}
}
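All the `'_` → `'a` rewrites in these database map modules have the same shape; a minimal standalone sketch (hypothetical `Map`/`Handle` names) of what the rename makes explicit:

struct Map;
struct Handle<'m>(&'m Map);

impl Map {
    // `-> Handle<'_>` would also compile, but the elided lifetime silently
    // resolves to the named `'m`; spelling it out keeps the borrow on `self`
    // visible in the signature and satisfies the elided-named-lifetimes lint.
    fn handle<'m>(&'m self) -> Handle<'m> {
        Handle(self)
    }
}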
@ -39,7 +39,7 @@ where
pub(crate) fn get_batch<'a, S, K>(
self: &'a Arc<Self>,
keys: S,
-) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a
+) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
where
S: Stream<Item = K> + Send + 'a,
K: AsRef<[u8]> + Send + Sync + 'a,

View file

@ -10,7 +10,7 @@ use super::stream::is_cached;
use crate::{keyval, keyval::Key, stream};
#[implement(super::Map)]
-pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send
+pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
where
K: Deserialize<'a> + Send,
{

View file

@ -15,7 +15,7 @@ use crate::{
pub fn keys_from<'a, K, P>(
self: &'a Arc<Self>,
from: &P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -40,7 +40,7 @@ where
pub fn keys_raw_from<'a, K, P>(
self: &'a Arc<Self>,
from: &P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send,

View file

@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
pub fn keys_prefix<'a, K, P>(
self: &'a Arc<Self>,
prefix: &P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -37,7 +37,7 @@ where
pub fn keys_raw_prefix<'a, K, P>(
self: &'a Arc<Self>,
prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a,
@ -50,7 +50,7 @@ where
pub fn raw_keys_prefix<'a, P>(
self: &'a Arc<Self>,
prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{

View file

@ -17,7 +17,7 @@ where
S: Stream<Item = K> + Send + 'a,
K: Serialize + Debug,
{
-fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a;
+fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
}
impl<'a, K, S> Qry<'a, K, S> for S
@ -27,7 +27,7 @@ where
K: Serialize + Debug + 'a,
{
#[inline]
-fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a {
+fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
map.qry_batch(self)
}
}
@ -37,7 +37,7 @@ where
pub(crate) fn qry_batch<'a, S, K>(
self: &'a Arc<Self>,
keys: S,
-) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a
+) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
where
S: Stream<Item = K> + Send + 'a,
K: Serialize + Debug + 'a,

View file

@ -10,7 +10,7 @@ use super::rev_stream::is_cached;
use crate::{keyval, keyval::Key, stream};
#[implement(super::Map)]
-pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send
+pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
where
K: Deserialize<'a> + Send,
{

View file

@ -15,7 +15,7 @@ use crate::{
pub fn rev_keys_from<'a, K, P>(
self: &'a Arc<Self>,
from: &P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -41,7 +41,7 @@ where
pub fn rev_keys_raw_from<'a, K, P>(
self: &'a Arc<Self>,
from: &P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send,

View file

@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
pub fn rev_keys_prefix<'a, K, P>(
self: &'a Arc<Self>,
prefix: &P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -37,7 +37,7 @@ where
pub fn rev_keys_raw_prefix<'a, K, P>(
self: &'a Arc<Self>,
prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a,
@ -50,7 +50,7 @@ where
pub fn rev_raw_keys_prefix<'a, P>(
self: &'a Arc<Self>,
prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{

View file

@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
#[implement(super::Map)]
pub fn rev_stream<'a, K, V>(
self: &'a Arc<Self>,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
where
K: Deserialize<'a> + Send,
V: Deserialize<'a> + Send,

View file

@ -20,7 +20,7 @@ use crate::{
pub fn rev_stream_from<'a, K, V, P>(
self: &'a Arc<Self>,
from: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -55,7 +55,7 @@ where
pub fn rev_stream_raw_from<'a, K, V, P>(
self: &'a Arc<Self>,
from: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send,

View file

@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
pub fn rev_stream_prefix<'a, K, V, P>(
self: &'a Arc<Self>,
prefix: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -50,7 +50,7 @@ where
pub fn rev_stream_raw_prefix<'a, K, V, P>(
self: &'a Arc<Self>,
prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a,
@ -68,7 +68,7 @@ where
pub fn rev_raw_stream_prefix<'a, P>(
self: &'a Arc<Self>,
prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{

View file

@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
#[implement(super::Map)]
pub fn stream<'a, K, V>(
self: &'a Arc<Self>,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
where
K: Deserialize<'a> + Send,
V: Deserialize<'a> + Send,

View file

@ -19,7 +19,7 @@ use crate::{
pub fn stream_from<'a, K, V, P>(
self: &'a Arc<Self>,
from: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -53,7 +53,7 @@ where
pub fn stream_raw_from<'a, K, V, P>(
self: &'a Arc<Self>,
from: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send,

View file

@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
pub fn stream_prefix<'a, K, V, P>(
self: &'a Arc<Self>,
prefix: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -50,7 +50,7 @@ where
pub fn stream_raw_prefix<'a, K, V, P>(
self: &'a Arc<Self>,
prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a,
@ -68,7 +68,7 @@ where
pub fn raw_stream_prefix<'a, P>(
self: &'a Arc<Self>,
prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{

View file

@ -443,7 +443,7 @@ pub(crate) fn into_send_seek(result: stream::State<'_>) -> stream::State<'static
unsafe { std::mem::transmute(result) }
}
-fn into_recv_seek(result: stream::State<'static>) -> stream::State<'_> {
+fn into_recv_seek(result: stream::State<'static>) -> stream::State<'static> {
// SAFETY: This is to receive the State from the channel; see above.
unsafe { std::mem::transmute(result) }
}
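// NOTE (assumption): with both ends now `'static` this transmute is an
// identity conversion, presumably kept for symmetry with `into_send_seek`,
// which still erases the borrowed lifetime when crossing the channel.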

View file

@ -326,7 +326,7 @@ fn ser_array() {
}
#[test]
-#[ignore]
+#[ignore = "arrayvec deserialization is not implemented (separators)"]
fn de_array() {
let a: u64 = 123_456;
let b: u64 = 987_654;
@ -358,7 +358,7 @@ fn de_array() {
}
#[test]
-#[ignore]
+#[ignore = "Nested sequences are not supported"]
fn de_complex() {
type Key<'a> = (&'a UserId, ArrayVec<u64, 2>, &'a RoomId);

View file

@ -1,6 +1,6 @@
use std::collections::BTreeMap;
-use conduwuit::{Result, pdu::PduBuilder};
+use conduwuit::{Result, info, pdu::PduBuilder};
use futures::FutureExt;
use ruma::{
RoomId, RoomVersionId,
@ -26,7 +26,7 @@ use crate::Services;
/// used to issue admin commands by talking to the server user inside it.
pub async fn create_admin_room(services: &Services) -> Result {
let room_id = RoomId::new(services.globals.server_name());
-let room_version = &services.config.default_room_version;
+let room_version = &RoomVersionId::V11;
let _short_id = services
.rooms
@ -45,10 +45,13 @@ pub async fn create_admin_room(services: &Services) -> Result {
match room_version {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
RoomCreateEventContent::new_v1(server_user.into()),
-| _ => RoomCreateEventContent::new_v11(),
+| V11 => RoomCreateEventContent::new_v11(),
+| _ => RoomCreateEventContent::new_v12(),
}
};
info!("Creating admin room {} with version {}", room_id, room_version);
// 1. The room create event
services
.rooms
@ -61,7 +64,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
..create_content
}),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()
@ -77,7 +80,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&RoomMemberEventContent::new(MembershipState::Join),
),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()
@ -95,7 +98,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
..Default::default()
}),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()
@ -108,7 +111,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomJoinRulesEventContent::new(JoinRule::Invite)),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()
@ -124,7 +127,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared),
),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()
@ -140,7 +143,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&RoomGuestAccessEventContent::new(GuestAccess::Forbidden),
),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()
@ -154,7 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomNameEventContent::new(room_name)),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()
@ -168,7 +171,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://continuwuity.org/", services.config.server_name),
}),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()
@ -186,7 +189,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
alt_aliases: Vec::new(),
}),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()
@ -204,7 +207,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomPreviewUrlsEventContent { disabled: true }),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.boxed()

View file

@ -55,7 +55,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
&RoomMemberEventContent::new(MembershipState::Invite),
),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.await?;
@ -69,7 +69,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
&RoomMemberEventContent::new(MembershipState::Join),
),
user_id,
-&room_id,
+Some(&room_id),
&state_lock,
)
.await?;
@ -83,7 +83,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
&RoomMemberEventContent::new(MembershipState::Invite),
),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.await?;
@ -111,7 +111,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
.build_and_append_pdu(
PduBuilder::state(String::new(), &room_power_levels),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.await?;
@ -135,7 +135,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
.build_and_append_pdu(
PduBuilder::timeline(&RoomMessageEventContent::text_markdown(welcome_message)),
server_user,
-&room_id,
+Some(&room_id),
&state_lock,
)
.await?;
@ -218,7 +218,7 @@ pub async fn revoke_admin(&self, user_id: &UserId) -> Result {
..event
}),
self.services.globals.server_user.as_ref(),
-&room_id,
+Some(&room_id),
&state_lock,
)
.await

View file

@ -393,13 +393,13 @@ impl Service {
return Ok(());
};
-let response_sender = if self.is_admin_room(pdu.room_id()).await {
+let response_sender = if self.is_admin_room(pdu.room_id().unwrap()).await {
&self.services.globals.server_user
} else {
pdu.sender()
};
-self.respond_to_room(content, pdu.room_id(), response_sender)
+self.respond_to_room(content, pdu.room_id().unwrap(), response_sender)
.boxed()
.await
}
@ -419,7 +419,7 @@ impl Service {
.build_and_append_pdu(
PduBuilder::timeline(&self.text_or_file(content).await),
user_id,
-room_id,
+Some(room_id),
&state_lock,
)
.await
@ -447,7 +447,12 @@ impl Service {
self.services
.timeline
-.build_and_append_pdu(PduBuilder::timeline(&content), user_id, room_id, state_lock)
+.build_and_append_pdu(
+PduBuilder::timeline(&content),
+user_id,
+Some(room_id),
+state_lock,
+)
.await?;
Ok(())
@ -484,7 +489,10 @@ impl Service {
}
// Prevent unescaped !admin from being used outside of the admin room
-if is_public_prefix && !self.is_admin_room(event.room_id()).await {
+if event.room_id().is_some()
+&& is_public_prefix
+&& !self.is_admin_room(event.room_id().unwrap()).await
+{
return false;
}
@ -497,7 +505,7 @@ impl Service {
// the administrator can execute commands as the server user
let emergency_password_set = self.services.server.config.emergency_password.is_some();
let from_server = event.sender() == server_user && !emergency_password_set;
-if from_server && self.is_admin_room(event.room_id()).await {
+if from_server && self.is_admin_room(event.room_id().unwrap()).await {
return false;
}

View file

@ -215,8 +215,8 @@ async fn db_lt_12(services: &Services) -> Result<()> {
for username in &services
.users
.list_local_users()
-.map(UserId::to_owned)
-.collect::<Vec<_>>()
+.map(ToOwned::to_owned)
+.collect::<Vec<OwnedUserId>>()
.await
{
let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name)
@ -295,8 +295,8 @@ async fn db_lt_13(services: &Services) -> Result<()> {
for username in &services
.users
.list_local_users()
-.map(UserId::to_owned)
-.collect::<Vec<_>>()
+.map(ToOwned::to_owned)
+.collect::<Vec<OwnedUserId>>()
.await
{
let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name)

View file

@ -183,8 +183,8 @@ impl Service {
.services
.users
.list_local_users()
-.map(UserId::to_owned)
-.collect::<Vec<_>>()
+.map(ToOwned::to_owned)
+.collect::<Vec<OwnedUserId>>()
.await
{
let presence = self.db.get_presence(user_id).await;

View file

@ -178,7 +178,7 @@ impl Service {
pub fn get_pushkeys<'a>(
&'a self,
sender: &'a UserId,
-) -> impl Stream<Item = &str> + Send + 'a {
+) -> impl Stream<Item = &'a str> + Send + 'a {
let prefix = (sender, Interfix);
self.db
.senderkey_pusher
@ -287,18 +287,22 @@ impl Service {
{
let mut notify = None;
let mut tweaks = Vec::new();
if event.room_id().is_none() {
// TODO(hydra): does this matter?
return Ok(());
}
let power_levels: RoomPowerLevelsEventContent = self
.services
.state_accessor
.room_state_get(event.room_id(), &StateEventType::RoomPowerLevels, "")
.room_state_get(event.room_id().unwrap(), &StateEventType::RoomPowerLevels, "")
.await
.and_then(|event| event.get_content())
.unwrap_or_default();
let serialized = event.to_format();
for action in self
-.get_actions(user, &ruleset, &power_levels, &serialized, event.room_id())
+.get_actions(user, &ruleset, &power_levels, &serialized, event.room_id().unwrap())
.await
{
let n = match action {
@ -426,7 +430,7 @@ impl Service {
let mut notifi = Notification::new(d);
notifi.event_id = Some(event.event_id().to_owned());
-notifi.room_id = Some(event.room_id().to_owned());
+notifi.room_id = Some(event.room_id().unwrap().to_owned());
if http
.data
.get("org.matrix.msc4076.disable_badge_count")
@ -470,14 +474,14 @@ impl Service {
notifi.room_name = self
.services
.state_accessor
-.get_name(event.room_id())
+.get_name(event.room_id().unwrap())
.await
.ok();
notifi.room_alias = self
.services
.state_accessor
-.get_canonical_alias(event.room_id())
+.get_canonical_alias(event.room_id().unwrap())
.await
.ok();

View file

@ -178,7 +178,7 @@ impl Service {
pub fn local_aliases_for_room<'a>(
&'a self,
room_id: &'a RoomId,
-) -> impl Stream<Item = &RoomAliasId> + Send + 'a {
+) -> impl Stream<Item = &'a RoomAliasId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.aliasid_alias
@ -188,7 +188,9 @@ impl Service {
}
#[tracing::instrument(skip(self), level = "debug")]
-pub fn all_local_aliases<'a>(&'a self) -> impl Stream<Item = (&RoomId, &str)> + Send + 'a {
+pub fn all_local_aliases<'a>(
+&'a self,
+) -> impl Stream<Item = (&'a RoomId, &'a str)> + Send + 'a {
self.db
.alias_roomid
.stream()

View file

@ -195,11 +195,11 @@ async fn get_auth_chain_inner(
debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events");
},
| Ok(pdu) => {
-if pdu.room_id != room_id {
+if pdu.room_id.is_some() && pdu.room_id != Some(room_id.to_owned()) {
return Err!(Request(Forbidden(error!(
?event_id,
?room_id,
-wrong_room_id = ?pdu.room_id,
+wrong_room_id = ?pdu.room_id.unwrap(),
"auth event for incorrect room"
))));
}

View file

@ -58,6 +58,8 @@ pub async fn handle_incoming_pdu<'a>(
value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool,
) -> Result<Option<RawPduId>> {
// TODO(hydra): Room IDs should be calculated before this function is called
assert!(!room_id.is_empty(), "room ID cannot be empty");
// 1. Skip the PDU if we already have it as a timeline event
if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await {
return Ok(Some(pdu_id));

View file

@ -139,6 +139,7 @@ where
&pdu_event,
None, // TODO: third party invite
state_fetch,
create_event.as_pdu(),
)
.await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;

View file

@ -99,7 +99,10 @@ impl Service {
}
fn check_room_id<Pdu: Event>(room_id: &RoomId, pdu: &Pdu) -> Result {
-if pdu.room_id() != room_id {
+if pdu
+.room_id()
+.is_some_and(|claimed_room_id| claimed_room_id != room_id)
+{
return Err!(Request(InvalidParam(error!(
pdu_event_id = ?pdu.event_id(),
pdu_room_id = ?pdu.room_id(),

View file

@ -102,6 +102,7 @@ where
&incoming_pdu,
None, // TODO: third party invite
|ty, sk| state_fetch(ty.clone(), sk.into()),
create_event.as_pdu(),
)
.await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
@ -123,6 +124,7 @@ where
incoming_pdu.sender(),
incoming_pdu.state_key(),
incoming_pdu.content(),
&room_version,
)
.await?;
@ -140,6 +142,7 @@ where
&incoming_pdu,
None, // third-party invite
state_fetch,
create_event.as_pdu(),
)
.await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
@ -156,7 +159,7 @@ where
!self
.services
.state_accessor
-.user_can_redact(&redact_id, incoming_pdu.sender(), incoming_pdu.room_id(), true)
+.user_can_redact(&redact_id, incoming_pdu.sender(), room_id, true)
.await?,
};
@ -313,6 +316,7 @@ where
state_ids_compressed,
soft_fail,
&state_lock,
room_id,
)
.await?;
@ -347,6 +351,7 @@ where
state_ids_compressed,
soft_fail,
&state_lock,
room_id,
)
.await?;

View file

@ -60,7 +60,7 @@ impl Data {
target: ShortEventId,
from: PduCount,
dir: Direction,
-) -> impl Stream<Item = (PduCount, impl Event)> + Send + '_ {
+) -> impl Stream<Item = (PduCount, impl Event)> + Send + 'a {
// Query from exact position then filter excludes it (saturating_inc could skip
// events at min/max boundaries)
let from_unsigned = from.into_unsigned();

View file

@ -65,7 +65,7 @@ impl Data {
&'a self,
room_id: &'a RoomId,
since: u64,
-) -> impl Stream<Item = ReceiptItem<'_>> + Send + 'a {
+) -> impl Stream<Item = ReceiptItem<'a>> + Send + 'a {
type Key<'a> = (&'a RoomId, u64, &'a UserId);
type KeyVal<'a> = (Key<'a>, CanonicalJsonObject);

View file

@ -112,7 +112,7 @@ impl Service {
&'a self,
room_id: &'a RoomId,
since: u64,
-) -> impl Stream<Item = ReceiptItem<'_>> + Send + 'a {
+) -> impl Stream<Item = ReceiptItem<'a>> + Send + 'a {
self.db.readreceipts_since(room_id, since)
}

View file

@ -104,7 +104,7 @@ pub fn deindex_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_b
pub async fn search_pdus<'a>(
&'a self,
query: &'a RoomQuery<'a>,
-) -> Result<(usize, impl Stream<Item = impl Event + use<>> + Send + '_)> {
+) -> Result<(usize, impl Stream<Item = impl Event + use<>> + Send + 'a)> {
let pdu_ids: Vec<_> = self.search_pdu_ids(query).await?.collect().await;
let filter = &query.criteria.filter;
@ -124,7 +124,7 @@ pub async fn search_pdus<'a>(
.wide_filter_map(move |pdu| async move {
self.services
.state_accessor
-.user_can_see_event(query.user_id?, pdu.room_id(), pdu.event_id())
+.user_can_see_event(query.user_id?, pdu.room_id().unwrap(), pdu.event_id())
.await
.then_some(pdu)
})
@ -137,10 +137,10 @@ pub async fn search_pdus<'a>(
// result is modeled as a stream such that callers don't have to be refactored
// though an additional async/wrap still exists for now
#[implement(Service)]
-pub async fn search_pdu_ids(
-&self,
-query: &RoomQuery<'_>,
-) -> Result<impl Stream<Item = RawPduId> + Send + '_ + use<'_>> {
+pub async fn search_pdu_ids<'a>(
+&'a self,
+query: &'a RoomQuery<'_>,
+) -> Result<impl Stream<Item = RawPduId> + Send + 'a + use<'a>> {
let shortroomid = self.services.short.get_shortroomid(query.room_id).await?;
let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await;
@ -173,7 +173,7 @@ fn search_pdu_ids_query_words<'a>(
&'a self,
shortroomid: ShortRoomId,
word: &'a str,
-) -> impl Stream<Item = RawPduId> + Send + '_ {
+) -> impl Stream<Item = RawPduId> + Send + 'a {
self.search_pdu_ids_query_word(shortroomid, word)
.map(move |key| -> RawPduId {
let key = &key[prefix_len(word)..];
@ -183,11 +183,11 @@ fn search_pdu_ids_query_words<'a>(
/// Iterate over raw database results for a word
#[implement(Service)]
-fn search_pdu_ids_query_word(
-&self,
+fn search_pdu_ids_query_word<'a>(
+&'a self,
shortroomid: ShortRoomId,
-word: &str,
-) -> impl Stream<Item = Val<'_>> + Send + '_ + use<'_> {
+word: &'a str,
+) -> impl Stream<Item = Val<'a>> + Send + 'a + use<'a> {
// rustc says const'ing this not yet stable
let end_id: RawPduId = PduId {
shortroomid,

View file

@ -62,7 +62,7 @@ pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEvent
pub fn multi_get_or_create_shorteventid<'a, I>(
&'a self,
event_ids: I,
-) -> impl Stream<Item = ShortEventId> + Send + '_
+) -> impl Stream<Item = ShortEventId> + Send + 'a
where
I: Iterator<Item = &'a EventId> + Clone + Debug + Send + 'a,
{

View file

@ -1,6 +1,7 @@
use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc};
use async_trait::async_trait;
use conduwuit::{RoomVersion, debug};
use conduwuit_core::{
Event, PduEvent, Result, err,
result::FlatOk,
@ -148,7 +149,7 @@ impl Service {
.roomid_spacehierarchy_cache
.lock()
.await
-.remove(&pdu.room_id);
+.remove(room_id);
},
| _ => continue,
}
@ -239,7 +240,7 @@ impl Service {
/// This adds all current state events (not including the incoming event)
/// to `stateid_pduid` and adds the incoming event to `eventid_statehash`.
#[tracing::instrument(skip(self, new_pdu), level = "debug")]
-pub async fn append_to_state(&self, new_pdu: &PduEvent) -> Result<u64> {
+pub async fn append_to_state(&self, new_pdu: &PduEvent, room_id: &RoomId) -> Result<u64> {
const BUFSIZE: usize = size_of::<u64>();
let shorteventid = self
@ -248,7 +249,7 @@ impl Service {
.get_or_create_shorteventid(&new_pdu.event_id)
.await;
-let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id).await;
+let previous_shortstatehash = self.get_room_shortstatehash(room_id).await;
if let Ok(p) = previous_shortstatehash {
self.db
@ -319,7 +320,11 @@ impl Service {
}
#[tracing::instrument(skip_all, level = "debug")]
-pub async fn summary_stripped<'a, E>(&self, event: &'a E) -> Vec<Raw<AnyStrippedStateEvent>>
+pub async fn summary_stripped<'a, E>(
+&self,
+event: &'a E,
+room_id: &RoomId,
+) -> Vec<Raw<AnyStrippedStateEvent>>
where
E: Event + Send + Sync,
&'a E: Event + Send,
@ -338,7 +343,7 @@ impl Service {
let fetches = cells.into_iter().map(|(event_type, state_key)| {
self.services
.state_accessor
-.room_state_get(event.room_id(), event_type, state_key)
+.room_state_get(room_id, event_type, state_key)
});
join_all(fetches)
@ -388,7 +393,7 @@ impl Service {
pub fn get_forward_extremities<'a>(
&'a self,
room_id: &'a RoomId,
-) -> impl Stream<Item = &EventId> + Send + '_ {
+) -> impl Stream<Item = &'a EventId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
@ -421,7 +426,7 @@ impl Service {
}
/// This fetches auth events from the current state.
#[tracing::instrument(skip(self, content), level = "debug")]
#[tracing::instrument(skip(self, content, room_version), level = "trace")]
pub async fn get_auth_events(
&self,
room_id: &RoomId,
@ -429,13 +434,15 @@ impl Service {
sender: &UserId,
state_key: Option<&str>,
content: &serde_json::value::RawValue,
room_version: &RoomVersion,
) -> Result<StateMap<PduEvent>> {
let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else {
return Ok(HashMap::new());
};
-let auth_types = state_res::auth_types_for_event(kind, sender, state_key, content)?;
+let auth_types =
+state_res::auth_types_for_event(kind, sender, state_key, content, room_version)?;
debug!(?auth_types, "Auth types for event");
let sauthevents: HashMap<_, _> = auth_types
.iter()
.stream()
@ -448,6 +455,7 @@ impl Service {
})
.collect()
.await;
debug!(?sauthevents, "Auth events to fetch");
let (state_keys, event_ids): (Vec<_>, Vec<_>) = self
.services
@ -461,7 +469,7 @@ impl Service {
})
.unzip()
.await;
debug!(?state_keys, ?event_ids, "Auth events found in state");
self.services
.short
.multi_get_eventid_from_short(event_ids.into_iter().stream())
@ -473,6 +481,7 @@ impl Service {
.get_pdu(&event_id)
.await
.map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu))
.inspect_err(|e| warn!("Failed to get auth event {event_id}: {e:?}"))
.ok()
})
.collect()

View file

@ -161,7 +161,7 @@ pub async fn user_can_invite(
&RoomMemberEventContent::new(MembershipState::Invite),
),
sender,
-room_id,
+Some(room_id),
state_lock,
)
.await

View file

@ -144,7 +144,7 @@ pub fn clear_appservice_in_room_cache(&self) { self.appservice_in_room_cache.wri
pub fn room_servers<'a>(
&'a self,
room_id: &'a RoomId,
-) -> impl Stream<Item = &ServerName> + Send + 'a {
+) -> impl Stream<Item = &'a ServerName> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomserverids
@ -167,7 +167,7 @@ pub async fn server_in_room<'a>(&'a self, server: &'a ServerName, room_id: &'a R
pub fn server_rooms<'a>(
&'a self,
server: &'a ServerName,
-) -> impl Stream<Item = &RoomId> + Send + 'a {
+) -> impl Stream<Item = &'a RoomId> + Send + 'a {
let prefix = (server, Interfix);
self.db
.serverroomids
@ -202,7 +202,7 @@ pub fn get_shared_rooms<'a>(
&'a self,
user_a: &'a UserId,
user_b: &'a UserId,
-) -> impl Stream<Item = &RoomId> + Send + 'a {
+) -> impl Stream<Item = &'a RoomId> + Send + 'a {
use conduwuit::utils::set;
let a = self.rooms_joined(user_a);
@ -216,7 +216,7 @@ pub fn get_shared_rooms<'a>(
pub fn room_members<'a>(
&'a self,
room_id: &'a RoomId,
-) -> impl Stream<Item = &UserId> + Send + 'a {
+) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomuserid_joined
@ -239,7 +239,7 @@ pub async fn room_joined_count(&self, room_id: &RoomId) -> Result<u64> {
pub fn local_users_in_room<'a>(
&'a self,
room_id: &'a RoomId,
-) -> impl Stream<Item = &UserId> + Send + 'a {
+) -> impl Stream<Item = &'a UserId> + Send + 'a {
self.room_members(room_id)
.ready_filter(|user| self.services.globals.user_is_local(user))
}
@ -251,7 +251,7 @@ pub fn local_users_in_room<'a>(
pub fn active_local_users_in_room<'a>(
&'a self,
room_id: &'a RoomId,
-) -> impl Stream<Item = &UserId> + Send + 'a {
+) -> impl Stream<Item = &'a UserId> + Send + 'a {
self.local_users_in_room(room_id)
.filter(|user| self.services.users.is_active(user))
}
@ -273,7 +273,7 @@ pub async fn room_invited_count(&self, room_id: &RoomId) -> Result<u64> {
pub fn room_useroncejoined<'a>(
&'a self,
room_id: &'a RoomId,
-) -> impl Stream<Item = &UserId> + Send + 'a {
+) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomuseroncejoinedids
@ -288,7 +288,7 @@ pub fn room_useroncejoined<'a>(
pub fn room_members_invited<'a>(
&'a self,
room_id: &'a RoomId,
-) -> impl Stream<Item = &UserId> + Send + 'a {
+) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomuserid_invitecount
@ -303,7 +303,7 @@ pub fn room_members_invited<'a>(
pub fn room_members_knocked<'a>(
&'a self,
room_id: &'a RoomId,
-) -> impl Stream<Item = &UserId> + Send + 'a {
+) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomuserid_knockedcount
@ -347,7 +347,7 @@ pub async fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result
pub fn rooms_joined<'a>(
&'a self,
user_id: &'a UserId,
-) -> impl Stream<Item = &RoomId> + Send + 'a {
+) -> impl Stream<Item = &'a RoomId> + Send + 'a {
self.db
.userroomid_joined
.keys_raw_prefix(user_id)
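
All of the signature changes in this file follow one pattern: a return type like `impl Stream<Item = &ServerName>` elides a lifetime that actually resolves to the named `'a`, which recent nightly Clippy flags via the `elided_named_lifetimes` lint. A self-contained sketch of the same fix:

```rust
// With 'a already named on &self, an elided `&str` in the return type
// quietly resolves to 'a; spelling it out makes the borrow explicit.
struct Directory {
    names: Vec<String>,
}

impl Directory {
    // Before: `impl Iterator<Item = &str> + 'a`; after: `Item = &'a str`.
    fn with_prefix<'a>(&'a self, prefix: &'a str) -> impl Iterator<Item = &'a str> + 'a {
        self.names
            .iter()
            .filter(move |n| n.starts_with(prefix))
            .map(String::as_str)
    }
}

fn main() {
    let dir = Directory { names: vec!["alpha".into(), "beta".into()] };
    assert_eq!(dir.with_prefix("a").count(), 1);
}
```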

View file

@ -81,7 +81,7 @@ pub async fn servers_route_via(&self, room_id: &RoomId) -> Result<Vec<OwnedServe
pub fn servers_invite_via<'a>(
&'a self,
room_id: &'a RoomId,
) -> impl Stream<Item = &ServerName> + Send + 'a {
) -> impl Stream<Item = &'a ServerName> + Send + 'a {
type KeyVal<'a> = (Ignore, Vec<&'a ServerName>);
self.db

View file

@ -42,6 +42,7 @@ pub async fn append_incoming_pdu<'a, Leaves>(
state_ids_compressed: Arc<CompressedState>,
soft_fail: bool,
state_lock: &'a RoomMutexGuard,
room_id: &'a ruma::RoomId,
) -> Result<Option<RawPduId>>
where
Leaves: Iterator<Item = &'a EventId> + Send + 'a,
@ -51,24 +52,24 @@ where
// fail.
self.services
.state
.set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed)
.set_event_state(&pdu.event_id, room_id, state_ids_compressed)
.await?;
if soft_fail {
self.services
.pdu_metadata
.mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref));
.mark_as_referenced(room_id, pdu.prev_events.iter().map(AsRef::as_ref));
self.services
.state
.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)
.set_forward_extremities(room_id, new_room_leaves, state_lock)
.await;
return Ok(None);
}
let pdu_id = self
.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)
.append_pdu(pdu, pdu_json, new_room_leaves, state_lock, room_id)
.await?;
Ok(Some(pdu_id))
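
`append_incoming_pdu` now receives the room ID explicitly instead of reading `pdu.room_id`, while the soft-fail branch keeps its existing contract. A compact sketch of that contract:

```rust
// A soft-failed event still has its state stored and its prev_events
// marked as referenced, but it is never appended to the timeline,
// signalled here by Ok(None).
fn append_incoming(soft_fail: bool, next_pdu_id: u64) -> Result<Option<u64>, &'static str> {
    // set_event_state(...) happens first in either case.
    if soft_fail {
        // mark_as_referenced(...) + set_forward_extremities(...) only.
        return Ok(None);
    }
    // The full append path produces a real pdu id.
    Ok(Some(next_pdu_id))
}

fn main() {
    assert_eq!(append_incoming(true, 7), Ok(None));
    assert_eq!(append_incoming(false, 7), Ok(Some(7)));
}
```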
@ -88,6 +89,7 @@ pub async fn append_pdu<'a, Leaves>(
mut pdu_json: CanonicalJsonObject,
leaves: Leaves,
state_lock: &'a RoomMutexGuard,
room_id: &'a ruma::RoomId,
) -> Result<RawPduId>
where
Leaves: Iterator<Item = &'a EventId> + Send + 'a,
@ -98,7 +100,7 @@ where
let shortroomid = self
.services
.short
.get_shortroomid(pdu.room_id())
.get_shortroomid(room_id)
.await
.map_err(|_| err!(Database("Room does not exist")))?;
@ -151,14 +153,14 @@ where
// We must keep track of all events that have been referenced.
self.services
.pdu_metadata
.mark_as_referenced(pdu.room_id(), pdu.prev_events().map(AsRef::as_ref));
.mark_as_referenced(room_id, pdu.prev_events().map(AsRef::as_ref));
self.services
.state
.set_forward_extremities(pdu.room_id(), leaves, state_lock)
.set_forward_extremities(room_id, leaves, state_lock)
.await;
let insert_lock = self.mutex_insert.lock(pdu.room_id()).await;
let insert_lock = self.mutex_insert.lock(room_id).await;
let count1 = self.services.globals.next_count().unwrap();
@ -166,11 +168,11 @@ where
// appending fails
self.services
.read_receipt
.private_read_set(pdu.room_id(), pdu.sender(), count1);
.private_read_set(room_id, pdu.sender(), count1);
self.services
.user
.reset_notification_counts(pdu.sender(), pdu.room_id());
.reset_notification_counts(pdu.sender(), room_id);
let count2 = PduCount::Normal(self.services.globals.next_count().unwrap());
let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count2 }.into();
@ -184,14 +186,14 @@ where
let power_levels: RoomPowerLevelsEventContent = self
.services
.state_accessor
.room_state_get_content(pdu.room_id(), &StateEventType::RoomPowerLevels, "")
.room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "")
.await
.unwrap_or_default();
let mut push_target: HashSet<_> = self
.services
.state_cache
.active_local_users_in_room(pdu.room_id())
.active_local_users_in_room(room_id)
.map(ToOwned::to_owned)
// Don't notify the sender of their own events, and don't send from ignored users
.ready_filter(|user| *user != pdu.sender())
@ -230,7 +232,7 @@ where
for action in self
.services
.pusher
.get_actions(user, &rules_for_user, &power_levels, &serialized, pdu.room_id())
.get_actions(user, &rules_for_user, &power_levels, &serialized, room_id)
.await
{
match action {
@ -268,20 +270,20 @@ where
}
self.db
.increment_notification_counts(pdu.room_id(), notifies, highlights);
.increment_notification_counts(room_id, notifies, highlights);
match *pdu.kind() {
| TimelineEventType::RoomRedaction => {
use RoomVersionId::*;
let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?;
let room_version_id = self.services.state.get_room_version(room_id).await?;
match room_version_id {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
if let Some(redact_id) = pdu.redacts() {
if self
.services
.state_accessor
.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false)
.user_can_redact(redact_id, pdu.sender(), room_id, false)
.await?
{
self.redact_pdu(redact_id, pdu, shortroomid).await?;
@ -294,7 +296,7 @@ where
if self
.services
.state_accessor
.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false)
.user_can_redact(redact_id, pdu.sender(), room_id, false)
.await?
{
self.redact_pdu(redact_id, pdu, shortroomid).await?;
@ -310,7 +312,7 @@ where
.roomid_spacehierarchy_cache
.lock()
.await
.remove(pdu.room_id());
.remove(room_id);
},
| TimelineEventType::RoomMember => {
if let Some(state_key) = pdu.state_key() {
@ -320,8 +322,12 @@ where
let content: RoomMemberEventContent = pdu.get_content()?;
let stripped_state = match content.membership {
| MembershipState::Invite | MembershipState::Knock =>
self.services.state.summary_stripped(pdu).await.into(),
| MembershipState::Invite | MembershipState::Knock => self
.services
.state
.summary_stripped(pdu, room_id)
.await
.into(),
| _ => None,
};
@ -331,7 +337,7 @@ where
self.services
.state_cache
.update_membership(
pdu.room_id(),
room_id,
target_user_id,
content,
pdu.sender(),
@ -392,7 +398,7 @@ where
if self
.services
.state_cache
.appservice_in_room(pdu.room_id(), appservice)
.appservice_in_room(room_id, appservice)
.await
{
self.services
@ -430,12 +436,12 @@ where
let matching_aliases = |aliases: NamespaceRegex| {
self.services
.alias
.local_aliases_for_room(pdu.room_id())
.local_aliases_for_room(room_id)
.ready_any(move |room_alias| aliases.is_match(room_alias.as_str()))
};
if matching_aliases(appservice.aliases.clone()).await
|| appservice.rooms.is_match(pdu.room_id().as_str())
|| appservice.rooms.is_match(room_id.as_str())
|| matching_users(&appservice.users)
{
self.services

View file

@ -1,5 +1,6 @@
use std::{collections::HashSet, iter::once};
use conduwuit::{RoomVersion, debug_warn, trace};
use conduwuit_core::{
Err, Result, implement,
matrix::{event::Event, pdu::PduBuilder},
@ -11,6 +12,7 @@ use ruma::{
events::{
TimelineEventType,
room::{
create::RoomCreateEventContent,
member::{MembershipState, RoomMemberEventContent},
redaction::RoomRedactionEventContent,
},
@ -23,32 +25,36 @@ use super::RoomMutexGuard;
/// takes a roomid_mutex_state, meaning that only this function is able to
/// mutate the room state.
#[implement(super::Service)]
#[tracing::instrument(skip(self, state_lock), level = "debug")]
#[tracing::instrument(skip(self, state_lock), level = "trace")]
pub async fn build_and_append_pdu(
&self,
pdu_builder: PduBuilder,
sender: &UserId,
room_id: &RoomId,
room_id: Option<&RoomId>,
state_lock: &RoomMutexGuard,
) -> Result<OwnedEventId> {
let (pdu, pdu_json) = self
.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)
.await?;
if self.services.admin.is_admin_room(pdu.room_id()).await {
let room_id = pdu.room_id_or_hash();
trace!("Checking if room {room_id} is an admin room");
if self.services.admin.is_admin_room(&room_id).await {
trace!("Room {room_id} is an admin room, checking PDU for admin room restrictions");
self.check_pdu_for_admin_room(&pdu, sender).boxed().await?;
}
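
`room_id_or_hash()` is the key accessor for v12 support: a create event carries no room ID, because hash-based room versions derive the room ID from the create event's own ID. A hypothetical sketch (the real method lives on the PDU type):

```rust
// A PDU without an explicit room_id can still be attributed to a room by
// reusing its event ID with the room sigil.
struct Pdu {
    event_id: String,        // e.g. "$abc123"
    room_id: Option<String>, // None while an m.room.create is being built
}

impl Pdu {
    fn room_id_or_hash(&self) -> String {
        self.room_id
            .clone()
            .unwrap_or_else(|| format!("!{}", self.event_id.trim_start_matches('$')))
    }
}

fn main() {
    let create = Pdu { event_id: "$abc123".into(), room_id: None };
    assert_eq!(create.room_id_or_hash(), "!abc123");
}
```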
// If redaction event is not authorized, do not append it to the timeline
if *pdu.kind() == TimelineEventType::RoomRedaction {
use RoomVersionId::*;
match self.services.state.get_room_version(pdu.room_id()).await? {
trace!("Running redaction checks for room {room_id}");
match self.services.state.get_room_version(&room_id).await? {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
if let Some(redact_id) = pdu.redacts() {
if !self
.services
.state_accessor
.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false)
.user_can_redact(redact_id, pdu.sender(), &room_id, false)
.await?
{
return Err!(Request(Forbidden("User cannot redact this event.")));
@ -61,7 +67,7 @@ pub async fn build_and_append_pdu(
if !self
.services
.state_accessor
.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false)
.user_can_redact(redact_id, pdu.sender(), &room_id, false)
.await?
{
return Err!(Request(Forbidden("User cannot redact this event.")));
@ -72,6 +78,7 @@ pub async fn build_and_append_pdu(
}
if *pdu.kind() == TimelineEventType::RoomMember {
trace!("Running room member checks for room {room_id}");
let content: RoomMemberEventContent = pdu.get_content()?;
if content.join_authorized_via_users_server.is_some()
@ -93,12 +100,27 @@ pub async fn build_and_append_pdu(
)));
}
}
if *pdu.kind() == TimelineEventType::RoomCreate {
trace!("Running room create checks for room {room_id}");
let content: RoomCreateEventContent = pdu.get_content()?;
let room_features = RoomVersion::new(&content.room_version)?;
if room_features.room_ids_as_hashes {
// bootstrap shortid for room
debug_warn!(%room_id, "Bootstrapping shortid for room");
self.services
.short
.get_or_create_shortroomid(&room_id)
.await;
}
}
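
A sketch of the bootstrap above, with a `HashMap` standing in for the short-ID table; the assumption is that hash-based room IDs (`room_ids_as_hashes`) reach this point without a `shortroomid` having been allocated earlier, so one is created eagerly and idempotently:

```rust
use std::collections::HashMap;

struct Shorts {
    next: u64,
    map: HashMap<String, u64>,
}

impl Shorts {
    // Returns the existing short id, or allocates the next one.
    fn get_or_create_shortroomid(&mut self, room_id: &str) -> u64 {
        if let Some(&id) = self.map.get(room_id) {
            return id;
        }
        self.next += 1;
        self.map.insert(room_id.to_owned(), self.next);
        self.next
    }
}

fn main() {
    let mut shorts = Shorts { next: 0, map: HashMap::new() };
    let a = shorts.get_or_create_shortroomid("!abc123");
    assert_eq!(a, shorts.get_or_create_shortroomid("!abc123")); // idempotent
}
```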
// We append to state before appending the pdu, so we don't have a moment in
// time with the pdu without its state. This is okay because append_pdu can't
// fail.
let statehashid = self.services.state.append_to_state(&pdu).await?;
trace!("Appending {} state for room {room_id}", pdu.event_id());
let statehashid = self.services.state.append_to_state(&pdu, &room_id).await?;
trace!("Generating raw ID for PDU {}", pdu.event_id());
let pdu_id = self
.append_pdu(
&pdu,
@ -107,20 +129,22 @@ pub async fn build_and_append_pdu(
// of the room
once(pdu.event_id()),
state_lock,
&room_id,
)
.boxed()
.await?;
// We set the room state after inserting the pdu, so that we never have a moment
// in time where events in the current room state do not exist
trace!("Setting room state for room {room_id}");
self.services
.state
.set_room_state(pdu.room_id(), statehashid, state_lock);
.set_room_state(&room_id, statehashid, state_lock);
let mut servers: HashSet<OwnedServerName> = self
.services
.state_cache
.room_servers(pdu.room_id())
.room_servers(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
@ -141,11 +165,13 @@ pub async fn build_and_append_pdu(
// room_servers() and/or the if statement above
servers.remove(self.services.globals.server_name());
trace!("Sending PDU {} to {} servers", pdu.event_id(), servers.len());
self.services
.sending
.send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id)
.await?;
trace!("Event {} in room {:?} has been appended", pdu.event_id(), room_id);
Ok(pdu.event_id().to_owned())
}
@ -179,7 +205,7 @@ where
let count = self
.services
.state_cache
.room_members(pdu.room_id())
.room_members(&pdu.room_id_or_hash())
.ready_filter(|user| self.services.globals.user_is_local(user))
.ready_filter(|user| *user != target)
.boxed()
@ -203,7 +229,7 @@ where
let count = self
.services
.state_cache
.room_members(pdu.room_id())
.room_members(&pdu.room_id_or_hash())
.ready_filter(|user| self.services.globals.user_is_local(user))
.ready_filter(|user| *user != target)
.boxed()

View file

@ -1,5 +1,6 @@
use std::cmp;
use std::{cmp, collections::HashMap};
use conduwuit::{smallstr::SmallString, trace};
use conduwuit_core::{
Err, Error, Result, err, implement,
matrix::{
@ -11,12 +12,13 @@ use conduwuit_core::{
};
use futures::{StreamExt, TryStreamExt, future, future::ready};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomId, RoomVersionId, UserId,
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId,
UserId,
canonical_json::to_canonical_value,
events::{StateEventType, TimelineEventType, room::create::RoomCreateEventContent},
uint,
};
use serde_json::value::to_raw_value;
use serde_json::value::{RawValue, to_raw_value};
use tracing::warn;
use super::RoomMutexGuard;
@ -26,10 +28,25 @@ pub async fn create_hash_and_sign_event(
&self,
pdu_builder: PduBuilder,
sender: &UserId,
room_id: &RoomId,
room_id: Option<&RoomId>,
_mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room
* state mutex */
) -> Result<(PduEvent, CanonicalJsonObject)> {
fn from_evt(
room_id: OwnedRoomId,
event_type: TimelineEventType,
content: Box<RawValue>,
) -> Result<RoomVersionId> {
if event_type == TimelineEventType::RoomCreate {
let content: RoomCreateEventContent = serde_json::from_str(content.get())?;
Ok(content.room_version)
} else {
Err(Error::InconsistentRoomState(
"non-create event for room of unknown version",
room_id,
))
}
}
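
`from_evt` encodes the fallback rule: when no room version is stored yet, the only legitimate source is the content of the `m.room.create` event being built; anything else is inconsistent state. A plain-`serde_json` sketch of the same extraction (the real code goes through ruma's typed `RoomCreateEventContent`; per the spec, an absent `room_version` means version "1"):

```rust
use serde_json::Value;

fn version_from_create(event_type: &str, content: &str) -> Result<String, String> {
    if event_type != "m.room.create" {
        return Err("non-create event for room of unknown version".into());
    }
    let v: Value = serde_json::from_str(content).map_err(|e| e.to_string())?;
    Ok(v.get("room_version")
        .and_then(Value::as_str)
        .unwrap_or("1") // spec default when the field is absent
        .to_owned())
}

fn main() {
    let got = version_from_create("m.room.create", r#"{"room_version":"12"}"#).unwrap();
    assert_eq!(got, "12");
}
```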
let PduBuilder {
event_type,
content,
@ -38,44 +55,54 @@ pub async fn create_hash_and_sign_event(
redacts,
timestamp,
} = pdu_builder;
let prev_events: Vec<OwnedEventId> = self
// If there was no create event yet, assume we are creating a room
let room_version_id = match room_id {
| Some(room_id) => self
.services
.state
.get_room_version(room_id)
.await
.or_else(|_| from_evt(room_id.to_owned(), event_type.clone(), content.clone()))?,
| None => from_evt(
RoomId::new(self.services.globals.server_name()),
event_type.clone(),
content.clone(),
)?,
};
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
// TODO(hydra): Only create events can lack a room ID.
let prev_events: Vec<OwnedEventId> = match room_id {
| Some(room_id) =>
self.services
.state
.get_forward_extremities(room_id)
.take(20)
.map(Into::into)
.collect()
.await;
.await,
| None => Vec::new(),
};
// If there was no create event yet, assume we are creating a room
let room_version_id = self
.services
let auth_events: HashMap<(StateEventType, SmallString<[u8; 48]>), PduEvent> = match room_id {
| Some(room_id) =>
self.services
.state
.get_room_version(room_id)
.await
.or_else(|_| {
if event_type == TimelineEventType::RoomCreate {
let content: RoomCreateEventContent = serde_json::from_str(content.get())?;
Ok(content.room_version)
} else {
Err(Error::InconsistentRoomState(
"non-create event for room of unknown version",
room_id.to_owned(),
))
}
})?;
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
let auth_events = self
.services
.state
.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)
.await?;
.get_auth_events(
room_id,
&event_type,
sender,
state_key.as_deref(),
&content,
&room_version,
)
.await?,
| None => HashMap::new(),
};
// Our depth is the maximum depth of prev_events + 1
let depth = prev_events
let depth = match room_id {
| Some(_) => prev_events
.iter()
.stream()
.map(Ok)
@ -84,21 +111,28 @@ pub async fn create_hash_and_sign_event(
.ignore_err()
.ready_fold(uint!(0), cmp::max)
.await
.saturating_add(uint!(1));
.saturating_add(uint!(1)),
| None => uint!(1),
};
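
The depth rule now has two arms: a normal event takes the maximum depth among its `prev_events` plus one, while a room-creating event with no `prev_events` starts at depth 1. A worked sketch:

```rust
// max(prev depths) + 1, saturating; an empty prev list yields depth 1.
fn depth(prev_depths: &[u64]) -> u64 {
    prev_depths.iter().copied().max().unwrap_or(0).saturating_add(1)
}

fn main() {
    assert_eq!(depth(&[]), 1);        // m.room.create
    assert_eq!(depth(&[3, 7, 5]), 8); // normal event
}
```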
let mut unsigned = unsigned.unwrap_or_default();
if let Some(room_id) = room_id {
if let Some(state_key) = &state_key {
if let Ok(prev_pdu) = self
.services
.state_accessor
.room_state_get(room_id, &event_type.to_string().into(), state_key)
.room_state_get(room_id, &event_type.clone().to_string().into(), state_key)
.await
{
unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value());
unsigned.insert("prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender())?);
unsigned
.insert("replaces_state".to_owned(), serde_json::to_value(prev_pdu.event_id())?);
.insert("prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender())?);
unsigned.insert(
"replaces_state".to_owned(),
serde_json::to_value(prev_pdu.event_id())?,
);
}
}
}
@ -109,15 +143,15 @@ pub async fn create_hash_and_sign_event(
// The first two events in a room are always m.room.create and m.room.member,
// so any other events with that same depth are illegal.
warn!(
"Had unsafe depth {depth} when creating non-state event in {room_id}. Cowardly \
aborting"
"Had unsafe depth {depth} when creating non-state event in {}. Cowardly aborting",
room_id.expect("room_id is Some here").as_str()
);
return Err!(Request(Unknown("Unsafe depth for non-state event.")));
}
let mut pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: room_id.to_owned(),
room_id: room_id.map(ToOwned::to_owned),
sender: sender.to_owned(),
origin: None,
origin_server_ts: timestamp.map_or_else(
@ -152,11 +186,30 @@ pub async fn create_hash_and_sign_event(
ready(auth_events.get(&key).map(ToOwned::to_owned))
};
let room_id_or_hash = pdu.room_id_or_hash();
let create_pdu = match &pdu.kind {
| TimelineEventType::RoomCreate => None,
| _ => Some(
self.services
.state_accessor
.room_state_get(&room_id_or_hash, &StateEventType::RoomCreate, "")
.await
.map_err(|e| {
err!(Request(Forbidden(warn!("Failed to fetch room create event: {e}"))))
})?,
),
};
let create_event = match &pdu.kind {
| TimelineEventType::RoomCreate => &pdu,
| _ => create_pdu.as_ref().unwrap().as_pdu(),
};
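
The `create_pdu`/`create_event` pair feeds the updated `state_res::auth_check`, which now takes the room's create event directly. The selection rule in miniature, as a hedged sketch:

```rust
// Auth checks need the room's m.room.create event; the create event being
// built has no prior state to fetch it from, so it is its own create event.
fn create_for_auth<'a>(kind: &str, pdu: &'a str, state_create: Option<&'a str>) -> &'a str {
    match kind {
        "m.room.create" => pdu,
        _ => state_create.expect("create event must already exist in room state"),
    }
}

fn main() {
    assert_eq!(create_for_auth("m.room.create", "create-pdu", None), "create-pdu");
    assert_eq!(create_for_auth("m.room.message", "msg-pdu", Some("create-pdu")), "create-pdu");
}
```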
let auth_check = state_res::auth_check(
&room_version,
&pdu,
None, // TODO: third_party_invite
auth_fetch,
create_event,
)
.await
.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
@ -164,6 +217,11 @@ pub async fn create_hash_and_sign_event(
if !auth_check {
return Err!(Request(Forbidden("Event is not authorized.")));
}
trace!(
"Event {} in room {} is authorized",
pdu.event_id,
pdu.room_id.as_ref().map_or("None", |id| id.as_str())
);
// Hash and sign
let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| {
@ -178,13 +236,13 @@ pub async fn create_hash_and_sign_event(
},
}
// Add origin because synapse likes that (and it's required in the spec)
pdu_json.insert(
"origin".to_owned(),
to_canonical_value(self.services.globals.server_name())
.expect("server name is a valid CanonicalJsonValue"),
);
trace!("hashing and signing event {}", pdu.event_id);
if let Err(e) = self
.services
.server_keys
@ -204,10 +262,18 @@ pub async fn create_hash_and_sign_event(
pdu_json.insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into()));
// Check with the policy server
// TODO(hydra): Skip this check for create events (why didn't we do this
// already?)
if room_id.is_some() {
trace!(
"Checking event {} in room {} with policy server",
pdu.event_id,
pdu.room_id.as_ref().map_or("None", |id| id.as_str())
);
match self
.services
.event_handler
.ask_policy_server(&pdu, room_id)
.ask_policy_server(&pdu, &pdu.room_id_or_hash())
.await
{
| Ok(true) => {},
@ -221,13 +287,20 @@ pub async fn create_hash_and_sign_event(
warn!("Failed to check event with policy server: {e}");
},
}
}
// Generate short event id
trace!(
"Generating short event ID for {} in room {}",
pdu.event_id,
pdu.room_id.as_ref().map_or("None", |id| id.as_str())
);
let _shorteventid = self
.services
.short
.get_or_create_shorteventid(&pdu.event_id)
.await;
trace!("New PDU created: {pdu:?}");
Ok((pdu, pdu_json))
}

View file

@ -39,7 +39,11 @@ pub async fn redact_pdu<Pdu: Event + Send + Sync>(
}
}
let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?;
let room_version_id = self
.services
.state
.get_room_version(&pdu.room_id_or_hash())
.await?;
pdu.redact(&room_version_id, reason.to_value())?;

View file

@ -798,7 +798,7 @@ impl Service {
let unread: UInt = self
.services
.user
.notification_count(&user_id, pdu.room_id())
.notification_count(&user_id, &pdu.room_id_or_hash())
.await
.try_into()
.expect("notification count can't go that high");

View file

@ -1,6 +1,6 @@
use std::borrow::Borrow;
use conduwuit::{Err, Result, implement};
use conduwuit::{Err, Result, debug, debug_error, implement};
use ruma::{
CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId,
api::federation::discovery::VerifyKey,
@ -19,9 +19,11 @@ pub async fn get_event_keys(
let required = match required_keys(object, version) {
| Ok(required) => required,
| Err(e) => {
debug_error!("Failed to determine keys required to verify: {e}");
return Err!(BadServerResponse("Failed to determine keys required to verify: {e}"));
},
};
debug!(?required, "Keys required to verify event");
let batch = required
.iter()
@ -61,6 +63,7 @@ where
}
#[implement(super::Service)]
#[tracing::instrument(skip(self))]
pub async fn get_verify_key(
&self,
origin: &ServerName,
@ -70,6 +73,7 @@ pub async fn get_verify_key(
let notary_only = self.services.server.config.only_query_trusted_key_servers;
if let Some(result) = self.verify_keys_for(origin).await.remove(key_id) {
debug!("Found key in cache");
return Ok(result);
}

View file

@ -8,7 +8,7 @@ mod verify;
use std::{collections::BTreeMap, sync::Arc, time::Duration};
use conduwuit::{
Result, Server, implement,
Result, Server, debug, debug_error, debug_warn, implement,
utils::{IterStream, timepoint_from_now},
};
use database::{Deserialized, Json, Map};
@ -112,6 +112,7 @@ async fn add_signing_keys(&self, new_keys: ServerSigningKeys) {
}
#[implement(Service)]
#[tracing::instrument(skip(self, object))]
pub async fn required_keys_exist(
&self,
object: &CanonicalJsonObject,
@ -119,10 +120,12 @@ pub async fn required_keys_exist(
) -> bool {
use ruma::signatures::required_keys;
debug!(?object, "Checking required keys exist");
let Ok(required_keys) = required_keys(object, version) else {
debug_error!("Failed to determine required keys");
return false;
};
debug!(?required_keys, "Required keys to verify event");
required_keys
.iter()
.flat_map(|(server, key_ids)| key_ids.iter().map(move |key_id| (server, key_id)))
@ -132,6 +135,7 @@ pub async fn required_keys_exist(
}
#[implement(Service)]
#[tracing::instrument(skip(self))]
pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool {
type KeysMap<'a> = BTreeMap<&'a ServerSigningKeyId, &'a RawJsonValue>;
@ -142,6 +146,7 @@ pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSignin
.await
.deserialized::<Raw<ServerSigningKeys>>()
else {
debug_warn!("No known signing keys found for {origin}");
return false;
};
@ -157,6 +162,7 @@ pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSignin
}
}
debug_warn!("Key {key_id} not found for {origin}");
false
}

View file

@ -1,4 +1,6 @@
use conduwuit::{Err, Result, implement, matrix::event::gen_event_id_canonical_json};
use conduwuit::{
Err, Result, debug, debug_warn, implement, matrix::event::gen_event_id_canonical_json,
};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified,
};
@ -28,18 +30,25 @@ pub async fn validate_and_add_event_id_no_fetch(
pdu: &RawJsonValue,
room_version: &RoomVersionId,
) -> Result<(OwnedEventId, CanonicalJsonObject)> {
debug!(?pdu, "Validating PDU without fetching keys");
let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?;
debug!(event_id = event_id.as_str(), "Generated event ID, checking required keys");
if !self.required_keys_exist(&value, room_version).await {
debug_warn!(
"Event {event_id} is missing required keys, cannot verify without fetching keys"
);
return Err!(BadServerResponse(debug_warn!(
"Event {event_id} cannot be verified: missing keys."
)));
}
debug!("All required keys exist, verifying event");
if let Err(e) = self.verify_event(&value, Some(room_version)).await {
debug_warn!("Event verification failed");
return Err!(BadServerResponse(debug_error!(
"Event {event_id} failed verification: {e:?}"
)));
}
debug!("Event verified successfully");
value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into()));
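
The added logging traces a deliberate gate ordering: the cheap required-keys check runs before the expensive signature verification, and the `event_id` is only stamped into the canonical JSON once both gates pass. An illustrative sketch (names are hypothetical, not the real API):

```rust
fn validate(event_id: &str, keys_present: bool, signature_ok: bool) -> Result<(), String> {
    if !keys_present {
        // Bail early: verification cannot succeed without the keys.
        return Err(format!("Event {event_id} cannot be verified: missing keys."));
    }
    if !signature_ok {
        return Err(format!("Event {event_id} failed verification"));
    }
    Ok(()) // only now is event_id inserted into the canonical JSON
}

fn main() {
    assert!(validate("$e1", true, true).is_ok());
    assert!(validate("$e2", false, true).is_err());
}
```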
@ -52,7 +61,7 @@ pub async fn verify_event(
event: &CanonicalJsonObject,
room_version: Option<&RoomVersionId>,
) -> Result<Verified> {
let room_version = room_version.unwrap_or(&RoomVersionId::V11);
let room_version = room_version.unwrap_or(&RoomVersionId::V12);
let keys = self.get_event_keys(event, room_version).await?;
ruma::signatures::verify_event(&keys, event, room_version).map_err(Into::into)
}
@ -63,7 +72,7 @@ pub async fn verify_json(
event: &CanonicalJsonObject,
room_version: Option<&RoomVersionId>,
) -> Result {
let room_version = room_version.unwrap_or(&RoomVersionId::V11);
let room_version = room_version.unwrap_or(&RoomVersionId::V12);
let keys = self.get_event_keys(event, room_version).await?;
ruma::signatures::verify_json(&keys, event.clone()).map_err(Into::into)
}

View file

@ -422,7 +422,7 @@ impl Service {
pub fn all_device_ids<'a>(
&'a self,
user_id: &'a UserId,
) -> impl Stream<Item = &DeviceId> + Send + 'a {
) -> impl Stream<Item = &'a DeviceId> + Send + 'a {
let prefix = (user_id, Interfix);
self.db
.userdeviceid_metadata
@ -770,7 +770,7 @@ impl Service {
user_id: &'a UserId,
from: u64,
to: Option<u64>,
) -> impl Stream<Item = &UserId> + Send + 'a {
) -> impl Stream<Item = &'a UserId> + Send + 'a {
self.keys_changed_user_or_room(user_id.as_str(), from, to)
.map(|(user_id, ..)| user_id)
}
@ -781,7 +781,7 @@ impl Service {
room_id: &'a RoomId,
from: u64,
to: Option<u64>,
) -> impl Stream<Item = (&UserId, u64)> + Send + 'a {
) -> impl Stream<Item = (&'a UserId, u64)> + Send + 'a {
self.keys_changed_user_or_room(room_id.as_str(), from, to)
}
@ -790,7 +790,7 @@ impl Service {
user_or_room_id: &'a str,
from: u64,
to: Option<u64>,
) -> impl Stream<Item = (&UserId, u64)> + Send + 'a {
) -> impl Stream<Item = (&'a UserId, u64)> + Send + 'a {
type KeyVal<'a> = ((&'a str, u64), &'a UserId);
let to = to.unwrap_or(u64::MAX);