Compare commits

...

62 commits

Author SHA1 Message Date
Jacob Taylor
098eb8abca Merge remote-tracking branch 'origin/main' into illegal-car-mods 2025-09-06 10:19:59 -07:00
Jade Ellis
cd238b05de
fix: Remove bad colon in workflow 2025-09-06 16:21:21 +01:00
Jade Ellis
c0e3829fed
feat: Replace Jaeger with OTLP 2025-09-06 16:19:56 +01:00
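A minimal sketch of the new export path, assuming a local OTLP/HTTP collector; the option names and endpoint default come from the updated example config later in this diff.
```
# in conduwuit.toml: allow_otlp = true   (optionally otlp_filter = "info")
# point the exporter at a collector; default per the config docs:
export OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
```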
Jade Ellis
1d7dda6cf5
chore: Upgrade ctor, cbor 2025-09-06 16:19:56 +01:00
Jade Ellis
6f19931c5b
chore(deps): Upgrade minor incompatible dependencies 2025-09-06 16:19:56 +01:00
Tom Foster
2516e783ba ci: Support optional persistent BuildKit endpoints in Docker builds
Allows us to use runners with persistent BuildKit containers for improved
caching and faster build times. Falls back to standard docker-container
driver when BUILDKIT_ENDPOINT environment variable is not set.
2025-09-06 16:05:51 +01:00
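A hedged sketch of the runner-side setup this targets; only the BUILDKIT_ENDPOINT variable name and the tcp://buildkit:8125 form come from this change, the container invocation is illustrative.
```
# run a persistent BuildKit daemon on the runner host (illustrative)
docker run -d --name buildkitd --privileged moby/buildkit:latest --addr tcp://0.0.0.0:8125
# expose it to jobs; when unset, builds fall back to the docker-container driver
export BUILDKIT_ENDPOINT=tcp://buildkit:8125
```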
Jade Ellis
fdf5771387
ci: Fix CI not triggering on external pull requests 2025-09-06 15:21:39 +01:00
Ginger
58bbc0e676 fix: Move packaging files from dist/ to pkg/ 2025-09-06 14:03:57 +00:00
Ginger
0d58e660a2 fix: Remove unnecessary user and directory modifications
systemd creates a dynamic user for continuwuity and manages directories for it automatically, so the debian postinst script no longer needs to do that.
2025-09-06 14:03:57 +00:00
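For illustration, how the DynamicUser/StateDirectory pairing in the updated unit can be checked on a host; these are standard systemd commands, not project scripts.
```
systemctl start conduwuit.service
ls -ld /var/lib/conduwuit                    # created and owned by the dynamic user
systemd-analyze security conduwuit.service   # optional: review the sandboxing
```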
Ginger
e7124edb73 fix: Update debian systemd unit path 2025-09-06 14:03:57 +00:00
Ginger
d19e0f0d97 feat: Move packaging scripts into dist/ and consolidate the service files 2025-09-06 14:03:57 +00:00
nex
467aed3028 chore: Add Ginger's GH noreply email to mailmap 2025-09-02 16:36:56 +00:00
Ginger
99b44bbf09 Update conduwuit-example.toml 2025-09-01 17:50:09 +00:00
Ginger
95aeff8cdc Set the DB path as an env var in systemd service files to prevent footgunning 2025-09-01 17:50:09 +00:00
nexy7574
9e62e66ae4 chore(PR956): Update admin docs 2025-09-01 11:27:58 +00:00
nexy7574
76b93e252d feat: Only inject vias when manual ones aren't provided during join 2025-09-01 11:27:58 +00:00
nexy7574
66d479e2eb fix: Make remote leave helper a public fn 2025-09-01 11:27:58 +00:00
nexy7574
241371463e feat: Force leave remote rooms admin command 2025-09-01 11:27:58 +00:00
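Usage per the admin docs updated later in this diff; the user and room IDs below are placeholders.
```
admin users force-leave-remote-room <USER_ID> <ROOM_ID>
# e.g. admin users force-leave-remote-room @alice:example.com !roomid:remote.example.org
```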
nexy7574
d970df5fd2
perf(MSC4323): Parallelise some check futs 2025-09-01 12:13:37 +01:00
nexy7574
4e644961f3
perf(MSC4323): Remove redundant authorisation checks 2025-09-01 12:13:37 +01:00
nexy7574
35cf9af5c8
feat(MSC4323): Add versions flag 2025-09-01 12:13:37 +01:00
nexy7574
04e796176a
style(MSC4323): Satisfy our linting overlords 2025-09-01 12:13:37 +01:00
nexy7574
9783940105
feat(MSC4323): Advertise suspension support in capabilities 2025-09-01 12:13:37 +01:00
nexy7574
1e430f9470
feat(MSC4323): Implement agnostic suspension endpoint 2025-09-01 12:13:37 +01:00
Renovate Bot
5cce024841 chore(deps): update https://github.com/reproducible-containers/buildkit-cache-dance action to v3.3.0 2025-08-31 00:44:28 +00:00
Jade Ellis
e87c461b8d
feat: Cache renovate data, RO GitHub token 2025-08-31 01:37:50 +01:00
Jade Ellis
b934898f51
chore: Update renovate config, limit cargo updates 2025-08-31 00:25:41 +01:00
nexy7574
83e3de55a4
fix(sync/v2): Room leaves being omitted incorrectly
Partially borrowed from 85a84f93c7
2025-08-30 16:18:46 +01:00
Tom Foster
609e239436 fix(fedora): Correct linting issues in RPM spec file
The Fedora RPM packaging files added in PR #950 weren't passing pre-commit
checks, causing CI failures for any branches rebased after that merge. This
applies prek linting fixes (typo correction, trailing whitespace removal,
and EOF newline) to ensure CI passes for all contributors.
2025-08-30 16:10:41 +01:00
Ginger
34417c96ae Update URL to point at the landing page 2025-08-28 21:10:46 +00:00
Ginger
f33f281edb Update long description to match deb package 2025-08-28 21:10:46 +00:00
Ginger
ddbca59193 Add spec and service files for creating an RPM package 2025-08-28 21:10:46 +00:00
Tom Foster
b5a2e49ae4 fix: Resolve Clippy CI failures from elided lifetime warnings
The latest Rust nightly compiler (2025-08-27) introduced the
elided-named-lifetimes lint which causes Clippy CI checks to fail
when an elided lifetime ('_) resolves to a named lifetime that's
already in scope.

This commit fixes the Clippy warnings by:
- Making lifetime relationships explicit where 'a is already in scope
- Keeping elided lifetimes ('_) in functions without explicit
  lifetime parameters
- Ensuring proper lifetime handling in the database pool module

Affected files (17 total):
- Database map modules: Handle, Key, and KeyVal references in get,
  qry, keys, and stream operations
- Database pool module: into_recv_seek function

This change resolves the CI build failures without changing any
functionality, ensuring the codebase remains compatible with the
latest nightly Clippy checks.
2025-08-28 21:13:19 +01:00
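A minimal standalone illustration of the lint (not code from this repository): the elided '_ resolves to the named 'a already in scope, which the new nightly flags.
```
// warns under elided-named-lifetimes: `'_` silently resolves to `'a`
fn head<'a>(s: &'a str) -> &'_ str { &s[..1] }

// the fix applied throughout: name the relationship explicitly
fn head_fixed<'a>(s: &'a str) -> &'a str { &s[..1] }
```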
Jade Ellis
37248a4f68
chore: Add reasons for test skips 2025-08-28 20:10:05 +01:00
Tom Foster
dd22325ea2 refactor(ci): Consolidate Rust checks with optimised toolchain setup
Merge rust-checks.yml into prek-checks.yml for a unified workflow that
runs formatting and clippy/test checks in parallel jobs.

Add reusable composite actions:
- setup-rust: Smart Rust toolchain management with caching
  * Uses cargo-binstall for pre-built binary downloads
  * Integrates Mozilla sccache-action for compilation caching
  * Workspace-relative paths for better cache control
  * GitHub token support for improved rate limits
- setup-llvm-with-apt: LLVM installation with native dependencies
- detect-runner-os: Consistent OS detection for cache keys

Key improvements:
- Install prek via cargo-binstall --git (crates.io outdated at v0.0.1)
- Download timelord-cli from cargo-quickinstall
- Set BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10 to avoid rate limit delays
- Default Rust version 1.87.0 with override support
- Remove redundant sccache stats (handled by Mozilla action)

Significantly reduces CI runtime through binary downloads instead of
compilation while maintaining all existing quality checks.
2025-08-28 19:20:14 +01:00
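The same binary-download pattern works outside CI; this mirrors the steps the new composite action runs (the installer URL is upstream cargo-binstall's documented entry point).
```
curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
cargo binstall -y --no-symlinks --git https://github.com/j178/prek prek   # crates.io lags at v0.0.1
cargo binstall -y --no-symlinks timelord-cli
```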
nex
30a56d5cb9
Update renovate.json 2025-08-28 17:15:32 +00:00
nexy7574
3183210459
fix: Post-merge compile issues 2025-08-23 21:28:31 +01:00
RatCornu
57d7743037 feat: add ldap_only config option 2025-08-23 19:59:36 +00:00
Jade Ellis
cb09bfa4e7 fix: Correctly pass ldap feature from the default crate 2025-08-23 19:59:36 +00:00
Jade Ellis
0ed691edef fix: Make builds without LDAP work correctly 2025-08-23 19:59:36 +00:00
Jade Ellis
c58b9f05ed chore: Fix default attributes for config 2025-08-23 19:59:36 +00:00
RatCornu
fb7e739b72 chore: remove unused LDAP mail attribute 2025-08-23 19:59:36 +00:00
RatCornu
c7adbae03f feat: ldap login 2025-08-23 19:59:36 +00:00
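A hypothetical minimal configuration using the new options; every key is from the [global.ldap] section documented later in this diff, the values are illustrative.
```
[global.ldap]
enable = true
uri = "ldap://ldap.example.com:389"
base_dn = "ou=users,dc=example,dc=org"
bind_dn = "cn=ldap-reader,dc=example,dc=org"
bind_password_file = "/etc/conduwuit/ldap-password"
filter = "(&(objectClass=person)(memberOf=matrix))"
uid_attribute = "uid"
```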
Jade Ellis
8b35de6a43
chore: Fix clippy lints with minimal diff 2025-08-22 00:51:54 +01:00
aviac
d191494f18
chore(nix): update fenix input
This is required, since now we're installing `rustfmt` from the latest
state of the fenix repo. This wasn't recent enough for the latest rust
version. The input was locked at (2025-07-02). Now it's up to date.
2025-08-22 00:37:16 +01:00
aviac
6d1f12b22d
chore(nix): make rustfmt-nightly available to default dev shell
I verified this by running `rustfmt --version` on my system. Note that I
don't have a system-wide install of rust and only rely on dev shells, so
this can't possibly come from somewhere else.

```
$ rustfmt --version
rustfmt 1.8.0-nightly (6677875279 2025-07-02)
```
2025-08-22 00:37:16 +01:00
aviac
ca3ee9224b
chore(rust): drop rustfmt from rust-toolchain.toml
This just installs regular rustfmt, which is not needed in this project.
One could say "It doesn't hurt", but in the NixOS dev shell it actually
does since it will shadow nightly rustfmt and we don't have the
`cargo +nightly fmt` syntax on NixOS that is available on other distros.

Also "It doesn't hurt" to delete it for non NixOS users.
2025-08-22 00:37:16 +01:00
aviac
427b973b67
chore(rust): bump version 1.87 -> 1.89
- bump version in rust-toolchain.toml
- update sha in flake.nix
2025-08-22 00:32:04 +01:00
Tom Foster
aacaf5a2a0 fix(ci): Downgrade setup-uv action from v6 to v5
The setup-uv@v6 action has deprecated Node 18 support mid-version by
using the File API, causing workflow failures. Temporarily downgrading
to v5 until we migrate to a better runner image with Node 20+ support.
2025-08-21 21:10:15 +01:00
aviac
256bed992e
chore(nix): exec 'use flake' with direnv on NixOS systems 2025-08-21 13:40:11 +02:00
aviac
ecb87ccd1c
chore(nix): bump rocksdb version in flake.nix to 10.4.fb
This works without any further changes. Multiple people in the matrix
room (including myself) have reported that the built executable runs
fine with this. Nevertheless, there might be room for improvements (in
future commits)
2025-08-21 13:39:36 +02:00
Tom Foster
14a4b24fc5 fix(ci): Configure Renovate for Forgejo platform
- Set platform to 'forgejo' with proper API endpoint
- Use environment variables for all Renovate configuration
- Add git timeout and disable GitHub token warnings
- Move PR limit configuration to workflow
2025-08-17 17:37:24 +01:00
Tom Foster
731761f0fc Merge branch 'main' into tom/prek-was-prefligit 2025-08-17 15:08:44 +00:00
Tom Foster
4524a00fc6 chore(ci): Remove obsolete prefligit action
Now using prek directly via uvx, this custom action is no longer needed.
2025-08-17 16:00:42 +01:00
Tom Foster
9db750e97c fix(ci): Add full GitHub URL to renovate action
Forgejo's runner doesn't automatically assume actions are on github.com,
so we need to specify the full URL.
2025-08-17 15:51:29 +01:00
Tom Foster
b14a4d470b Merge branch 'main' into tom/prek-was-prefligit 2025-08-17 14:16:35 +00:00
Tom Foster
5d1f141882 ci: Rename prefligit-checks.yml to prek-checks.yml
Rename workflow file to match the updated tool name.
2025-08-17 15:13:02 +01:00
Tom Foster
b447cfff56 ci: Update prefligit to prek
The prefligit project has been renamed to prek due to typosquatting
concerns. This updates our CI to use the new name and recommended
installation method via uv, which significantly reduces setup time
compared to cargo install and includes automatic caching.

- Replace outdated static prefligit action with direct prek invocation
- Use uv as recommended by upstream: https://github.com/j178/prek
- Update check-byte-order-marker to fix-byte-order-marker (deprecated)
- Simplify workflow by removing unused ref calculations

The same .pre-commit-config.yaml works unchanged. Developers can
install locally with 'uvx prek install' or other methods from the repo.
2025-08-17 15:11:38 +01:00
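Local equivalents of what CI now runs, per the commit's suggestion (assumes uv is already installed).
```
uvx prek install                                  # register the git hooks
uvx prek run --all-files --show-diff-on-failure   # one-off run across the repo
```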
Tom Foster
283888e788 Merge branch 'main' into tom/renovate 2025-08-17 13:27:33 +00:00
Tom Foster
f54e59a068 ci: Add Renovate for automated dependency management
Configures Renovate bot to create PRs for outdated dependencies.
Runs daily at 5am UTC with manual trigger via workflow_dispatch.

Configuration:
- Ignores custom forks (jemalloc, telemetry packages)
- Groups: GHA minor/patch, Rust toolchain, lockfile, Rust patches
- Limits: 3 concurrent PRs, 2 PRs per hour
- Supports: Cargo, GitHub Actions, Nix
2025-08-17 14:20:20 +01:00
Tom Foster
2a183cc5a4 fix(build): Remove hardened_malloc from full feature set
The hardened_malloc feature conflicts with jemalloc, preventing successful
builds with the --features full flag. Commenting out hardened_malloc allows
the full profile to build correctly while maintaining all other features.
2025-08-17 13:44:32 +01:00
nexy7574
54acd07555
fix: Drop fake room v2 support 2025-08-16 16:22:24 +01:00
102 changed files with 2666 additions and 1117 deletions

.editorconfig

@@ -26,3 +26,7 @@ max_line_length = 98
[*.yml]
indent_size = 2
indent_style = space
[*.json]
indent_size = 4
indent_style = space

.envrc

@@ -2,6 +2,8 @@
dotenv_if_exists
# use flake ".#${DIRENV_DEVSHELL:-default}"
if [ -f /etc/os-release ] && grep -q '^ID=nixos' /etc/os-release; then
use flake ".#${DIRENV_DEVSHELL:-default}"
fi
PATH_add bin

.forgejo/actions/detect-runner-os/action.yml

@@ -0,0 +1,39 @@
name: detect-runner-os
description: |
Detect the actual OS name and version of the runner.
Provides separate outputs for name, version, and a combined slug.
outputs:
name:
description: 'OS name (e.g. Ubuntu, Debian)'
value: ${{ steps.detect.outputs.name }}
version:
description: 'OS version (e.g. 22.04, 11)'
value: ${{ steps.detect.outputs.version }}
slug:
description: 'Combined OS slug (e.g. Ubuntu-22.04)'
value: ${{ steps.detect.outputs.slug }}
runs:
using: composite
steps:
- name: Detect runner OS
id: detect
shell: bash
run: |
# Detect OS version (try lsb_release first, fall back to /etc/os-release)
OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)
# Detect OS name and capitalise (try lsb_release first, fall back to /etc/os-release)
OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"' | sed 's/\b\(.\)/\u\1/g')
# Create combined slug
OS_SLUG="${OS_NAME}-${OS_VERSION}"
# Set outputs
echo "name=${OS_NAME}" >> $GITHUB_OUTPUT
echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT
echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT
# Log detection results
echo "🔍 Detected Runner OS: ${OS_NAME} ${OS_VERSION}"

.forgejo/actions/prefligit/action.yml

@@ -1,27 +0,0 @@
name: prefligit
description: |
Runs prefligit, pre-commit reimplemented in Rust.
inputs:
extra_args:
description: options to pass to pre-commit run
required: false
default: '--all-files'
runs:
using: composite
steps:
- name: Install uv
uses: https://github.com/astral-sh/setup-uv@v6
with:
enable-cache: true
ignore-nothing-to-cache: true
- name: Install Prefligit
shell: bash
run: |
curl --proto '=https' --tlsv1.2 -LsSf https://github.com/j178/prefligit/releases/download/v0.0.10/prefligit-installer.sh | sh
- uses: actions/cache@v3
with:
path: ~/.cache/prefligit
key: prefligit-0|${{ hashFiles('.pre-commit-config.yaml') }}
- run: prefligit run --show-diff-on-failure --color=always -v ${{ inputs.extra_args }}
shell: bash

.forgejo/actions/setup-llvm-with-apt/action.yml

@@ -0,0 +1,167 @@
name: setup-llvm-with-apt
description: |
Set up LLVM toolchain with APT package management and smart caching.
Supports cross-compilation architectures and additional package installation.
Creates symlinks in /usr/bin: clang, clang++, lld, llvm-ar, llvm-ranlib
inputs:
dpkg-arch:
description: 'Debian architecture for cross-compilation (e.g. arm64)'
required: false
default: ''
extra-packages:
description: 'Additional APT packages to install (space-separated)'
required: false
default: ''
llvm-version:
description: 'LLVM version to install'
required: false
default: '20'
outputs:
llvm-version:
description: 'Installed LLVM version'
value: ${{ steps.configure.outputs.version }}
runs:
using: composite
steps:
- name: Detect runner OS
id: runner-os
uses: ./.forgejo/actions/detect-runner-os
- name: Configure cross-compilation architecture
if: inputs.dpkg-arch != ''
shell: bash
run: |
echo "🏗️ Adding ${{ inputs.dpkg-arch }} architecture"
sudo dpkg --add-architecture ${{ inputs.dpkg-arch }}
# Restrict default sources to amd64
sudo sed -i 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list
sudo sed -i 's/^deb https/deb [arch=amd64] https/g' /etc/apt/sources.list
# Add ports sources for foreign architecture
sudo tee /etc/apt/sources.list.d/${{ inputs.dpkg-arch }}.list > /dev/null <<EOF
deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
EOF
echo "✅ Architecture ${{ inputs.dpkg-arch }} configured"
- name: Start LLVM cache group
shell: bash
run: echo "::group::📦 Restoring LLVM cache"
- name: Check for LLVM cache
id: cache
uses: https://github.com/actions/cache@v4
with:
path: |
/usr/bin/clang-*
/usr/bin/clang++-*
/usr/bin/lld-*
/usr/bin/llvm-*
/usr/lib/llvm-*/
/usr/lib/x86_64-linux-gnu/libLLVM*.so*
/usr/lib/x86_64-linux-gnu/libclang*.so*
/etc/apt/sources.list.d/archive_uri-*
/etc/apt/trusted.gpg.d/apt.llvm.org.asc
key: llvm-${{ steps.runner-os.outputs.slug }}-v${{ inputs.llvm-version }}-v3-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}
- name: End LLVM cache group
shell: bash
run: echo "::endgroup::"
- name: Check and install LLVM if needed
id: llvm-setup
shell: bash
run: |
echo "🔍 Checking for LLVM ${{ inputs.llvm-version }}..."
# Check both binaries and libraries exist
if [ -f "/usr/bin/clang-${{ inputs.llvm-version }}" ] && \
[ -f "/usr/bin/clang++-${{ inputs.llvm-version }}" ] && \
[ -f "/usr/bin/lld-${{ inputs.llvm-version }}" ] && \
([ -f "/usr/lib/x86_64-linux-gnu/libLLVM.so.${{ inputs.llvm-version }}.1" ] || \
[ -f "/usr/lib/x86_64-linux-gnu/libLLVM-${{ inputs.llvm-version }}.so.1" ] || \
[ -f "/usr/lib/llvm-${{ inputs.llvm-version }}/lib/libLLVM.so" ]); then
echo "✅ LLVM ${{ inputs.llvm-version }} found and verified"
echo "needs-install=false" >> $GITHUB_OUTPUT
else
echo "📦 LLVM ${{ inputs.llvm-version }} not found or incomplete - installing..."
echo "::group::🔧 Installing LLVM ${{ inputs.llvm-version }}"
wget -O - https://apt.llvm.org/llvm.sh | bash -s -- ${{ inputs.llvm-version }}
echo "::endgroup::"
if [ ! -f "/usr/bin/clang-${{ inputs.llvm-version }}" ]; then
echo "❌ Failed to install LLVM ${{ inputs.llvm-version }}"
exit 1
fi
echo "✅ Installed LLVM ${{ inputs.llvm-version }}"
echo "needs-install=true" >> $GITHUB_OUTPUT
fi
- name: Prepare for additional packages
if: inputs.extra-packages != ''
shell: bash
run: |
# Update APT if LLVM was cached (installer script already does apt-get update)
if [[ "${{ steps.llvm-setup.outputs.needs-install }}" != "true" ]]; then
echo "::group::📦 Running apt-get update (LLVM cached, extra packages needed)"
sudo apt-get update
echo "::endgroup::"
fi
echo "::group::📦 Installing additional packages"
- name: Install additional packages
if: inputs.extra-packages != ''
uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
with:
packages: ${{ inputs.extra-packages }}
version: 1.0
- name: End package installation group
if: inputs.extra-packages != ''
shell: bash
run: echo "::endgroup::"
- name: Configure LLVM environment
id: configure
shell: bash
run: |
echo "::group::🔧 Configuring LLVM ${{ inputs.llvm-version }} environment"
# Create symlinks
sudo ln -sf "/usr/bin/clang-${{ inputs.llvm-version }}" /usr/bin/clang
sudo ln -sf "/usr/bin/clang++-${{ inputs.llvm-version }}" /usr/bin/clang++
sudo ln -sf "/usr/bin/lld-${{ inputs.llvm-version }}" /usr/bin/lld
sudo ln -sf "/usr/bin/llvm-ar-${{ inputs.llvm-version }}" /usr/bin/llvm-ar
sudo ln -sf "/usr/bin/llvm-ranlib-${{ inputs.llvm-version }}" /usr/bin/llvm-ranlib
echo " ✓ Created symlinks"
# Setup library paths
LLVM_LIB_PATH="/usr/lib/llvm-${{ inputs.llvm-version }}/lib"
if [ -d "$LLVM_LIB_PATH" ]; then
echo "LD_LIBRARY_PATH=${LLVM_LIB_PATH}:${LD_LIBRARY_PATH:-}" >> $GITHUB_ENV
echo "LIBCLANG_PATH=${LLVM_LIB_PATH}" >> $GITHUB_ENV
echo "$LLVM_LIB_PATH" | sudo tee "/etc/ld.so.conf.d/llvm-${{ inputs.llvm-version }}.conf" > /dev/null
sudo ldconfig
echo " ✓ Configured library paths"
else
# Fallback to standard library location
if [ -d "/usr/lib/x86_64-linux-gnu" ]; then
echo "LIBCLANG_PATH=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV
echo " ✓ Using fallback library path"
fi
fi
# Set output
echo "version=${{ inputs.llvm-version }}" >> $GITHUB_OUTPUT
echo "::endgroup::"
echo "✅ LLVM ready: $(clang --version | head -1)"

.forgejo/actions/setup-rust/action.yml

@@ -0,0 +1,236 @@
name: setup-rust
description: |
Set up Rust toolchain with sccache for compilation caching.
Respects rust-toolchain.toml by default or accepts explicit version override.
inputs:
cache-key-suffix:
description: 'Optional suffix for cache keys (e.g. platform identifier)'
required: false
default: ''
rust-components:
description: 'Additional Rust components to install (space-separated)'
required: false
default: ''
rust-target:
description: 'Rust target triple (e.g. x86_64-unknown-linux-gnu)'
required: false
default: ''
rust-version:
description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
required: false
default: '1.87.0'
sccache-cache-limit:
description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
required: false
default: '2G'
github-token:
description: 'GitHub token for downloading sccache from GitHub releases'
required: false
default: ''
outputs:
rust-version:
description: 'Installed Rust version'
value: ${{ steps.rust-setup.outputs.version }}
runs:
using: composite
steps:
- name: Detect runner OS
id: runner-os
uses: ./.forgejo/actions/detect-runner-os
- name: Configure Cargo environment
shell: bash
run: |
# Use workspace-relative paths for better control and consistency
echo "CARGO_HOME=${{ github.workspace }}/.cargo" >> $GITHUB_ENV
echo "CARGO_TARGET_DIR=${{ github.workspace }}/target" >> $GITHUB_ENV
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> $GITHUB_ENV
echo "RUSTUP_HOME=${{ github.workspace }}/.rustup" >> $GITHUB_ENV
# Limit binstall resolution timeout to avoid GitHub rate limit delays
echo "BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10" >> $GITHUB_ENV
# Ensure directories exist for first run
mkdir -p "${{ github.workspace }}/.cargo"
mkdir -p "${{ github.workspace }}/.sccache"
mkdir -p "${{ github.workspace }}/target"
mkdir -p "${{ github.workspace }}/.rustup"
- name: Start cache restore group
shell: bash
run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"
- name: Cache Cargo registry and git
id: registry-cache
uses: https://github.com/actions/cache@v4
with:
path: |
.cargo/registry/index
.cargo/registry/cache
.cargo/git/db
# Registry cache saved per workflow, restored from any workflow's cache
# Each workflow maintains its own registry that accumulates its needed crates
key: cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ github.workflow }}
restore-keys: |
cargo-registry-${{ steps.runner-os.outputs.slug }}-
- name: Cache toolchain binaries
id: toolchain-cache
uses: https://github.com/actions/cache@v4
with:
path: |
.cargo/bin
.rustup/toolchains
.rustup/update-hashes
# Shared toolchain cache across all Rust versions
key: toolchain-${{ steps.runner-os.outputs.slug }}
- name: Debug GitHub token availability
shell: bash
run: |
if [ -z "${{ inputs.github-token }}" ]; then
echo "⚠️ No GitHub token provided - sccache will use fallback download method"
else
echo "✅ GitHub token provided for sccache"
fi
- name: Setup sccache
uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
with:
token: ${{ inputs.github-token }}
- name: Cache build artifacts
id: build-cache
uses: https://github.com/actions/cache@v4
with:
path: |
target/**/deps
!target/**/deps/*.rlib
target/**/build
target/**/.fingerprint
target/**/incremental
target/**/*.d
/timelord/
# Build artifacts - cache per code change, restore from deps when code changes
key: >-
build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
restore-keys: |
build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
- name: End cache restore group
shell: bash
run: echo "::endgroup::"
- name: Setup Rust toolchain
shell: bash
run: |
# Install rustup if not already cached
if ! command -v rustup &> /dev/null; then
echo "::group::📦 Installing rustup"
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
source "$CARGO_HOME/env"
echo "::endgroup::"
else
echo "✅ rustup already available"
fi
# Setup the appropriate Rust version
if [[ -n "${{ inputs.rust-version }}" ]]; then
echo "::group::📦 Setting up Rust ${{ inputs.rust-version }}"
# Set override first to prevent rust-toolchain.toml from auto-installing
rustup override set ${{ inputs.rust-version }} 2>/dev/null || true
# Check if we need to install/update the toolchain
if rustup toolchain list | grep -q "^${{ inputs.rust-version }}-"; then
rustup update ${{ inputs.rust-version }}
else
rustup toolchain install ${{ inputs.rust-version }} --profile minimal -c cargo,clippy,rustfmt
fi
else
echo "::group::📦 Setting up Rust from rust-toolchain.toml"
rustup show
fi
echo "::endgroup::"
- name: Configure PATH and install tools
shell: bash
env:
GITHUB_TOKEN: ${{ inputs.github-token }}
run: |
# Add .cargo/bin to PATH permanently for all subsequent steps
echo "${{ github.workspace }}/.cargo/bin" >> $GITHUB_PATH
# For this step only, we need to add it to PATH since GITHUB_PATH takes effect in the next step
export PATH="${{ github.workspace }}/.cargo/bin:$PATH"
# Install cargo-binstall for fast binary installations
if command -v cargo-binstall &> /dev/null; then
echo "✅ cargo-binstall already available"
else
echo "::group::📦 Installing cargo-binstall"
curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
echo "::endgroup::"
fi
if command -v prek &> /dev/null; then
echo "✅ prek already available"
else
echo "::group::📦 Installing prek"
# prek isn't regularly published to crates.io, so we use git source
cargo-binstall -y --no-symlinks --git https://github.com/j178/prek prek
echo "::endgroup::"
fi
if command -v timelord &> /dev/null; then
echo "✅ timelord already available"
else
echo "::group::📦 Installing timelord"
cargo-binstall -y --no-symlinks timelord-cli
echo "::endgroup::"
fi
- name: Configure sccache environment
shell: bash
run: |
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
# Configure incremental compilation GC
# If we restored from old cache (partial hit), clean up aggressively
if [[ "${{ steps.build-cache.outputs.cache-hit }}" != "true" ]]; then
echo "♻️ Partial cache hit - enabling cache cleanup"
echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
fi
- name: Install Rust components
if: inputs.rust-components != ''
shell: bash
run: |
echo "📦 Installing components: ${{ inputs.rust-components }}"
rustup component add ${{ inputs.rust-components }}
- name: Install Rust target
if: inputs.rust-target != ''
shell: bash
run: |
echo "📦 Installing target: ${{ inputs.rust-target }}"
rustup target add ${{ inputs.rust-target }}
- name: Output version and summary
id: rust-setup
shell: bash
run: |
RUST_VERSION=$(rustc --version | cut -d' ' -f2)
echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT
echo "📋 Setup complete:"
echo " Rust: $(rustc --version)"
echo " Cargo: $(cargo --version)"
echo " prek: $(prek --version 2>/dev/null || echo 'installed')"
echo " timelord: $(timelord --version 2>/dev/null || echo 'installed')"

.forgejo/workflows/prefligit-checks.yml

@@ -1,22 +0,0 @@
name: Checks / Prefligit
on:
push:
pull_request:
permissions:
contents: read
jobs:
prefligit:
runs-on: ubuntu-latest
env:
FROM_REF: ${{ github.event.pull_request.base.sha || (!github.event.forced && ( github.event.before != '0000000000000000000000000000000000000000' && github.event.before || github.sha )) || format('{0}~', github.sha) }}
TO_REF: ${{ github.sha }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.forgejo/actions/prefligit
with:
extra_args: --all-files --hook-stage manual

.forgejo/workflows/prek-checks.yml

@@ -0,0 +1,83 @@
name: Checks / Prek
on:
pull_request:
push:
branches:
- main
workflow_dispatch:
permissions:
contents: read
jobs:
fast-checks:
name: Pre-commit & Formatting
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup Rust nightly
uses: ./.forgejo/actions/setup-rust
with:
rust-version: nightly
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Run prek
run: |
prek run \
--all-files \
--hook-stage manual \
--show-diff-on-failure \
--color=always \
-v
- name: Check Rust formatting
run: |
cargo +nightly fmt --all -- --check && \
echo "✅ Formatting check passed" || \
exit 1
clippy-and-tests:
name: Clippy and Cargo Tests
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup LLVM
uses: ./.forgejo/actions/setup-llvm-with-apt
with:
extra-packages: liburing-dev liburing2
- name: Setup Rust with caching
uses: ./.forgejo/actions/setup-rust
with:
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Run Clippy lints
run: |
cargo clippy \
--workspace \
--features full \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Run Cargo tests
run: |
cargo test \
--workspace \
--features full \
--locked \
--profile test \
--all-targets \
--no-fail-fast

.forgejo/workflows/release-image.yml

@@ -3,14 +3,26 @@ concurrency:
group: "release-image-${{ github.ref }}"
on:
push:
pull_request:
paths-ignore:
- "*.md"
- "**/*.md"
- ".gitlab-ci.yml"
- ".gitignore"
- "renovate.json"
- "debian/**"
- "pkg/**"
- "docker/**"
- "docs/**"
push:
branches:
- main
paths-ignore:
- "*.md"
- "**/*.md"
- ".gitlab-ci.yml"
- ".gitignore"
- "renovate.json"
- "pkg/**"
- "docker/**"
- "docs/**"
# Allows you to run this workflow manually from the Actions tab
@@ -93,6 +105,10 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
@@ -161,7 +177,7 @@
var-lib-apt-${{ matrix.slug }}
key: var-lib-apt-${{ matrix.slug }}
- name: inject cache into docker
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
with:
cache-map: |
{
@@ -250,6 +266,10 @@
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
- name: Extract metadata (tags) for Docker
id: meta

.forgejo/workflows/renovate.yml

@@ -0,0 +1,111 @@
name: Maintenance / Renovate
on:
schedule:
# Run at 5am UTC daily to avoid late-night dev
- cron: '0 5 * * *'
workflow_dispatch:
inputs:
dryRun:
description: 'Dry run mode'
required: false
default: null
type: choice
options:
- null
- 'extract'
- 'lookup'
- 'full'
logLevel:
description: 'Log level'
required: false
default: 'info'
type: choice
options:
- 'info'
- 'warning'
- 'critical'
push:
branches:
- main
paths:
# Re-run when config changes
- '.forgejo/workflows/renovate.yml'
- 'renovate.json'
jobs:
renovate:
name: Renovate
runs-on: ubuntu-latest
container:
image: ghcr.io/renovatebot/renovate:41
options: --tmpfs /tmp:exec
steps:
- name: Checkout
uses: actions/checkout@v4
with:
show-progress: false
- name: print node heap
run: /usr/local/renovate/node -e 'console.log(`node heap limit = ${require("v8").getHeapStatistics().heap_size_limit / (1024 * 1024)} Mb`)'
- name: Restore renovate repo cache
uses: https://github.com/actions/cache@v4
with:
path: |
/tmp/renovate/cache/renovate/repository
key: repo-cache-${{ github.run_id }}
restore-keys: |
repo-cache-
- name: Restore renovate package cache
uses: https://github.com/actions/cache@v4
with:
path: |
/tmp/renovate/cache/renovate/renovate-cache-sqlite
key: package-cache-${{ github.run_id }}
restore-keys: |
package-cache-
- name: Self-hosted Renovate
uses: https://github.com/renovatebot/github-action@v43.0.9
env:
LOG_LEVEL: ${{ inputs.logLevel || 'info' }}
RENOVATE_DRY_RUN: ${{ inputs.dryRun || 'false' }}
RENOVATE_PLATFORM: forgejo
RENOVATE_ENDPOINT: ${{ github.server_url }}
RENOVATE_AUTODISCOVER: 'false'
RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'
RENOVATE_GIT_TIMEOUT: 60000
RENOVATE_REQUIRE_CONFIG: 'required'
RENOVATE_ONBOARDING: 'false'
RENOVATE_PR_COMMITS_PER_RUN_LIMIT: 3
RENOVATE_GITHUB_TOKEN_WARN: 'false'
RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}
GITHUB_COM_TOKEN: ${{ secrets.GH_PUBLIC_RO }}
RENOVATE_REPOSITORY_CACHE: 'enabled'
RENOVATE_X_SQLITE_PACKAGE_CACHE: true
- name: Save renovate repo cache
if: always() && env.RENOVATE_DRY_RUN != 'full'
uses: https://github.com/actions/cache@v4
with:
path: |
/tmp/renovate/cache/renovate/repository
key: repo-cache-${{ github.run_id }}
- name: Save renovate package cache
if: always() && env.RENOVATE_DRY_RUN != 'full'
uses: https://github.com/actions/cache@v4
with:
path: |
/tmp/renovate/cache/renovate/renovate-cache-sqlite
key: package-cache-${{ github.run_id }}

.forgejo/workflows/rust-checks.yml

@@ -1,144 +0,0 @@
name: Checks / Rust
on:
push:
jobs:
format:
name: Format
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
with:
toolchain: "nightly"
components: "rustfmt"
- name: Check formatting
run: |
cargo +nightly fmt --all -- --check
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Clippy
run: |
cargo clippy \
--workspace \
--features full \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Show sccache stats
if: always()
run: sccache --show-stats
cargo-test:
name: Cargo Test
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Cargo Test
run: |
cargo test \
--workspace \
--features full \
--locked \
--profile test \
--all-targets \
--no-fail-fast
- name: Show sccache stats
if: always()
run: sccache --show-stats

.mailmap

@@ -13,3 +13,4 @@ Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
Timo Kösters <timo@koesters.xyz>
x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>

.pre-commit-config.yaml

@@ -9,7 +9,7 @@ repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-byte-order-marker
- id: fix-byte-order-marker
- id: check-case-conflict
- id: check-symlinks
- id: destroyed-symlinks

Cargo.lock (generated)

File diff suppressed because it is too large.

Cargo.toml

@@ -48,15 +48,15 @@ features = ["ffi", "std", "union"]
version = "0.6.2"
[workspace.dependencies.ctor]
version = "0.2.9"
version = "0.5.0"
[workspace.dependencies.cargo_toml]
version = "0.21"
version = "0.22"
default-features = false
features = ["features"]
[workspace.dependencies.toml]
version = "0.8.14"
version = "0.9.5"
default-features = false
features = ["parse"]
@@ -352,7 +352,7 @@ version = "0.1.2"
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
#branch = "conduwuit-changes"
rev = "b753738047d1f443aca870896ef27ecaacf027da"
rev = "8fb268fa2771dfc3a1c8075ef1246e7c9a0a53fd"
features = [
"compat",
"rand",
@@ -411,25 +411,28 @@ default-features = false
# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
[workspace.dependencies.opentelemetry]
version = "0.21.0"
version = "0.30.0"
[workspace.dependencies.tracing-flame]
version = "0.2.0"
[workspace.dependencies.tracing-opentelemetry]
version = "0.22.0"
version = "0.31.0"
[workspace.dependencies.opentelemetry_sdk]
version = "0.21.2"
version = "0.30.0"
features = ["rt-tokio"]
[workspace.dependencies.opentelemetry-jaeger]
version = "0.20.0"
features = ["rt-tokio"]
[workspace.dependencies.opentelemetry-otlp]
version = "0.30.0"
features = ["http", "trace", "logs", "metrics"]
[workspace.dependencies.opentelemetry-jaeger-propagator]
version = "0.30.0"
# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.37.0"
version = "0.42.0"
default-features = false
features = [
"backtrace",
@@ -445,9 +448,9 @@ features = [
]
[workspace.dependencies.sentry-tracing]
version = "0.37.0"
version = "0.42.0"
[workspace.dependencies.sentry-tower]
version = "0.37.0"
version = "0.42.0"
# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
@@ -476,7 +479,7 @@ features = ["use_std"]
version = "0.4"
[workspace.dependencies.nix]
version = "0.29.0"
version = "0.30.1"
default-features = false
features = ["resource"]
@@ -498,7 +501,7 @@ version = "0.4.3"
default-features = false
[workspace.dependencies.termimad]
version = "0.31.2"
version = "0.34.0"
default-features = false
[workspace.dependencies.checked_ops]
@@ -536,16 +539,21 @@ version = "0.2"
version = "0.2"
[workspace.dependencies.minicbor]
version = "0.26.3"
version = "2.1.1"
features = ["std"]
[workspace.dependencies.minicbor-serde]
version = "0.4.1"
version = "0.6.0"
features = ["std"]
[workspace.dependencies.maplit]
version = "1.0.2"
[workspace.dependencies.ldap3]
version = "0.11.5"
default-features = false
features = ["sync", "tls-rustls"]
#
# Patches
#
@@ -759,25 +767,6 @@ incremental = true
[profile.dev.package.conduwuit_core]
inherits = "dev"
#rustflags = [
# '--cfg', 'conduwuit_mods',
# '-Ztime-passes',
# '-Zmir-opt-level=0',
# '-Ztls-model=initial-exec',
# '-Cprefer-dynamic=true',
# '-Zstaticlib-prefer-dynamic=true',
# '-Zstaticlib-allow-rdylib-deps=true',
# '-Zpacked-bundled-libs=false',
# '-Zplt=true',
# '-Clink-arg=-Wl,--as-needed',
# '-Clink-arg=-Wl,--allow-shlib-undefined',
# '-Clink-arg=-Wl,-z,lazy',
# '-Clink-arg=-Wl,-z,unique',
# '-Clink-arg=-Wl,-z,nodlopen',
# '-Clink-arg=-Wl,-z,nodelete',
#]
[profile.dev.package.xtask-generate-commands]
inherits = "dev"
[profile.dev.package.conduwuit]
inherits = "dev"
#rustflags = [
@@ -867,7 +856,7 @@ unused-qualifications = "warn"
#unused-results = "warn" # TODO
## some sadness
elided_named_lifetimes = "allow" # TODO!
mismatched_lifetime_syntaxes = "allow" # TODO!
let_underscore_drop = "allow"
missing_docs = "allow"
# cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g.
@@ -1006,3 +995,6 @@ literal_string_with_formatting_args = { level = "allow", priority = 1 }
needless_raw_string_hashes = "allow"
# TODO: Enable this lint & fix all instances
collapsible_if = "allow"

@@ -1,83 +0,0 @@
[Unit]
Description=Continuwuity - Matrix homeserver
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
RequiresMountsFor=/var/lib/private/conduwuit
Alias=matrix-conduwuit.service
[Service]
DynamicUser=yes
Type=notify-reload
ReloadSignal=SIGUSR1
TTYPath=/dev/tty25
DeviceAllow=char-tty
StandardInput=tty-force
StandardOutput=tty
StandardError=journal+console
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
TTYReset=yes
# uncomment to allow buffer to be cleared every restart
TTYVTDisallocate=no
TTYColumns=120
TTYRows=40
AmbientCapabilities=
CapabilityBoundingSet=
DevicePolicy=closed
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
#ProcSubset=pid
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectProc=invisible
ProtectSystem=strict
PrivateDevices=yes
PrivateMounts=yes
PrivateTmp=yes
PrivateUsers=yes
PrivateIPC=yes
RemoveIPC=yes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
StateDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750
Environment=CONTINUWUITY_CONFIG=%d/config.toml
LoadCredential=config.toml:/etc/conduwuit/conduwuit.toml
BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit
ExecStart=/usr/bin/conduwuit
Restart=on-failure
RestartSec=5
TimeoutStopSec=4m
TimeoutStartSec=4m
StartLimitInterval=1m
StartLimitBurst=5
[Install]
WantedBy=multi-user.target

conduwuit-example.toml

@@ -79,9 +79,11 @@
# This is the only directory where continuwuity will save its data,
# including media. Note: this was previously "/var/lib/matrix-conduit".
#
# YOU NEED TO EDIT THIS.
# YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
# `systemd` service. The service file sets it to `/var/lib/conduwuit`
# using an environment variable and also grants write access.
#
# example: "/var/lib/continuwuity"
# example: "/var/lib/conduwuit"
#
#database_path =
@@ -589,13 +591,19 @@
#
#default_room_version = 11
# This item is undocumented. Please contribute documentation for it.
# Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
# Jaeger exporter. Traces will be sent via OTLP to a collector (such as
# Jaeger) that supports the OpenTelemetry Protocol.
#
#allow_jaeger = false
# Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
# environment variable (defaults to http://localhost:4318).
#
#allow_otlp = false
# This item is undocumented. Please contribute documentation for it.
# Filter for OTLP tracing spans. This controls which spans are exported
# to the OTLP collector.
#
#jaeger_filter = "info"
#otlp_filter = "info"
# If the 'perf_measurements' compile-time feature is enabled, enables
# collecting folded stack trace profile of tracing spans using
@@ -1694,6 +1702,10 @@
#
#config_reload_signal = true
# This item is undocumented. Please contribute documentation for it.
#
#ldap = false
[global.tls]
# Path to a valid TLS certificate file.
@@ -1772,3 +1784,91 @@
# is 33.55MB. Setting it to 0 disables blurhashing.
#
#blurhash_max_raw_size = 33554432
[global.ldap]
# Whether to enable LDAP login.
#
# example: "true"
#
#enable = false
# Whether to force LDAP authentication or authorize classical password
# login.
#
# example: "true"
#
#ldap_only = false
# URI of the LDAP server.
#
# example: "ldap://ldap.example.com:389"
#
#uri = ""
# Root of the searches.
#
# example: "ou=users,dc=example,dc=org"
#
#base_dn = ""
# Bind DN if anonymous search is not enabled.
#
# You can use the variable `{username}` that will be replaced by the
# entered username. In such case, the password used to bind will be the
# one provided for the login and not the one given by
# `bind_password_file`. Beware: automatically granting admin rights will
# not work if you use this direct bind instead of a LDAP search.
#
# example: "cn=ldap-reader,dc=example,dc=org" or
# "cn={username},ou=users,dc=example,dc=org"
#
#bind_dn = ""
# Path to a file on the system that contains the password for the
# `bind_dn`.
#
# The server must be able to access the file, and it must not be empty.
#
#bind_password_file = ""
# Search filter to limit user searches.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(&(objectClass=person)(memberOf=matrix))"
#
#filter = "(objectClass=*)"
# Attribute to use to uniquely identify the user.
#
# example: "uid" or "cn"
#
#uid_attribute = "uid"
# Attribute containing the display name of the user.
#
# example: "givenName" or "sn"
#
#name_attribute = "givenName"
# Root of the searches for admin users.
#
# Defaults to `base_dn` if empty.
#
# example: "ou=admins,dc=example,dc=org"
#
#admin_base_dn = ""
# The LDAP search filter to find administrative users for continuwuity.
#
# If left blank, administrative state must be configured manually for each
# user.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
#
#admin_filter = ""

debian/postinst (vendored)

@@ -1,44 +0,0 @@
#!/bin/sh
set -e
# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule
CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit
case "$1" in
configure)
# Create the `conduwuit` user if it does not exist yet.
if ! getent passwd conduwuit > /dev/null ; then
echo 'Adding system user for the conduwuit Matrix homeserver' 1>&2
adduser --system --group --quiet \
--home "$CONDUWUIT_DATABASE_PATH" \
--disabled-login \
--shell "/usr/sbin/nologin" \
conduwuit
fi
# Create the database path if it does not exist yet and fix up ownership
# and permissions for the config.
mkdir -v -p "$CONDUWUIT_DATABASE_PATH"
# symlink the previous location for compatibility if it does not exist yet.
if ! test -L "/var/lib/matrix-conduit" ; then
ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
fi
chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH"
chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH"
chmod -v 740 "$CONDUWUIT_DATABASE_PATH"
echo ''
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
echo 'To start the server, run: systemctl start conduwuit.service'
echo ''
;;
esac
#DEBHELPER#

@@ -21,6 +21,7 @@ This document contains the help content for the `admin` command-line program.
* [`admin users list-joined-rooms`↴](#admin-users-list-joined-rooms)
* [`admin users force-join-room`↴](#admin-users-force-join-room)
* [`admin users force-leave-room`↴](#admin-users-force-leave-room)
* [`admin users force-leave-remote-room`↴](#admin-users-force-leave-remote-room)
* [`admin users force-demote`↴](#admin-users-force-demote)
* [`admin users make-user-admin`↴](#admin-users-make-user-admin)
* [`admin users put-room-tag`↴](#admin-users-put-room-tag)
@@ -295,6 +296,7 @@ You can find the ID using the `list-appservices` command.
* `list-joined-rooms` — - Lists all the rooms (local and remote) that the specified user is joined in
* `force-join-room` — - Manually join a local user to a room
* `force-leave-room` — - Manually leave a local user from a room
* `force-leave-remote-room` — - Manually leave a remote room for a local user
* `force-demote` — - Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits
* `make-user-admin` — - Grant server-admin privileges to a user
* `put-room-tag` — - Puts a room tag for the specified user and room ID
@@ -449,6 +451,19 @@ Reverses the effects of the `suspend` command, allowing the user to send messages again.
## `admin users force-leave-remote-room`
- Manually leave a remote room for a local user
**Usage:** `admin users force-leave-remote-room <USER_ID> <ROOM_ID>`
###### **Arguments:**
* `<USER_ID>`
* `<ROOM_ID>`
## `admin users force-demote`
- Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits

@@ -9,24 +9,11 @@
</details>
## Debian systemd unit file
## systemd unit file
<details>
<summary>Debian systemd unit file</summary>
<summary>systemd unit file</summary>
```
{{#include ../../debian/conduwuit.service}}
{{#include ../../pkg/conduwuit.service}}
```
</details>
## Arch Linux systemd unit file
<details>
<summary>Arch Linux systemd unit file</summary>
```
{{#include ../../arch/conduwuit.service}}
```
</details>

@@ -1 +1 @@
{{#include ../../debian/README.md}}
{{#include ../../pkg/debian/README.md}}

flake.lock (generated)

@@ -153,11 +153,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1751525020,
"narHash": "sha256-oDO6lCYS5Bf4jUITChj9XV7k3TP38DE0Ckz5n5ORCME=",
"lastModified": 1755585599,
"narHash": "sha256-tl/0cnsqB/Yt7DbaGMel2RLa7QG5elA8lkaOXli6VdY=",
"owner": "nix-community",
"repo": "fenix",
"rev": "a1a5f92f47787e7df9f30e5e5ac13e679215aa1e",
"rev": "6ed03ef4c8ec36d193c18e06b9ecddde78fb7e42",
"type": "github"
},
"original": {
@@ -516,16 +516,16 @@
"rocksdb": {
"flake": false,
"locked": {
"lastModified": 1741308171,
"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
"ref": "v9.11.1",
"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
"revCount": 13177,
"lastModified": 1753385396,
"narHash": "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=",
"ref": "10.4.fb",
"rev": "28d4b7276c16ed3e28af1bd96162d6442ce25923",
"revCount": 13318,
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
},
"original": {
"ref": "v9.11.1",
"ref": "10.4.fb",
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
}
@@ -546,11 +546,11 @@
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1751433876,
"narHash": "sha256-IsdwOcvLLDDlkFNwhdD5BZy20okIQL01+UQ7Kxbqh8s=",
"lastModified": 1755504847,
"narHash": "sha256-VX0B9hwhJypCGqncVVLC+SmeMVd/GAYbJZ0MiiUn2Pk=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "11d45c881389dae90b0da5a94cde52c79d0fc7ef",
"rev": "a905e3b21b144d77e1b304e49f3264f6f8d4db75",
"type": "github"
},
"original": {

flake.nix

@@ -17,7 +17,7 @@
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
rocksdb = {
url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=v9.11.1";
url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=10.4.fb";
flake = false;
};
};
@@ -31,20 +31,24 @@
inherit system;
};
fnx = inputs.fenix.packages.${system};
# The Rust toolchain to use
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
toolchain = fnx.combine [
(fnx.fromToolchainFile {
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-KUm16pHj+cRedf8vxs/Hd2YWxpOrWZ7UOrwhILdSJBU=";
};
# See also `rust-toolchain.toml`
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
})
fnx.complete.rustfmt
];
mkScope =
pkgs:
pkgs.lib.makeScope pkgs.newScope (self: {
inherit pkgs inputs;
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
main = self.callPackage ./nix/pkgs/main { };
main = self.callPackage ./pkg/nix/pkgs/main { };
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [
@@ -62,7 +66,7 @@
}).overrideAttrs
(old: {
src = inputs.rocksdb;
version = "v9.11.1";
version = "v10.4.fb";
cmakeFlags =
pkgs.lib.subtractLists [
# No real reason to have snappy or zlib, no one uses this

pkg/conduwuit.service

@@ -1,25 +1,24 @@
[Unit]
Description=Continuwuity - Matrix homeserver
Documentation=https://continuwuity.org/
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
Alias=matrix-conduwuit.service
[Service]
DynamicUser=yes
User=conduwuit
Group=conduwuit
Type=notify
Type=notify-reload
ReloadSignal=SIGUSR1
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"
ExecStart=/usr/sbin/conduwuit
ReadWritePaths=/var/lib/conduwuit /etc/conduwuit
ExecStart=/usr/bin/conduwuit
AmbientCapabilities=
CapabilityBoundingSet=
@@ -52,16 +51,17 @@ SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
#StateDirectory=conduwuit
StateDirectory=conduwuit
ConfigurationDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750
Restart=on-failure
RestartSec=5
TimeoutStopSec=2m
TimeoutStartSec=2m
TimeoutStopSec=4m
TimeoutStartSec=4m
StartLimitInterval=1m
StartLimitBurst=5

pkg/debian/postinst (new file)

@@ -0,0 +1,20 @@
#!/bin/sh
set -e
# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule
CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit
case "$1" in
configure)
echo ''
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
echo 'To start the server, run: systemctl start conduwuit.service'
echo ''
;;
esac
#DEBHELPER#

pkg/debian/postrm

@@ -20,24 +20,18 @@ case $1 in
if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then
if test -L "$CONDUWUIT_CONFIG_PATH"; then
echo "Deleting conduwuit configuration files"
echo "Deleting continuwuity configuration files"
rm -v -r "$CONDUWUIT_CONFIG_PATH"
fi
fi
if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then
if test -L "$CONDUWUIT_DATABASE_PATH"; then
echo "Deleting conduwuit database directory"
echo "Deleting continuwuity database directory"
rm -r "$CONDUWUIT_DATABASE_PATH"
fi
fi
if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then
if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then
echo "Removing matrix-conduit symlink"
rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
fi
fi
;;
esac

@@ -0,0 +1,80 @@
# This should be run using rpkg-util: https://docs.pagure.org/rpkg-util
# it requires Internet access and is not suitable for Fedora main repos
# TODO: rpkg-util is no longer maintained, find a replacement
Name: continuwuity
Version: {{{ git_repo_version }}}
Release: 1%{?dist}
Summary: Very cool Matrix chat homeserver written in Rust
License: Apache-2.0 AND MIT
URL: https://continuwuity.org
VCS: {{{ git_repo_vcs }}}
Source: {{{ git_repo_pack }}}
BuildRequires: cargo-rpm-macros >= 25
BuildRequires: systemd-rpm-macros
# Needed to build rust-librocksdb-sys
BuildRequires: clang
BuildRequires: liburing-devel
Requires: liburing
Requires: glibc
Requires: libstdc++
%global _description %{expand:
A cool hard fork of Conduit, a Matrix homeserver written in Rust}
%description %{_description}
%prep
{{{ git_repo_setup_macro }}}
%cargo_prep -N
# Perform an online build so Git dependencies can be retrieved
sed -i 's/^offline = true$//' .cargo/config.toml
%build
%cargo_build
# Here's the one legally required mystery incantation in this file.
# Some of our dependencies have source files which are (for some reason) marked as executable.
# Files in .cargo/registry/ are copied into /usr/src/ by the debuginfo machinery
# at the end of the build step, and then the BRP shebang mangling script checks
# the entire buildroot to find executable files, and fails the build because
# it thinks Rust's file attributes are shebangs because they start with `#!`.
# So we have to clear the executable bit on all of them before that happens.
find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
# TODO: this fails currently because it's forced to run in offline mode
# {cargo_license -- --no-dev} > LICENSE.dependencies
%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
%files
%license LICENSE
%license src/core/matrix/state_res/LICENSE
%doc CODE_OF_CONDUCT.md
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config %{_sysconfdir}/conduwuit/conduwuit.toml
%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service
# Do not create /var/lib/conduwuit, systemd will create it if necessary
%post
%systemd_post conduwuit.service
%preun
%systemd_preun conduwuit.service
%postun
%systemd_postun_with_restart conduwuit.service
%changelog
{{{ git_repo_changelog }}}


@ -1,26 +1,59 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended"
],
"lockFileMaintenance": {
"enabled": true,
"schedule": [
"at any time"
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended"],
"lockFileMaintenance": {
"enabled": true,
"schedule": ["at any time"]
},
"nix": {
"enabled": true
},
"labels": ["Dependencies", "Dependencies/Renovate"],
"ignoreDeps": [
"tikv-jemallocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"opentelemetry",
"opentelemetry_sdk",
"opentelemetry-jaeger",
"tracing-opentelemetry"
],
"github-actions": {
"enabled": true,
"managerFilePatterns": [
"/(^|/)\\.forgejo/workflows/[^/]+\\.ya?ml$/",
"/(^|/)\\.forgejo/actions/[^/]+/action\\.ya?ml$/",
"/(^|/)\\.github/workflows/[^/]+\\.ya?ml$/",
"/(^|/)\\.github/actions/[^/]+/action\\.ya?ml$/"
]
},
"packageRules": [
{
"description": "Batch minor and patch GitHub Actions updates",
"matchManagers": ["github-actions"],
"matchUpdateTypes": ["minor", "patch"],
"groupName": "github-actions-non-major"
},
{
"description": "Group Rust toolchain updates into a single PR",
"matchManagers": ["custom.regex"],
"matchPackageNames": ["rust", "rustc", "cargo"],
"groupName": "rust-toolchain"
},
{
"description": "Group lockfile updates into a single PR",
"matchUpdateTypes": ["lockFileMaintenance"],
"groupName": "lockfile-maintenance"
},
{
"description": "Batch patch-level Rust dependency updates",
"matchManagers": ["cargo"],
"matchUpdateTypes": ["patch"],
"groupName": "rust-patch-updates"
},
{
"matchManagers": ["cargo"],
"prConcurrentLimit": 5
}
]
},
"nix": {
"enabled": true
},
"labels": [
"dependencies",
"github_actions"
],
"ignoreDeps": [
"tikv-jemllocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"opentelemetry-rust",
"tracing-opentelemetry"
]
}


@ -9,13 +9,16 @@
# If you're having trouble making the relevant changes, bug a maintainer.
[toolchain]
channel = "1.87.0"
profile = "minimal"
channel = "1.89.0"
components = [
# For rust-analyzer
"rust-src",
"rust-analyzer",
# For CI and editors
"rustfmt",
"clippy",
# you have to install rustfmt nightly yourself (if you're not on NixOS)
#
# The rust-toolchain.toml file doesn't provide any syntax for specifying components from different toolchains
# "rustfmt"
]


@ -89,6 +89,7 @@ serde_yaml.workspace = true
tokio.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
ctor.workspace = true
[lints]
workspace = true


@ -29,6 +29,8 @@ pub(crate) use crate::{context::Context, utils::get_room_info};
pub(crate) const PAGE_SIZE: usize = 100;
use ctor::{ctor, dtor};
conduwuit::mod_ctor! {}
conduwuit::mod_dtor! {}
conduwuit::rustc_flags_capture! {}
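For readers unfamiliar with the crate now imported directly rather than through conduwuit's re-exports, a minimal sketch of what `ctor`/`dtor` provide (function names here are illustrative, not from the codebase):

use ctor::{ctor, dtor};

#[ctor]
fn on_load() {
    // runs when this module (shared object or binary) is loaded, before main()
}

#[dtor]
fn on_unload() {
    // runs when the module is unloaded or the process exits
}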


@ -1,8 +1,8 @@
use std::{collections::BTreeMap, fmt::Write as _};
use api::client::{
full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, update_avatar_url,
update_displayname,
full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room,
update_avatar_url, update_displayname,
};
use conduwuit::{
Err, Result, debug, debug_warn, error, info, is_equal_to,
@ -68,7 +68,8 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
// Create user
self.services
.users
.create(&user_id, Some(password.as_str()))?;
.create(&user_id, Some(password.as_str()), None)
.await?;
// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
@ -284,6 +285,7 @@ pub(super) async fn reset_password(&self, username: String, password: Option<Str
.services
.users
.set_password(&user_id, Some(new_password.as_str()))
.await
{
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
| Ok(()) => {
@ -924,3 +926,29 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
))
.await
}
#[admin_command]
pub(super) async fn force_leave_remote_room(
&self,
user_id: String,
room_id: OwnedRoomOrAliasId,
) -> Result {
let user_id = parse_local_user_id(self.services, &user_id)?;
let (room_id, _) = self
.services
.rooms
.alias
.resolve_with_servers(&room_id, None)
.await?;
assert!(
self.services.globals.user_is_local(&user_id),
"Parsed user_id must be a local user"
);
remote_leave_room(self.services, &user_id, &room_id, None)
.boxed()
.await?;
self.write_str(&format!("{user_id} has been joined to {room_id}.",))
.await
}
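A hypothetical admin-room invocation of the new command, assuming the usual kebab-case mapping of these clap-derived subcommands and placeholder IDs:

!admin users force-leave-remote-room @alice:example.org !room:remote.example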


@ -103,6 +103,12 @@ pub enum UserCommand {
room_id: OwnedRoomOrAliasId,
},
/// - Manually leave a remote room for a local user.
ForceLeaveRemoteRoom {
user_id: String,
room_id: OwnedRoomOrAliasId,
},
/// - Forces the specified user to drop their power levels to the room
/// default, if their permissions allow and the auth check permits
ForceDemote {


@ -49,6 +49,9 @@ jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
"conduwuit-service/jemalloc_stats",
]
ldap = [
"conduwuit-service/ldap"
]
release_max_log_level = [
"conduwuit-core/release_max_log_level",
"conduwuit-service/release_max_log_level",
@ -90,6 +93,7 @@ serde.workspace = true
sha1.workspace = true
tokio.workspace = true
tracing.workspace = true
ctor.workspace = true
[lints]
workspace = true


@ -373,7 +373,7 @@ pub(crate) async fn register_route(
let password = if is_guest { None } else { body.password.as_deref() };
// Create user
services.users.create(&user_id, password)?;
services.users.create(&user_id, password, None).await?;
// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
@ -659,7 +659,8 @@ pub(crate) async fn change_password_route(
services
.users
.set_password(sender_user, Some(&body.new_password))?;
.set_password(sender_user, Some(&body.new_password))
.await?;
if body.logout_devices {
// Logout all devices except the current one
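The extra `None` threaded through these `users.create` calls appears to be a new account-origin parameter: the LDAP code later in this diff creates users with `Some("ldap")`, and `password_login` rejects accounts whose stored origin is not "password". The `set_password` calls likewise become async, hence the added `.await`s. (Inferred from this diff, not from a documented API.)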


@ -0,0 +1,3 @@
mod suspend;
pub(crate) use self::suspend::*;


@ -0,0 +1,89 @@
use axum::extract::State;
use conduwuit::{Err, Result};
use futures::future::{join, join3};
use ruma::api::client::admin::{get_suspended, set_suspended};
use crate::Ruma;
/// # `GET /_matrix/client/v1/admin/suspend/{userId}`
///
/// Check the suspension status of a target user
pub(crate) async fn get_suspended_status(
State(services): State<crate::State>,
body: Ruma<get_suspended::v1::Request>,
) -> Result<get_suspended::v1::Response> {
let sender_user = body.sender_user();
let (admin, active) =
join(services.users.is_admin(sender_user), services.users.is_active(&body.user_id)).await;
if !admin {
return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
}
if !services.globals.user_is_local(&body.user_id) {
return Err!(Request(InvalidParam("Can only check the suspended status of local users")));
}
if !active {
return Err!(Request(NotFound("Unknown user")));
}
Ok(get_suspended::v1::Response::new(
services.users.is_suspended(&body.user_id).await?,
))
}
/// # `PUT /_matrix/client/v1/admin/suspend/{userId}`
///
/// Set the suspension status of a target user
pub(crate) async fn put_suspended_status(
State(services): State<crate::State>,
body: Ruma<set_suspended::v1::Request>,
) -> Result<set_suspended::v1::Response> {
let sender_user = body.sender_user();
let (sender_admin, active, target_admin) = join3(
services.users.is_admin(sender_user),
services.users.is_active(&body.user_id),
services.users.is_admin(&body.user_id),
)
.await;
if !sender_admin {
return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
}
if !services.globals.user_is_local(&body.user_id) {
return Err!(Request(InvalidParam("Can only set the suspended status of local users")));
}
if !active {
return Err!(Request(NotFound("Unknown user")));
}
if body.user_id == *sender_user {
return Err!(Request(Forbidden("You cannot suspend yourself")));
}
if target_admin {
return Err!(Request(Forbidden("You cannot suspend another server administrator")));
}
if services.users.is_suspended(&body.user_id).await? == body.suspended {
// No change
return Ok(set_suspended::v1::Response::new(body.suspended));
}
let action = if body.suspended {
services
.users
.suspend_account(&body.user_id, sender_user)
.await;
"suspended"
} else {
services.users.unsuspend_account(&body.user_id).await;
"unsuspended"
};
if services.config.admin_room_notices {
// Notify the admin room that an account has been un/suspended
services
.admin
.send_text(&format!("{} has been {} by {}.", body.user_id, action, sender_user))
.await;
}
Ok(set_suspended::v1::Response::new(body.suspended))
}
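A hypothetical exchange against the new endpoints, with placeholder IDs and shapes inferred from the handlers above:

GET /_matrix/client/v1/admin/suspend/@alice:example.org
-> 200 {"suspended": false}

PUT /_matrix/client/v1/admin/suspend/@alice:example.org
{"suspended": true}
-> 200 {"suspended": true}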


@ -19,7 +19,7 @@ use crate::Ruma;
/// of this server.
pub(crate) async fn get_capabilities_route(
State(services): State<crate::State>,
_body: Ruma<get_capabilities::v3::Request>,
body: Ruma<get_capabilities::v3::Request>,
) -> Result<get_capabilities::v3::Response> {
let available: BTreeMap<RoomVersionId, RoomVersionStability> =
Server::available_room_versions().collect();
@ -45,5 +45,14 @@ pub(crate) async fn get_capabilities_route(
json!({"enabled": services.config.forget_forced_upon_leave}),
)?;
if services
.users
.is_admin(body.sender_user.as_ref().unwrap())
.await
{
// Advertise suspension API
capabilities.set("uk.timedout.msc4323", json!({"suspend":true, "lock": false}))?;
}
Ok(get_capabilities::v3::Response { capabilities })
}
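For an admin caller, the capabilities response then includes the fragment set above:

"uk.timedout.msc4323": {"suspend": true, "lock": false}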


@ -156,31 +156,34 @@ pub(crate) async fn join_room_by_id_or_alias_route(
.await?;
let mut servers = body.via.clone();
servers.extend(
services
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
if servers.is_empty() {
debug!("No via servers provided for join, injecting some.");
servers.extend(
services
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
servers.extend(
services
.rooms
.state_cache
.invite_state(sender_user, &room_id)
.await
.unwrap_or_default()
.iter()
.filter_map(|event| event.get_field("sender").ok().flatten())
.filter_map(|sender: &str| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()),
);
servers.extend(
services
.rooms
.state_cache
.invite_state(sender_user, &room_id)
.await
.unwrap_or_default()
.iter()
.filter_map(|event| event.get_field("sender").ok().flatten())
.filter_map(|sender: &str| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()),
);
if let Some(server) = room_id.server_name() {
servers.push(server.to_owned());
if let Some(server) = room_id.server_name() {
servers.push(server.to_owned());
}
}
servers.sort_unstable();
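Net effect of the reordering: a join that already carries via servers (e.g. `/_matrix/client/v3/join/!room:remote.example?via=matrix.org`) now uses only those, while a join without any falls back to the invite-derived servers plus the room ID's own server name, as before.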


@ -215,7 +215,7 @@ pub async fn leave_room(
Ok(())
}
async fn remote_leave_room(
pub async fn remote_leave_room(
services: &Services,
user_id: &UserId,
room_id: &RoomId,


@ -29,7 +29,7 @@ pub(crate) use self::{
};
pub use self::{
join::join_room_by_id_helper,
leave::{leave_all_rooms, leave_room},
leave::{leave_all_rooms, leave_room, remote_leave_room},
};
use crate::{Ruma, client::full_user_deactivate};


@ -321,7 +321,7 @@ pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Opti
filter.matches(pdu).then_some(item)
}
#[cfg_attr(debug_assertions, conduwuit::ctor)]
#[cfg_attr(debug_assertions, ctor::ctor)]
fn _is_sorted() {
debug_assert!(
IGNORED_MESSAGE_TYPES.is_sorted(),


@ -1,5 +1,6 @@
pub(super) mod account;
pub(super) mod account_data;
pub(super) mod admin;
pub(super) mod alias;
pub(super) mod appservice;
pub(super) mod backup;
@ -43,6 +44,7 @@ pub(super) mod well_known;
pub use account::full_user_deactivate;
pub(super) use account::*;
pub(super) use account_data::*;
pub(super) use admin::*;
pub(super) use alias::*;
pub(super) use appservice::*;
pub(super) use backup::*;
@ -55,7 +57,7 @@ pub(super) use keys::*;
pub(super) use media::*;
pub(super) use media_legacy::*;
pub(super) use membership::*;
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room};
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room};
pub(super) use message::*;
pub(super) use openid::*;
pub(super) use presence::*;


@ -90,7 +90,7 @@ pub(crate) async fn get_displayname_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services
@ -189,7 +189,7 @@ pub(crate) async fn get_avatar_url_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services
@ -248,7 +248,7 @@ pub(crate) async fn get_profile_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services


@ -3,13 +3,14 @@ use std::time::Duration;
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Error, Result, debug, err, info, utils,
utils::{ReadyExt, hash},
Err, Error, Result, debug, err, info,
utils::{self, ReadyExt, hash},
};
use conduwuit_service::uiaa::SESSION_ID_LENGTH;
use conduwuit_core::{debug_error, debug_warn};
use conduwuit_service::{Services, uiaa::SESSION_ID_LENGTH};
use futures::StreamExt;
use ruma::{
UserId,
OwnedUserId, UserId,
api::client::{
session::{
get_login_token,
@ -49,6 +50,154 @@ pub(crate) async fn get_login_types_route(
]))
}
/// Authenticates the given user by its ID and its password.
///
/// Returns the user ID if successful, and an error otherwise.
#[tracing::instrument(skip_all, fields(%user_id), name = "password")]
pub(crate) async fn password_login(
services: &Services,
user_id: &UserId,
lowercased_user_id: &UserId,
password: &str,
) -> Result<OwnedUserId> {
// Restrict login to accounts only of type 'password', including untyped
// legacy accounts which are equivalent to 'password'.
if services
.users
.origin(user_id)
.await
.is_ok_and(|origin| origin != "password")
{
return Err!(Request(Forbidden("Account does not permit password login.")));
}
let (hash, user_id) = match services.users.password_hash(user_id).await {
| Ok(hash) => (hash, user_id),
| Err(_) => services
.users
.password_hash(lowercased_user_id)
.await
.map(|hash| (hash, lowercased_user_id))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?,
};
if hash.is_empty() {
return Err!(Request(UserDeactivated("The user has been deactivated")));
}
hash::verify_password(password, &hash)
.inspect_err(|e| debug_error!("{e}"))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
Ok(user_id.to_owned())
}
/// Authenticates the given user through the configured LDAP server.
///
/// Creates the user if it is found in the LDAP directory and does not
/// already have an account.
#[tracing::instrument(skip_all, fields(%user_id), name = "ldap")]
pub(super) async fn ldap_login(
services: &Services,
user_id: &UserId,
lowercased_user_id: &UserId,
password: &str,
) -> Result<OwnedUserId> {
let (user_dn, is_ldap_admin) = match services.config.ldap.bind_dn.as_ref() {
| Some(bind_dn) if bind_dn.contains("{username}") =>
(bind_dn.replace("{username}", lowercased_user_id.localpart()), false),
| _ => {
debug!("Searching user in LDAP");
let dns = services.users.search_ldap(user_id).await?;
if dns.len() >= 2 {
return Err!(Ldap("LDAP search returned two or more results"));
}
let Some((user_dn, is_admin)) = dns.first() else {
return password_login(services, user_id, lowercased_user_id, password).await;
};
(user_dn.clone(), *is_admin)
},
};
let user_id = services
.users
.auth_ldap(&user_dn, password)
.await
.map(|()| lowercased_user_id.to_owned())?;
// LDAP users are automatically created on their first login attempt. This is
// a very common feature among services using an LDAP provider for their
// users (Synapse, Nextcloud, Jellyfin, ...).
//
// LDAP users are created with a dummy but non-empty password, because an
// empty password is reserved for deactivated accounts. The conduwuit
// password field is never read when logging in an LDAP user, so this is not
// an issue.
if !services.users.exists(lowercased_user_id).await {
services
.users
.create(lowercased_user_id, Some("*"), Some("ldap"))
.await?;
}
let is_conduwuit_admin = services.admin.user_is_admin(lowercased_user_id).await;
if is_ldap_admin && !is_conduwuit_admin {
services.admin.make_user_admin(lowercased_user_id).await?;
} else if !is_ldap_admin && is_conduwuit_admin {
services.admin.revoke_admin(lowercased_user_id).await?;
}
Ok(user_id)
}
pub(crate) async fn handle_login(
services: &Services,
body: &Ruma<login::v3::Request>,
identifier: Option<&uiaa::UserIdentifier>,
password: &str,
user: Option<&String>,
) -> Result<OwnedUserId> {
debug!("Got password login type");
let user_id =
if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
UserId::parse_with_server_name(user_id, &services.config.server_name)
} else if let Some(user) = user {
UserId::parse_with_server_name(user, &services.config.server_name)
} else {
return Err!(Request(Unknown(
debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
)));
}
.map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;
let lowercased_user_id = UserId::parse_with_server_name(
user_id.localpart().to_lowercase(),
&services.config.server_name,
)?;
if !services.globals.user_is_local(&user_id)
|| !services.globals.user_is_local(&lowercased_user_id)
{
return Err!(Request(Unknown("User ID does not belong to this homeserver")));
}
if cfg!(feature = "ldap") && services.config.ldap.enable {
match Box::pin(ldap_login(services, &user_id, &lowercased_user_id, password)).await {
| Ok(user_id) => Ok(user_id),
| Err(err) if services.config.ldap.ldap_only => Err(err),
| Err(err) => {
debug_warn!("{err}");
password_login(services, &user_id, &lowercased_user_id, password).await
},
}
} else {
password_login(services, &user_id, &lowercased_user_id, password).await
}
}
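// Note (editor): the lowercased fallback above means a login submitted as
// `@Alice:example.org` is retried as `@alice:example.org` before failing
// with "Wrong username or password."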
/// # `POST /_matrix/client/v3/login`
///
/// Authenticates the user and returns an access token it can use in subsequent
@ -80,70 +229,7 @@ pub(crate) async fn login_route(
password,
user,
..
}) => {
debug!("Got password login type");
let user_id =
if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
UserId::parse_with_server_name(user_id, &services.config.server_name)
} else if let Some(user) = user {
UserId::parse_with_server_name(user, &services.config.server_name)
} else {
return Err!(Request(Unknown(
debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
)));
}
.map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;
let lowercased_user_id = UserId::parse_with_server_name(
user_id.localpart().to_lowercase(),
&services.config.server_name,
)?;
if !services.globals.user_is_local(&user_id)
|| !services.globals.user_is_local(&lowercased_user_id)
{
return Err!(Request(Unknown("User ID does not belong to this homeserver")));
}
// first try the username as-is
let hash = services
.users
.password_hash(&user_id)
.await
.inspect_err(|e| debug!("{e}"));
match hash {
| Ok(hash) => {
if hash.is_empty() {
return Err!(Request(UserDeactivated("The user has been deactivated")));
}
hash::verify_password(password, &hash)
.inspect_err(|e| debug!("{e}"))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
user_id
},
| Err(_e) => {
let hash_lowercased_user_id = services
.users
.password_hash(&lowercased_user_id)
.await
.inspect_err(|e| debug!("{e}"))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
if hash_lowercased_user_id.is_empty() {
return Err!(Request(UserDeactivated("The user has been deactivated")));
}
hash::verify_password(password, &hash_lowercased_user_id)
.inspect_err(|e| debug!("{e}"))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
lowercased_user_id
},
}
},
}) => handle_login(&services, &body, identifier.as_ref(), password, user.as_ref()).await?,
| login::v3::LoginInfo::Token(login::v3::Token { token }) => {
debug!("Got token login type");
if !services.server.config.login_via_existing_session {


@ -45,6 +45,7 @@ use crate::{
type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
#[allow(clippy::cognitive_complexity)]
/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
///
/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)


@ -292,7 +292,7 @@ pub(crate) async fn get_timezone_key_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services
@ -352,7 +352,7 @@ pub(crate) async fn get_profile_key_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services


@ -58,6 +58,7 @@ pub(crate) async fn get_supported_versions_route(
("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */
("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */
("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */
("uk.timedout.msc4323".to_owned(), true), /* agnostic suspend (https://github.com/matrix-org/matrix-spec-proposals/pull/4323) */
]),
};


@ -184,6 +184,8 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
"/_matrix/client/unstable/im.nheko.summary/rooms/:room_id_or_alias/summary",
get(client::get_room_summary_legacy)
)
.ruma_route(&client::get_suspended_status)
.ruma_route(&client::put_suspended_status)
.ruma_route(&client::well_known_support)
.ruma_route(&client::well_known_client)
.route("/_conduwuit/server_version", get(client::conduwuit_server_version))


@ -1,3 +1,4 @@
#![allow(clippy::doc_link_with_quotes)]
pub mod check;
pub mod manager;
pub mod proxy;
@ -125,9 +126,11 @@ pub struct Config {
/// This is the only directory where continuwuity will save its data,
/// including media. Note: this was previously "/var/lib/matrix-conduit".
///
/// YOU NEED TO EDIT THIS.
/// YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
/// `systemd` service. The service file sets it to `/var/lib/conduwuit`
/// using an environment variable and also grants write access.
///
/// example: "/var/lib/continuwuity"
/// example: "/var/lib/conduwuit"
pub database_path: PathBuf,
/// continuwuity supports online database backups using RocksDB's Backup
@ -711,12 +714,21 @@ pub struct Config {
#[serde(default)]
pub well_known: WellKnownConfig,
#[serde(default)]
pub allow_jaeger: bool,
/// Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
/// Jaeger exporter. Traces will be sent via OTLP to a collector (such as
/// Jaeger) that supports the OpenTelemetry Protocol.
///
/// Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
/// environment variable (defaults to http://localhost:4318).
#[serde(default, alias = "allow_jaeger")]
pub allow_otlp: bool,
/// Filter for OTLP tracing spans. This controls which spans are exported
/// to the OTLP collector.
///
/// default: "info"
#[serde(default = "default_jaeger_filter")]
pub jaeger_filter: String,
#[serde(default = "default_otlp_filter", alias = "jaeger_filter")]
pub otlp_filter: String,
/// If the 'perf_measurements' compile-time feature is enabled, enables
/// collecting folded stack trace profile of tracing spans using
@ -1945,6 +1957,10 @@ pub struct Config {
pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure:
bool,
// external structure; separate section
#[serde(default)]
pub ldap: LdapConfig,
// external structure; separate section
#[serde(default)]
pub blurhashing: BlurhashConfig,
@ -2039,6 +2055,114 @@ pub struct BlurhashConfig {
pub blurhash_max_raw_size: u64,
}
#[derive(Clone, Debug, Default, Deserialize)]
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.ldap")]
pub struct LdapConfig {
/// Whether to enable LDAP login.
///
/// example: "true"
#[serde(default)]
pub enable: bool,
/// Whether to force LDAP-only authentication or to fall back to classical
/// password login when LDAP authentication fails.
///
/// example: "true"
#[serde(default)]
pub ldap_only: bool,
/// URI of the LDAP server.
///
/// example: "ldap://ldap.example.com:389"
///
/// default: ""
#[serde(default)]
pub uri: Option<Url>,
/// Root of the searches.
///
/// example: "ou=users,dc=example,dc=org"
///
/// default: ""
#[serde(default)]
pub base_dn: String,
/// Bind DN if anonymous search is not enabled.
///
/// You can use the variable `{username}` that will be replaced by the
/// entered username. In such case, the password used to bind will be the
/// one provided for the login and not the one given by
/// `bind_password_file`. Beware: automatically granting admin rights will
/// not work if you use this direct bind instead of an LDAP search.
///
/// example: "cn=ldap-reader,dc=example,dc=org" or
/// "cn={username},ou=users,dc=example,dc=org"
///
/// default: ""
#[serde(default)]
pub bind_dn: Option<String>,
/// Path to a file on the system that contains the password for the
/// `bind_dn`.
///
/// The server must be able to access the file, and it must not be empty.
///
/// default: ""
#[serde(default)]
pub bind_password_file: Option<PathBuf>,
/// Search filter to limit user searches.
///
/// You can use the variable `{username}` that will be replaced by the
/// entered username for more complex filters.
///
/// example: "(&(objectClass=person)(memberOf=matrix))"
///
/// default: "(objectClass=*)"
#[serde(default = "default_ldap_search_filter")]
pub filter: String,
/// Attribute to use to uniquely identify the user.
///
/// example: "uid" or "cn"
///
/// default: "uid"
#[serde(default = "default_ldap_uid_attribute")]
pub uid_attribute: String,
/// Attribute containing the display name of the user.
///
/// example: "givenName" or "sn"
///
/// default: "givenName"
#[serde(default = "default_ldap_name_attribute")]
pub name_attribute: String,
/// Root of the searches for admin users.
///
/// Defaults to `base_dn` if empty.
///
/// example: "ou=admins,dc=example,dc=org"
///
/// default: ""
#[serde(default)]
pub admin_base_dn: String,
/// The LDAP search filter to find administrative users for continuwuity.
///
/// If left blank, administrative state must be configured manually for each
/// user.
///
/// You can use the variable `{username}` that will be replaced by the
/// entered username for more complex filters.
///
/// example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
///
/// default: ""
#[serde(default)]
pub admin_filter: String,
}
#[derive(Deserialize, Clone, Debug)]
#[serde(transparent)]
struct ListeningPort {
@ -2253,7 +2377,7 @@ fn default_tracing_flame_filter() -> String {
.to_owned()
}
fn default_jaeger_filter() -> String {
fn default_otlp_filter() -> String {
cfg!(debug_assertions)
.then_some("trace,h2=off")
.unwrap_or("info")
@ -2432,3 +2556,9 @@ pub(super) fn default_blurhash_x_component() -> u32 { 4 }
pub(super) fn default_blurhash_y_component() -> u32 { 3 }
// end recommended & blurhashing defaults
fn default_ldap_search_filter() -> String { "(objectClass=*)".to_owned() }
fn default_ldap_uid_attribute() -> String { String::from("uid") }
fn default_ldap_name_attribute() -> String { String::from("givenName") }
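Pulled together, a minimal `[global.ldap]` sketch for conduwuit-example.toml, built only from the example values in the docs above (placeholder values, not a tested configuration):

[global.ldap]
enable = true
uri = "ldap://ldap.example.com:389"
base_dn = "ou=users,dc=example,dc=org"
bind_dn = "cn=ldap-reader,dc=example,dc=org"
bind_password_file = "/etc/conduwuit/ldap-password"
filter = "(&(objectClass=person)(memberOf=matrix))"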


@ -100,7 +100,7 @@ pub fn trap() {
#[must_use]
pub fn panic_str(p: &Box<dyn Any + Send>) -> &'static str {
p.downcast_ref::<&str>().copied().unwrap_or_default()
(**p).downcast_ref::<&str>().copied().unwrap_or_default()
}
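// Note (editor): without the double deref, method resolution can hit `Any` on
// the `Box<dyn Any + Send>` itself (the box is 'static, hence also `Any`), so
// the downcast to `&str` always failed and this returned "". `(**p)` targets
// the boxed panic payload instead.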
#[inline(always)]


@ -110,6 +110,8 @@ pub enum Error {
InconsistentRoomState(&'static str, ruma::OwnedRoomId),
#[error(transparent)]
IntoHttp(#[from] ruma::api::error::IntoHttpError),
#[error("{0}")]
Ldap(Cow<'static, str>),
#[error(transparent)]
Mxc(#[from] ruma::MxcUriError),
#[error(transparent)]


@ -18,7 +18,7 @@ pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[
/// Experimental, partially supported room versions
pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] =
&[RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
&[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
type RoomVersion = (RoomVersionId, RoomVersionStability);


@ -66,6 +66,7 @@ serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
tracing.workspace = true
ctor.workspace = true
[lints]
workspace = true


@ -19,7 +19,7 @@ where
S: Stream<Item = K> + Send + 'a,
K: AsRef<[u8]> + Send + Sync + 'a,
{
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a;
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
}
impl<'a, K, S> Get<'a, K, S> for S
@ -29,7 +29,7 @@ where
K: AsRef<[u8]> + Send + Sync + 'a,
{
#[inline]
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a {
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
map.get_batch(self)
}
}
@ -39,7 +39,7 @@ where
pub(crate) fn get_batch<'a, S, K>(
self: &'a Arc<Self>,
keys: S,
) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a
) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
where
S: Stream<Item = K> + Send + 'a,
K: AsRef<[u8]> + Send + Sync + 'a,
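These `'_` → `'a` rewrites, repeated across the database and service hunks that follow, look like fallout from the 1.89.0 toolchain bump earlier in this diff: Rust 1.89 added the `mismatched_lifetime_syntaxes` lint, which warns when a lifetime is named in a function's inputs but elided in its output. A minimal illustration, not taken from the codebase:

// warns on Rust 1.89: the input names 'a but the output elides it as '_
fn before<'a>(v: &'a [u8]) -> impl Iterator<Item = &'_ u8> + 'a { v.iter() }

// warning-free: the output's dependency on 'a is spelled out
fn after<'a>(v: &'a [u8]) -> impl Iterator<Item = &'a u8> + 'a { v.iter() }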


@ -10,7 +10,7 @@ use super::stream::is_cached;
use crate::{keyval, keyval::Key, stream};
#[implement(super::Map)]
pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send
pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
where
K: Deserialize<'a> + Send,
{


@ -15,7 +15,7 @@ use crate::{
pub fn keys_from<'a, K, P>(
self: &'a Arc<Self>,
from: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -40,7 +40,7 @@ where
pub fn keys_raw_from<'a, K, P>(
self: &'a Arc<Self>,
from: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send,


@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
pub fn keys_prefix<'a, K, P>(
self: &'a Arc<Self>,
prefix: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -37,7 +37,7 @@ where
pub fn keys_raw_prefix<'a, K, P>(
self: &'a Arc<Self>,
prefix: &'a P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a,
@ -50,7 +50,7 @@ where
pub fn raw_keys_prefix<'a, P>(
self: &'a Arc<Self>,
prefix: &'a P,
) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a
) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{


@ -17,7 +17,7 @@ where
S: Stream<Item = K> + Send + 'a,
K: Serialize + Debug,
{
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a;
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
}
impl<'a, K, S> Qry<'a, K, S> for S
@ -27,7 +27,7 @@ where
K: Serialize + Debug + 'a,
{
#[inline]
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a {
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
map.qry_batch(self)
}
}
@ -37,7 +37,7 @@ where
pub(crate) fn qry_batch<'a, S, K>(
self: &'a Arc<Self>,
keys: S,
) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a
) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
where
S: Stream<Item = K> + Send + 'a,
K: Serialize + Debug + 'a,


@ -10,7 +10,7 @@ use super::rev_stream::is_cached;
use crate::{keyval, keyval::Key, stream};
#[implement(super::Map)]
pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send
pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
where
K: Deserialize<'a> + Send,
{


@ -15,7 +15,7 @@ use crate::{
pub fn rev_keys_from<'a, K, P>(
self: &'a Arc<Self>,
from: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -41,7 +41,7 @@ where
pub fn rev_keys_raw_from<'a, K, P>(
self: &'a Arc<Self>,
from: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send,


@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
pub fn rev_keys_prefix<'a, K, P>(
self: &'a Arc<Self>,
prefix: &P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -37,7 +37,7 @@ where
pub fn rev_keys_raw_prefix<'a, K, P>(
self: &'a Arc<Self>,
prefix: &'a P,
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a,
@ -50,7 +50,7 @@ where
pub fn rev_raw_keys_prefix<'a, P>(
self: &'a Arc<Self>,
prefix: &'a P,
) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a
) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{


@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
#[implement(super::Map)]
pub fn rev_stream<'a, K, V>(
self: &'a Arc<Self>,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
where
K: Deserialize<'a> + Send,
V: Deserialize<'a> + Send,


@ -20,7 +20,7 @@ use crate::{
pub fn rev_stream_from<'a, K, V, P>(
self: &'a Arc<Self>,
from: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -55,7 +55,7 @@ where
pub fn rev_stream_raw_from<'a, K, V, P>(
self: &'a Arc<Self>,
from: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send,


@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
pub fn rev_stream_prefix<'a, K, V, P>(
self: &'a Arc<Self>,
prefix: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -50,7 +50,7 @@ where
pub fn rev_stream_raw_prefix<'a, K, V, P>(
self: &'a Arc<Self>,
prefix: &'a P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a,
@ -68,7 +68,7 @@ where
pub fn rev_raw_stream_prefix<'a, P>(
self: &'a Arc<Self>,
prefix: &'a P,
) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a
) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{


@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
#[implement(super::Map)]
pub fn stream<'a, K, V>(
self: &'a Arc<Self>,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
where
K: Deserialize<'a> + Send,
V: Deserialize<'a> + Send,


@ -19,7 +19,7 @@ use crate::{
pub fn stream_from<'a, K, V, P>(
self: &'a Arc<Self>,
from: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -53,7 +53,7 @@ where
pub fn stream_raw_from<'a, K, V, P>(
self: &'a Arc<Self>,
from: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: AsRef<[u8]> + ?Sized + Debug + Sync,
K: Deserialize<'a> + Send,


@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
pub fn stream_prefix<'a, K, V, P>(
self: &'a Arc<Self>,
prefix: &P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
P: Serialize + ?Sized + Debug,
K: Deserialize<'a> + Send,
@ -50,7 +50,7 @@ where
pub fn stream_raw_prefix<'a, K, V, P>(
self: &'a Arc<Self>,
prefix: &'a P,
) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
K: Deserialize<'a> + Send + 'a,
@ -68,7 +68,7 @@ where
pub fn raw_stream_prefix<'a, P>(
self: &'a Arc<Self>,
prefix: &'a P,
) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a
) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
where
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
{


@ -374,6 +374,10 @@ pub(super) static MAPS: &[Descriptor] = &[
name: "userid_masterkeyid",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "userid_origin",
..descriptor::RANDOM
},
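// Note (editor): presumably the column backing the new account-origin lookup
// ("password" vs "ldap") used by the login changes elsewhere in this diff.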
Descriptor {
name: "userid_password",
..descriptor::RANDOM


@ -3,6 +3,8 @@
extern crate conduwuit_core as conduwuit;
extern crate rust_rocksdb as rocksdb;
use ctor::{ctor, dtor};
conduwuit::mod_ctor! {}
conduwuit::mod_dtor! {}
conduwuit::rustc_flags_capture! {}


@ -443,7 +443,7 @@ pub(crate) fn into_send_seek(result: stream::State<'_>) -> stream::State<'static
unsafe { std::mem::transmute(result) }
}
fn into_recv_seek(result: stream::State<'static>) -> stream::State<'_> {
fn into_recv_seek(result: stream::State<'static>) -> stream::State<'static> {
// SAFETY: This is to receive the State from the channel; see above.
unsafe { std::mem::transmute(result) }
}


@ -326,7 +326,7 @@ fn ser_array() {
}
#[test]
#[ignore]
#[ignore = "arrayvec deserialization is not implemented (separators)"]
fn de_array() {
let a: u64 = 123_456;
let b: u64 = 987_654;
@ -358,7 +358,7 @@ fn de_array() {
}
#[test]
#[ignore]
#[ignore = "Nested sequences are not supported"]
fn de_complex() {
type Key<'a> = (&'a UserId, ArrayVec<u64, 2>, &'a RoomId);


@ -13,13 +13,13 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream {
let ret = quote! {
pub static RUSTC_FLAGS: [&str; #flag_len] = [#( #flag ),*];
#[conduwuit_core::ctor]
#[ctor]
fn _set_rustc_flags() {
conduwuit_core::info::rustc::FLAGS.lock().insert(#crate_name, &RUSTC_FLAGS);
}
// static strings have to be yanked on module unload
#[conduwuit_core::dtor]
#[dtor]
fn _unset_rustc_flags() {
conduwuit_core::info::rustc::FLAGS.lock().remove(#crate_name);
}


@ -32,12 +32,12 @@ a cool hard fork of Conduit, a Matrix homeserver written in Rust"""
section = "net"
priority = "optional"
conf-files = ["/etc/conduwuit/conduwuit.toml"]
maintainer-scripts = "../../debian/"
systemd-units = { unit-name = "conduwuit", start = false }
maintainer-scripts = "../../pkg/debian/"
systemd-units = { unit-name = "conduwuit", start = false, unit-scripts = "../../pkg/" }
assets = [
["../../debian/README.md", "usr/share/doc/conduwuit/README.Debian", "644"],
["../../pkg/debian/README.md", "usr/share/doc/conduwuit/README.Debian", "644"],
["../../README.md", "usr/share/doc/conduwuit/", "644"],
["../../target/release/conduwuit", "usr/sbin/conduwuit", "755"],
["../../target/release/conduwuit", "usr/bin/conduwuit", "755"],
["../../conduwuit-example.toml", "etc/conduwuit/conduwuit.toml", "640"],
]
@ -56,6 +56,7 @@ standard = [
"jemalloc",
"jemalloc_conf",
"journald",
"ldap",
"media_thumbnail",
"systemd",
"url_preview",
@ -63,7 +64,7 @@ standard = [
]
full = [
"standard",
"hardened_malloc",
# "hardened_malloc", # Conflicts with jemalloc
"jemalloc_prof",
"perf_measurements",
"tokio_console"
@ -114,6 +115,9 @@ jemalloc_stats = [
jemalloc_conf = [
"conduwuit-core/jemalloc_conf",
]
ldap = [
"conduwuit-api/ldap",
]
media_thumbnail = [
"conduwuit-service/media_thumbnail",
]
@ -122,7 +126,8 @@ perf_measurements = [
"dep:tracing-flame",
"dep:tracing-opentelemetry",
"dep:opentelemetry_sdk",
"dep:opentelemetry-jaeger",
"dep:opentelemetry-otlp",
"dep:opentelemetry-jaeger-propagator",
"conduwuit-core/perf_measurements",
"conduwuit-core/sentry_telemetry",
]
@ -198,11 +203,14 @@ clap.workspace = true
console-subscriber.optional = true
console-subscriber.workspace = true
const-str.workspace = true
ctor.workspace = true
log.workspace = true
opentelemetry-jaeger.optional = true
opentelemetry-jaeger.workspace = true
opentelemetry.optional = true
opentelemetry.workspace = true
opentelemetry-otlp.optional = true
opentelemetry-otlp.workspace = true
opentelemetry-jaeger-propagator.optional = true
opentelemetry-jaeger-propagator.workspace = true
opentelemetry_sdk.optional = true
opentelemetry_sdk.workspace = true
sentry-tower.optional = true
@ -222,6 +230,7 @@ tracing-subscriber.workspace = true
tracing.workspace = true
tracing-journald = { workspace = true, optional = true }
[target.'cfg(all(not(target_env = "msvc"), target_os = "linux"))'.dependencies]
hardened_malloc-rs.workspace = true
hardened_malloc-rs.optional = true


@ -7,6 +7,8 @@ use conduwuit_core::{
log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span},
result::UnwrapOrErr,
};
#[cfg(feature = "perf_measurements")]
use opentelemetry::trace::TracerProvider;
use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload};
#[cfg(feature = "perf_measurements")]
@ -87,30 +89,35 @@ pub(crate) fn init(
(None, None)
};
let jaeger_filter = EnvFilter::try_new(&config.jaeger_filter)
.map_err(|e| err!(Config("jaeger_filter", "{e}.")))?;
let otlp_filter = EnvFilter::try_new(&config.otlp_filter)
.map_err(|e| err!(Config("otlp_filter", "{e}.")))?;
let jaeger_layer = config.allow_jaeger.then(|| {
let otlp_layer = config.allow_otlp.then(|| {
opentelemetry::global::set_text_map_propagator(
opentelemetry_jaeger::Propagator::new(),
opentelemetry_jaeger_propagator::Propagator::new(),
);
let tracer = opentelemetry_jaeger::new_agent_pipeline()
.with_auto_split_batch(true)
.with_service_name(conduwuit_core::name())
.install_batch(opentelemetry_sdk::runtime::Tokio)
.expect("jaeger agent pipeline");
let exporter = opentelemetry_otlp::SpanExporter::builder()
.with_http()
.build()
.expect("Failed to create OTLP exporter");
let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
.with_batch_exporter(exporter)
.build();
let tracer = provider.tracer(conduwuit_core::name());
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
let (jaeger_reload_filter, jaeger_reload_handle) =
reload::Layer::new(jaeger_filter.clone());
reload_handles.add("jaeger", Box::new(jaeger_reload_handle));
let (otlp_reload_filter, otlp_reload_handle) =
reload::Layer::new(otlp_filter.clone());
reload_handles.add("otlp", Box::new(otlp_reload_handle));
Some(telemetry.with_filter(jaeger_reload_filter))
Some(telemetry.with_filter(otlp_reload_filter))
});
let subscriber = subscriber.with(flame_layer).with(jaeger_layer);
let subscriber = subscriber.with(flame_layer).with(otlp_layer);
(subscriber, flame_guard)
};
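To exercise the new exporter, a sketch assuming a local collector that accepts OTLP over HTTP on the default port:

OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 conduwuit

together with `allow_otlp = true` (or its legacy alias `allow_jaeger`) in the config.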


@ -13,6 +13,7 @@ mod sentry;
mod server;
mod signal;
use ctor::{ctor, dtor};
use server::Server;
rustc_flags_capture! {}


@ -125,6 +125,7 @@ tokio.workspace = true
tower.workspace = true
tower-http.workspace = true
tracing.workspace = true
ctor.workspace = true
[target.'cfg(all(unix, target_os = "linux"))'.dependencies]
sd-notify.workspace = true


@ -12,6 +12,7 @@ use std::{panic::AssertUnwindSafe, pin::Pin, sync::Arc};
use conduwuit::{Error, Result, Server};
use conduwuit_service::Services;
use ctor::{ctor, dtor};
use futures::{Future, FutureExt, TryFutureExt};
conduwuit::mod_ctor! {}


@ -30,7 +30,7 @@ use tower::{Service, ServiceExt};
type MakeService = IntoMakeServiceWithConnectInfo<Router, net::SocketAddr>;
const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
const FINI_POLL_INTERVAL: Duration = Duration::from_millis(750);
#[tracing::instrument(skip_all, level = "debug")]


@ -53,6 +53,9 @@ jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
"conduwuit-database/jemalloc_stats",
]
ldap = [
"dep:ldap3"
]
media_thumbnail = [
"dep:image",
]
@ -89,6 +92,8 @@ image.workspace = true
image.optional = true
ipaddress.workspace = true
itertools.workspace = true
ldap3.workspace = true
ldap3.optional = true
log.workspace = true
loole.workspace = true
lru-cache.workspace = true
@ -112,6 +117,7 @@ webpage.optional = true
blurhash.workspace = true
blurhash.optional = true
recaptcha-verify = { version = "0.1.5", default-features = false }
ctor.workspace = true
[lints]
workspace = true


@ -38,7 +38,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
// Create a user for the server
let server_user = services.globals.server_user.as_ref();
services.users.create(server_user, None)?;
services.users.create(server_user, None, None).await?;
let create_content = {
use RoomVersionId::*;


@ -109,7 +109,10 @@ impl Service {
)?;
if !self.services.users.exists(&appservice_user_id).await {
self.services.users.create(&appservice_user_id, None)?;
self.services
.users
.create(&appservice_user_id, None, None)
.await?;
} else if self
.services
.users
@ -120,7 +123,8 @@ impl Service {
// Reactivate the appservice user if it was accidentally deactivated
self.services
.users
.set_password(&appservice_user_id, None)?;
.set_password(&appservice_user_id, None)
.await?;
}
self.registration_info


@ -41,6 +41,11 @@ impl crate::Service for Service {
return Ok(());
}
if self.services.config.ldap.enable {
warn!("emergency password feature not available with LDAP enabled.");
return Ok(());
}
self.set_emergency_access().await.inspect_err(|e| {
error!("Could not set the configured emergency password for the server user: {e}");
})
@ -57,7 +62,8 @@ impl Service {
self.services
.users
.set_password(server_user, self.services.config.emergency_password.as_deref())?;
.set_password(server_user, self.services.config.emergency_password.as_deref())
.await?;
let (ruleset, pwd_set) = match self.services.config.emergency_password {
| Some(_) => (Ruleset::server_default(server_user), true),


@ -215,8 +215,8 @@ async fn db_lt_12(services: &Services) -> Result<()> {
for username in &services
.users
.list_local_users()
.map(UserId::to_owned)
.collect::<Vec<_>>()
.map(ToOwned::to_owned)
.collect::<Vec<OwnedUserId>>()
.await
{
let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name)
@ -295,8 +295,8 @@ async fn db_lt_13(services: &Services) -> Result<()> {
for username in &services
.users
.list_local_users()
.map(UserId::to_owned)
.collect::<Vec<_>>()
.map(ToOwned::to_owned)
.collect::<Vec<OwnedUserId>>()
.await
{
let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name)


@ -33,6 +33,7 @@ pub mod users;
extern crate conduwuit_core as conduwuit;
extern crate conduwuit_database as database;
use ctor::{ctor, dtor};
pub(crate) use service::{Args, Dep, Service};
pub use crate::services::Services;


@ -183,8 +183,8 @@ impl Service {
.services
.users
.list_local_users()
.map(UserId::to_owned)
.collect::<Vec<_>>()
.map(ToOwned::to_owned)
.collect::<Vec<OwnedUserId>>()
.await
{
let presence = self.db.get_presence(user_id).await;


@ -178,7 +178,7 @@ impl Service {
pub fn get_pushkeys<'a>(
&'a self,
sender: &'a UserId,
) -> impl Stream<Item = &str> + Send + 'a {
) -> impl Stream<Item = &'a str> + Send + 'a {
let prefix = (sender, Interfix);
self.db
.senderkey_pusher


@ -178,7 +178,7 @@ impl Service {
pub fn local_aliases_for_room<'a>(
&'a self,
room_id: &'a RoomId,
) -> impl Stream<Item = &RoomAliasId> + Send + 'a {
) -> impl Stream<Item = &'a RoomAliasId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.aliasid_alias
@ -188,7 +188,9 @@ impl Service {
}
#[tracing::instrument(skip(self), level = "debug")]
pub fn all_local_aliases<'a>(&'a self) -> impl Stream<Item = (&RoomId, &str)> + Send + 'a {
pub fn all_local_aliases<'a>(
&'a self,
) -> impl Stream<Item = (&'a RoomId, &'a str)> + Send + 'a {
self.db
.alias_roomid
.stream()


@ -60,7 +60,7 @@ impl Data {
target: ShortEventId,
from: PduCount,
dir: Direction,
) -> impl Stream<Item = (PduCount, impl Event)> + Send + '_ {
) -> impl Stream<Item = (PduCount, impl Event)> + Send + 'a {
// Query from exact position then filter excludes it (saturating_inc could skip
// events at min/max boundaries)
let from_unsigned = from.into_unsigned();


@ -65,7 +65,7 @@ impl Data {
&'a self,
room_id: &'a RoomId,
since: u64,
) -> impl Stream<Item = ReceiptItem<'_>> + Send + 'a {
) -> impl Stream<Item = ReceiptItem<'a>> + Send + 'a {
type Key<'a> = (&'a RoomId, u64, &'a UserId);
type KeyVal<'a> = (Key<'a>, CanonicalJsonObject);


@ -112,7 +112,7 @@ impl Service {
&'a self,
room_id: &'a RoomId,
since: u64,
) -> impl Stream<Item = ReceiptItem<'_>> + Send + 'a {
) -> impl Stream<Item = ReceiptItem<'a>> + Send + 'a {
self.db.readreceipts_since(room_id, since)
}


@ -104,7 +104,7 @@ pub fn deindex_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_b
pub async fn search_pdus<'a>(
&'a self,
query: &'a RoomQuery<'a>,
) -> Result<(usize, impl Stream<Item = impl Event + use<>> + Send + '_)> {
) -> Result<(usize, impl Stream<Item = impl Event + use<>> + Send + 'a)> {
let pdu_ids: Vec<_> = self.search_pdu_ids(query).await?.collect().await;
let filter = &query.criteria.filter;
@ -137,10 +137,10 @@ pub async fn search_pdus<'a>(
// result is modeled as a stream such that callers don't have to be refactored
// though an additional async/wrap still exists for now
#[implement(Service)]
pub async fn search_pdu_ids(
&self,
query: &RoomQuery<'_>,
) -> Result<impl Stream<Item = RawPduId> + Send + '_ + use<'_>> {
pub async fn search_pdu_ids<'a>(
&'a self,
query: &'a RoomQuery<'_>,
) -> Result<impl Stream<Item = RawPduId> + Send + 'a + use<'a>> {
let shortroomid = self.services.short.get_shortroomid(query.room_id).await?;
let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await;
@ -173,7 +173,7 @@ fn search_pdu_ids_query_words<'a>(
&'a self,
shortroomid: ShortRoomId,
word: &'a str,
) -> impl Stream<Item = RawPduId> + Send + '_ {
) -> impl Stream<Item = RawPduId> + Send + 'a {
self.search_pdu_ids_query_word(shortroomid, word)
.map(move |key| -> RawPduId {
let key = &key[prefix_len(word)..];
@ -183,11 +183,11 @@ fn search_pdu_ids_query_words<'a>(
/// Iterate over raw database results for a word
#[implement(Service)]
fn search_pdu_ids_query_word(
&self,
fn search_pdu_ids_query_word<'a>(
&'a self,
shortroomid: ShortRoomId,
word: &str,
) -> impl Stream<Item = Val<'_>> + Send + '_ + use<'_> {
word: &'a str,
) -> impl Stream<Item = Val<'a>> + Send + 'a + use<'a> {
// rustc says const'ing this not yet stable
let end_id: RawPduId = PduId {
shortroomid,


@ -62,7 +62,7 @@ pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEvent
pub fn multi_get_or_create_shorteventid<'a, I>(
&'a self,
event_ids: I,
) -> impl Stream<Item = ShortEventId> + Send + '_
) -> impl Stream<Item = ShortEventId> + Send + 'a
where
I: Iterator<Item = &'a EventId> + Clone + Debug + Send + 'a,
{


@ -388,8 +388,7 @@ impl Service {
pub fn get_forward_extremities<'a>(
&'a self,
room_id: &'a RoomId,
_state_lock: &'a RoomMutexGuard,
) -> impl Stream<Item = &EventId> + Send + '_ {
) -> impl Stream<Item = &'a EventId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db


@ -144,7 +144,7 @@ pub fn clear_appservice_in_room_cache(&self) { self.appservice_in_room_cache.wri
pub fn room_servers<'a>(
&'a self,
room_id: &'a RoomId,
) -> impl Stream<Item = &ServerName> + Send + 'a {
) -> impl Stream<Item = &'a ServerName> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomserverids
@ -167,7 +167,7 @@ pub async fn server_in_room<'a>(&'a self, server: &'a ServerName, room_id: &'a R
pub fn server_rooms<'a>(
&'a self,
server: &'a ServerName,
) -> impl Stream<Item = &RoomId> + Send + 'a {
) -> impl Stream<Item = &'a RoomId> + Send + 'a {
let prefix = (server, Interfix);
self.db
.serverroomids
@ -202,7 +202,7 @@ pub fn get_shared_rooms<'a>(
&'a self,
user_a: &'a UserId,
user_b: &'a UserId,
) -> impl Stream<Item = &RoomId> + Send + 'a {
) -> impl Stream<Item = &'a RoomId> + Send + 'a {
use conduwuit::utils::set;
let a = self.rooms_joined(user_a);
@ -216,7 +216,7 @@ pub fn get_shared_rooms<'a>(
pub fn room_members<'a>(
&'a self,
room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a {
) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomuserid_joined
@ -239,7 +239,7 @@ pub async fn room_joined_count(&self, room_id: &RoomId) -> Result<u64> {
pub fn local_users_in_room<'a>(
&'a self,
room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a {
) -> impl Stream<Item = &'a UserId> + Send + 'a {
self.room_members(room_id)
.ready_filter(|user| self.services.globals.user_is_local(user))
}
@ -251,7 +251,7 @@ pub fn local_users_in_room<'a>(
pub fn active_local_users_in_room<'a>(
&'a self,
room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a {
) -> impl Stream<Item = &'a UserId> + Send + 'a {
self.local_users_in_room(room_id)
.filter(|user| self.services.users.is_active(user))
}
@ -273,7 +273,7 @@ pub async fn room_invited_count(&self, room_id: &RoomId) -> Result<u64> {
pub fn room_useroncejoined<'a>(
&'a self,
room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a {
) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomuseroncejoinedids
@ -288,7 +288,7 @@ pub fn room_useroncejoined<'a>(
pub fn room_members_invited<'a>(
&'a self,
room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a {
) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomuserid_invitecount
@ -303,7 +303,7 @@ pub fn room_members_invited<'a>(
pub fn room_members_knocked<'a>(
&'a self,
room_id: &'a RoomId,
) -> impl Stream<Item = &UserId> + Send + 'a {
) -> impl Stream<Item = &'a UserId> + Send + 'a {
let prefix = (room_id, Interfix);
self.db
.roomuserid_knockedcount
@ -347,7 +347,7 @@ pub async fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result
pub fn rooms_joined<'a>(
&'a self,
user_id: &'a UserId,
) -> impl Stream<Item = &RoomId> + Send + 'a {
) -> impl Stream<Item = &'a RoomId> + Send + 'a {
self.db
.userroomid_joined
.keys_raw_prefix(user_id)


@ -49,7 +49,7 @@ pub async fn update_membership(
#[allow(clippy::collapsible_if)]
if !self.services.globals.user_is_local(user_id) {
if !self.services.users.exists(user_id).await {
self.services.users.create(user_id, None)?;
self.services.users.create(user_id, None, None).await?;
}
}

Some files were not shown because too many files have changed in this diff.