Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git, synced 2025-09-10 22:22:48 +02:00

Compare commits 3df097c2a7...3c35fb61c9 (2 commits: 3c35fb61c9, b1be499973)

110 changed files with 1227 additions and 3055 deletions

@@ -26,7 +26,3 @@ max_line_length = 98

 [*.yml]
 indent_size = 2
 indent_style = space
-
-[*.json]
-indent_size = 4
-indent_style = space
.envrc (4 changed lines)

@@ -2,8 +2,6 @@
 dotenv_if_exists

-if [ -f /etc/os-release ] && grep -q '^ID=nixos' /etc/os-release; then
-	use flake ".#${DIRENV_DEVSHELL:-default}"
-fi
+# use flake ".#${DIRENV_DEVSHELL:-default}"

 PATH_add bin

Deleted file: .forgejo/actions/detect-runner-os/action.yml (58 lines)

@@ -1,58 +0,0 @@
-name: detect-runner-os
-description: |
-  Detect the actual OS name and version of the runner.
-  Provides separate outputs for name, version, and a combined slug.
-
-outputs:
-  name:
-    description: 'OS name (e.g. Ubuntu, Debian)'
-    value: ${{ steps.detect.outputs.name }}
-  version:
-    description: 'OS version (e.g. 22.04, 11)'
-    value: ${{ steps.detect.outputs.version }}
-  slug:
-    description: 'Combined OS slug (e.g. Ubuntu-22.04)'
-    value: ${{ steps.detect.outputs.slug }}
-  node_major:
-    description: 'Major version of Node.js if available (e.g. 22)'
-    value: ${{ steps.detect.outputs.node_major }}
-  node_version:
-    description: 'Full Node.js version if available (e.g. 22.19.0)'
-    value: ${{ steps.detect.outputs.node_version }}
-
-runs:
-  using: composite
-  steps:
-    - name: Detect runner OS
-      id: detect
-      shell: bash
-      run: |
-        # Detect OS version (try lsb_release first, fall back to /etc/os-release)
-        OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)
-
-        # Detect OS name and capitalise (try lsb_release first, fall back to /etc/os-release)
-        OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"' | sed 's/\b\(.\)/\u\1/g')
-
-        # Create combined slug
-        OS_SLUG="${OS_NAME}-${OS_VERSION}"
-
-        # Detect Node.js version if available
-        if command -v node >/dev/null 2>&1; then
-          NODE_VERSION=$(node --version | sed 's/v//')
-          NODE_MAJOR=$(echo $NODE_VERSION | cut -d. -f1)
-          echo "node_version=${NODE_VERSION}" >> $GITHUB_OUTPUT
-          echo "node_major=${NODE_MAJOR}" >> $GITHUB_OUTPUT
-          echo "🔍 Detected Node.js: v${NODE_VERSION}"
-        else
-          echo "node_version=" >> $GITHUB_OUTPUT
-          echo "node_major=" >> $GITHUB_OUTPUT
-          echo "🔍 Node.js not found"
-        fi
-
-        # Set OS outputs
-        echo "name=${OS_NAME}" >> $GITHUB_OUTPUT
-        echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT
-        echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT
-
-        # Log detection results
-        echo "🔍 Detected Runner OS: ${OS_NAME} ${OS_VERSION}"
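The detection step in the deleted action can be dry-run outside CI. A minimal sketch, assuming a Linux host and a scratch file standing in for the runner-provided GITHUB_OUTPUT:

    export GITHUB_OUTPUT=$(mktemp)   # stand-in for the runner's output file
    OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)
    OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"' | sed 's/\b\(.\)/\u\1/g')
    echo "slug=${OS_NAME}-${OS_VERSION}" >> "$GITHUB_OUTPUT"
    cat "$GITHUB_OUTPUT"             # e.g. slug=Ubuntu-22.04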
New file: .forgejo/actions/prefligit/action.yml (27 lines)

@@ -0,0 +1,27 @@
+name: prefligit
+description: |
+  Runs prefligit, pre-commit reimplemented in Rust.
+inputs:
+  extra_args:
+    description: options to pass to pre-commit run
+    required: false
+    default: '--all-files'
+
+runs:
+  using: composite
+  steps:
+    - name: Install uv
+      uses: https://github.com/astral-sh/setup-uv@v6
+      with:
+        enable-cache: true
+        ignore-nothing-to-cache: true
+    - name: Install Prefligit
+      shell: bash
+      run: |
+        curl --proto '=https' --tlsv1.2 -LsSf https://github.com/j178/prefligit/releases/download/v0.0.10/prefligit-installer.sh | sh
+    - uses: actions/cache@v3
+      with:
+        path: ~/.cache/prefligit
+        key: prefligit-0|${{ hashFiles('.pre-commit-config.yaml') }}
+    - run: prefligit run --show-diff-on-failure --color=always -v ${{ inputs.extra_args }}
+      shell: bash
.forgejo/actions/sccache/action.yml

@@ -2,12 +2,18 @@ name: sccache
 description: |
   Install sccache for caching builds in GitHub Actions.

+inputs:
+  token:
+    description: 'A Github PAT'
+    required: false
+
 runs:
   using: composite
   steps:
     - name: Install sccache
-      uses: https://git.tomfos.tr/tom/sccache-action@v1
+      uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
+      with:
+        token: ${{ inputs.token }}
    - name: Configure sccache
      uses: https://github.com/actions/github-script@v7
      with:
Deleted file: .forgejo/actions/setup-llvm-with-apt/action.yml (167 lines)

@@ -1,167 +0,0 @@
-name: setup-llvm-with-apt
-description: |
-  Set up LLVM toolchain with APT package management and smart caching.
-  Supports cross-compilation architectures and additional package installation.
-
-  Creates symlinks in /usr/bin: clang, clang++, lld, llvm-ar, llvm-ranlib
-
-inputs:
-  dpkg-arch:
-    description: 'Debian architecture for cross-compilation (e.g. arm64)'
-    required: false
-    default: ''
-  extra-packages:
-    description: 'Additional APT packages to install (space-separated)'
-    required: false
-    default: ''
-  llvm-version:
-    description: 'LLVM version to install'
-    required: false
-    default: '20'
-
-outputs:
-  llvm-version:
-    description: 'Installed LLVM version'
-    value: ${{ steps.configure.outputs.version }}
-
-runs:
-  using: composite
-  steps:
-    - name: Detect runner OS
-      id: runner-os
-      uses: ./.forgejo/actions/detect-runner-os
-
-    - name: Configure cross-compilation architecture
-      if: inputs.dpkg-arch != ''
-      shell: bash
-      run: |
-        echo "🏗️ Adding ${{ inputs.dpkg-arch }} architecture"
-        sudo dpkg --add-architecture ${{ inputs.dpkg-arch }}
-
-        # Restrict default sources to amd64
-        sudo sed -i 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list
-        sudo sed -i 's/^deb https/deb [arch=amd64] https/g' /etc/apt/sources.list
-
-        # Add ports sources for foreign architecture
-        sudo tee /etc/apt/sources.list.d/${{ inputs.dpkg-arch }}.list > /dev/null <<EOF
-        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
-        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
-        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
-        EOF
-
-        echo "✅ Architecture ${{ inputs.dpkg-arch }} configured"
-
-    - name: Start LLVM cache group
-      shell: bash
-      run: echo "::group::📦 Restoring LLVM cache"
-
-    - name: Check for LLVM cache
-      id: cache
-      uses: https://github.com/actions/cache@v4
-      with:
-        path: |
-          /usr/bin/clang-*
-          /usr/bin/clang++-*
-          /usr/bin/lld-*
-          /usr/bin/llvm-*
-          /usr/lib/llvm-*/
-          /usr/lib/x86_64-linux-gnu/libLLVM*.so*
-          /usr/lib/x86_64-linux-gnu/libclang*.so*
-          /etc/apt/sources.list.d/archive_uri-*
-          /etc/apt/trusted.gpg.d/apt.llvm.org.asc
-        key: llvm-${{ steps.runner-os.outputs.slug }}-v${{ inputs.llvm-version }}-v3-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}
-
-    - name: End LLVM cache group
-      shell: bash
-      run: echo "::endgroup::"
-
-    - name: Check and install LLVM if needed
-      id: llvm-setup
-      shell: bash
-      run: |
-        echo "🔍 Checking for LLVM ${{ inputs.llvm-version }}..."
-
-        # Check both binaries and libraries exist
-        if [ -f "/usr/bin/clang-${{ inputs.llvm-version }}" ] && \
-           [ -f "/usr/bin/clang++-${{ inputs.llvm-version }}" ] && \
-           [ -f "/usr/bin/lld-${{ inputs.llvm-version }}" ] && \
-           ([ -f "/usr/lib/x86_64-linux-gnu/libLLVM.so.${{ inputs.llvm-version }}.1" ] || \
-            [ -f "/usr/lib/x86_64-linux-gnu/libLLVM-${{ inputs.llvm-version }}.so.1" ] || \
-            [ -f "/usr/lib/llvm-${{ inputs.llvm-version }}/lib/libLLVM.so" ]); then
-          echo "✅ LLVM ${{ inputs.llvm-version }} found and verified"
-          echo "needs-install=false" >> $GITHUB_OUTPUT
-        else
-          echo "📦 LLVM ${{ inputs.llvm-version }} not found or incomplete - installing..."
-
-          echo "::group::🔧 Installing LLVM ${{ inputs.llvm-version }}"
-          wget -O - https://apt.llvm.org/llvm.sh | bash -s -- ${{ inputs.llvm-version }}
-          echo "::endgroup::"
-
-          if [ ! -f "/usr/bin/clang-${{ inputs.llvm-version }}" ]; then
-            echo "❌ Failed to install LLVM ${{ inputs.llvm-version }}"
-            exit 1
-          fi
-
-          echo "✅ Installed LLVM ${{ inputs.llvm-version }}"
-          echo "needs-install=true" >> $GITHUB_OUTPUT
-        fi
-
-    - name: Prepare for additional packages
-      if: inputs.extra-packages != ''
-      shell: bash
-      run: |
-        # Update APT if LLVM was cached (installer script already does apt-get update)
-        if [[ "${{ steps.llvm-setup.outputs.needs-install }}" != "true" ]]; then
-          echo "::group::📦 Running apt-get update (LLVM cached, extra packages needed)"
-          sudo apt-get update
-          echo "::endgroup::"
-        fi
-        echo "::group::📦 Installing additional packages"
-
-    - name: Install additional packages
-      if: inputs.extra-packages != ''
-      uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
-      with:
-        packages: ${{ inputs.extra-packages }}
-        version: 1.0
-
-    - name: End package installation group
-      if: inputs.extra-packages != ''
-      shell: bash
-      run: echo "::endgroup::"
-
-    - name: Configure LLVM environment
-      id: configure
-      shell: bash
-      run: |
-        echo "::group::🔧 Configuring LLVM ${{ inputs.llvm-version }} environment"
-
-        # Create symlinks
-        sudo ln -sf "/usr/bin/clang-${{ inputs.llvm-version }}" /usr/bin/clang
-        sudo ln -sf "/usr/bin/clang++-${{ inputs.llvm-version }}" /usr/bin/clang++
-        sudo ln -sf "/usr/bin/lld-${{ inputs.llvm-version }}" /usr/bin/lld
-        sudo ln -sf "/usr/bin/llvm-ar-${{ inputs.llvm-version }}" /usr/bin/llvm-ar
-        sudo ln -sf "/usr/bin/llvm-ranlib-${{ inputs.llvm-version }}" /usr/bin/llvm-ranlib
-        echo "  ✓ Created symlinks"
-
-        # Setup library paths
-        LLVM_LIB_PATH="/usr/lib/llvm-${{ inputs.llvm-version }}/lib"
-        if [ -d "$LLVM_LIB_PATH" ]; then
-          echo "LD_LIBRARY_PATH=${LLVM_LIB_PATH}:${LD_LIBRARY_PATH:-}" >> $GITHUB_ENV
-          echo "LIBCLANG_PATH=${LLVM_LIB_PATH}" >> $GITHUB_ENV
-
-          echo "$LLVM_LIB_PATH" | sudo tee "/etc/ld.so.conf.d/llvm-${{ inputs.llvm-version }}.conf" > /dev/null
-          sudo ldconfig
-          echo "  ✓ Configured library paths"
-        else
-          # Fallback to standard library location
-          if [ -d "/usr/lib/x86_64-linux-gnu" ]; then
-            echo "LIBCLANG_PATH=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV
-            echo "  ✓ Using fallback library path"
-          fi
-        fi
-
-        # Set output
-        echo "version=${{ inputs.llvm-version }}" >> $GITHUB_OUTPUT
-        echo "::endgroup::"
-        echo "✅ LLVM ready: $(clang --version | head -1)"
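The install-or-reuse logic above reduces to the upstream convenience script plus symlinks. A minimal local sketch, assuming an apt-based Ubuntu host and the action's default version 20:

    # Upstream installer (adds the apt.llvm.org repo and runs apt-get update itself)
    wget -O - https://apt.llvm.org/llvm.sh | sudo bash -s -- 20
    # Point the unversioned names at the versioned binaries, as the action does
    sudo ln -sf /usr/bin/clang-20 /usr/bin/clang
    sudo ln -sf /usr/bin/clang++-20 /usr/bin/clang++
    clang --version | head -1   # verify the symlink resolves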
Deleted file: .forgejo/actions/setup-rust/action.yml (226 lines)

@@ -1,226 +0,0 @@
-name: setup-rust
-description: |
-  Set up Rust toolchain with sccache for compilation caching.
-  Respects rust-toolchain.toml by default or accepts explicit version override.
-
-inputs:
-  cache-key-suffix:
-    description: 'Optional suffix for cache keys (e.g. platform identifier)'
-    required: false
-    default: ''
-  rust-components:
-    description: 'Additional Rust components to install (space-separated)'
-    required: false
-    default: ''
-  rust-target:
-    description: 'Rust target triple (e.g. x86_64-unknown-linux-gnu)'
-    required: false
-    default: ''
-  rust-version:
-    description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
-    required: false
-    default: '1.87.0'
-  sccache-cache-limit:
-    description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
-    required: false
-    default: '2G'
-  github-token:
-    description: 'GitHub token for downloading sccache from GitHub releases'
-    required: false
-    default: ''
-
-outputs:
-  rust-version:
-    description: 'Installed Rust version'
-    value: ${{ steps.rust-setup.outputs.version }}
-
-runs:
-  using: composite
-  steps:
-    - name: Detect runner OS
-      id: runner-os
-      uses: ./.forgejo/actions/detect-runner-os
-
-    - name: Configure Cargo environment
-      shell: bash
-      run: |
-        # Use workspace-relative paths for better control and consistency
-        echo "CARGO_HOME=${{ github.workspace }}/.cargo" >> $GITHUB_ENV
-        echo "CARGO_TARGET_DIR=${{ github.workspace }}/target" >> $GITHUB_ENV
-        echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> $GITHUB_ENV
-        echo "RUSTUP_HOME=${{ github.workspace }}/.rustup" >> $GITHUB_ENV
-
-        # Limit binstall resolution timeout to avoid GitHub rate limit delays
-        echo "BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10" >> $GITHUB_ENV
-
-        # Ensure directories exist for first run
-        mkdir -p "${{ github.workspace }}/.cargo"
-        mkdir -p "${{ github.workspace }}/.sccache"
-        mkdir -p "${{ github.workspace }}/target"
-        mkdir -p "${{ github.workspace }}/.rustup"
-
-    - name: Start cache restore group
-      shell: bash
-      run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"
-
-    - name: Cache Cargo registry and git
-      id: registry-cache
-      uses: https://github.com/actions/cache@v4
-      with:
-        path: |
-          .cargo/registry/index
-          .cargo/registry/cache
-          .cargo/git/db
-        # Registry cache saved per workflow, restored from any workflow's cache
-        # Each workflow maintains its own registry that accumulates its needed crates
-        key: cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ github.workflow }}
-        restore-keys: |
-          cargo-registry-${{ steps.runner-os.outputs.slug }}-
-
-    - name: Cache toolchain binaries
-      id: toolchain-cache
-      uses: https://github.com/actions/cache@v4
-      with:
-        path: |
-          .cargo/bin
-          .rustup/toolchains
-          .rustup/update-hashes
-        # Shared toolchain cache across all Rust versions
-        key: toolchain-${{ steps.runner-os.outputs.slug }}
-
-
-    - name: Setup sccache
-      uses: https://git.tomfos.tr/tom/sccache-action@v1
-
-    - name: Cache build artifacts
-      id: build-cache
-      uses: https://github.com/actions/cache@v4
-      with:
-        path: |
-          target/**/deps
-          !target/**/deps/*.rlib
-          target/**/build
-          target/**/.fingerprint
-          target/**/incremental
-          target/**/*.d
-          /timelord/
-        # Build artifacts - cache per code change, restore from deps when code changes
-        key: >-
-          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
-        restore-keys: |
-          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
-
-    - name: End cache restore group
-      shell: bash
-      run: echo "::endgroup::"
-
-    - name: Setup Rust toolchain
-      shell: bash
-      run: |
-        # Install rustup if not already cached
-        if ! command -v rustup &> /dev/null; then
-          echo "::group::📦 Installing rustup"
-          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
-          source "$CARGO_HOME/env"
-          echo "::endgroup::"
-        else
-          echo "✅ rustup already available"
-        fi
-
-        # Setup the appropriate Rust version
-        if [[ -n "${{ inputs.rust-version }}" ]]; then
-          echo "::group::📦 Setting up Rust ${{ inputs.rust-version }}"
-          # Set override first to prevent rust-toolchain.toml from auto-installing
-          rustup override set ${{ inputs.rust-version }} 2>/dev/null || true
-
-          # Check if we need to install/update the toolchain
-          if rustup toolchain list | grep -q "^${{ inputs.rust-version }}-"; then
-            rustup update ${{ inputs.rust-version }}
-          else
-            rustup toolchain install ${{ inputs.rust-version }} --profile minimal -c cargo,clippy,rustfmt
-          fi
-        else
-          echo "::group::📦 Setting up Rust from rust-toolchain.toml"
-          rustup show
-        fi
-        echo "::endgroup::"
-
-    - name: Configure PATH and install tools
-      shell: bash
-      env:
-        GITHUB_TOKEN: ${{ inputs.github-token }}
-      run: |
-        # Add .cargo/bin to PATH permanently for all subsequent steps
-        echo "${{ github.workspace }}/.cargo/bin" >> $GITHUB_PATH
-
-        # For this step only, we need to add it to PATH since GITHUB_PATH takes effect in the next step
-        export PATH="${{ github.workspace }}/.cargo/bin:$PATH"
-
-        # Install cargo-binstall for fast binary installations
-        if command -v cargo-binstall &> /dev/null; then
-          echo "✅ cargo-binstall already available"
-        else
-          echo "::group::📦 Installing cargo-binstall"
-          curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
-          echo "::endgroup::"
-        fi
-
-        if command -v prek &> /dev/null; then
-          echo "✅ prek already available"
-        else
-          echo "::group::📦 Installing prek"
-          # prek isn't regularly published to crates.io, so we use git source
-          cargo-binstall -y --no-symlinks --git https://github.com/j178/prek prek
-          echo "::endgroup::"
-        fi
-
-        if command -v timelord &> /dev/null; then
-          echo "✅ timelord already available"
-        else
-          echo "::group::📦 Installing timelord"
-          cargo-binstall -y --no-symlinks timelord-cli
-          echo "::endgroup::"
-        fi
-
-    - name: Configure sccache environment
-      shell: bash
-      run: |
-        echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
-        echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
-        echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
-        echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
-        echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
-
-        # Configure incremental compilation GC
-        # If we restored from old cache (partial hit), clean up aggressively
-        if [[ "${{ steps.build-cache.outputs.cache-hit }}" != "true" ]]; then
-          echo "♻️ Partial cache hit - enabling cache cleanup"
-          echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
-        fi
-
-    - name: Install Rust components
-      if: inputs.rust-components != ''
-      shell: bash
-      run: |
-        echo "📦 Installing components: ${{ inputs.rust-components }}"
-        rustup component add ${{ inputs.rust-components }}
-
-    - name: Install Rust target
-      if: inputs.rust-target != ''
-      shell: bash
-      run: |
-        echo "📦 Installing target: ${{ inputs.rust-target }}"
-        rustup target add ${{ inputs.rust-target }}
-
-    - name: Output version and summary
-      id: rust-setup
-      shell: bash
-      run: |
-        RUST_VERSION=$(rustc --version | cut -d' ' -f2)
-        echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT
-
-        echo "📋 Setup complete:"
-        echo "  Rust: $(rustc --version)"
-        echo "  Cargo: $(cargo --version)"
-        echo "  prek: $(prek --version 2>/dev/null || echo 'installed')"
-        echo "  timelord: $(timelord --version 2>/dev/null || echo 'installed')"
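Most of the caching behaviour above hangs on one variable: cargo routes every rustc invocation through the wrapper named in RUSTC_WRAPPER. A minimal local sketch of the same mechanism, assuming sccache is already on PATH:

    export RUSTC_WRAPPER=sccache   # every rustc call now goes through sccache
    sccache --zero-stats           # reset counters so the next build is measurable
    cargo build --locked
    sccache --show-stats           # compile requests vs. cache hits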
@@ -49,23 +49,10 @@ jobs:
          cp ./docs/static/_headers ./public/_headers
          echo "Copied .well-known files and _headers to ./public"

-      - name: Detect runner environment
-        id: runner-env
-        uses: ./.forgejo/actions/detect-runner-os
-
       - name: Setup Node.js
-        if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
         uses: https://github.com/actions/setup-node@v4
         with:
-          node-version: 22
+          node-version: 20
-
-      - name: Cache npm dependencies
-        uses: actions/cache@v3
-        with:
-          path: ~/.npm
-          key: ${{ steps.runner-env.outputs.slug }}-node-${{ hashFiles('**/package-lock.json') }}
-          restore-keys: |
-            ${{ steps.runner-env.outputs.slug }}-node-

       - name: Install dependencies
         run: npm install --save-dev wrangler@latest
New file: .forgejo/workflows/prefligit-checks.yml (22 lines)

@@ -0,0 +1,22 @@
+name: Checks / Prefligit
+
+on:
+  push:
+  pull_request:
+permissions:
+  contents: read
+
+jobs:
+  prefligit:
+    runs-on: ubuntu-latest
+    env:
+      FROM_REF: ${{ github.event.pull_request.base.sha || (!github.event.forced && ( github.event.before != '0000000000000000000000000000000000000000' && github.event.before || github.sha )) || format('{0}~', github.sha) }}
+      TO_REF: ${{ github.sha }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+      - uses: ./.forgejo/actions/prefligit
+        with:
+          extra_args: --all-files --hook-stage manual
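The FROM_REF expression picks the base to diff against: the PR base SHA when one exists, otherwise the pre-push commit (unless the push was forced or the before-SHA is all zeros), falling back to the parent of the current commit. A sketch of how a pre-commit-style runner would consume the pair, assuming prefligit mirrors pre-commit's --from-ref/--to-ref options (the workflow only sets the variables; the flags themselves are not shown here):

    # Run hooks only on files changed in the selected range
    prefligit run --from-ref "$FROM_REF" --to-ref "$TO_REF" --show-diff-on-failure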
Deleted workflow file (Checks / Prek, 83 lines)

@@ -1,83 +0,0 @@
-name: Checks / Prek
-
-on:
-  pull_request:
-  push:
-    branches:
-      - main
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  fast-checks:
-    name: Pre-commit & Formatting
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Setup Rust nightly
-        uses: ./.forgejo/actions/setup-rust
-        with:
-          rust-version: nightly
-          github-token: ${{ secrets.GH_PUBLIC_RO }}
-
-      - name: Run prek
-        run: |
-          prek run \
-            --all-files \
-            --hook-stage manual \
-            --show-diff-on-failure \
-            --color=always \
-            -v
-
-      - name: Check Rust formatting
-        run: |
-          cargo +nightly fmt --all -- --check && \
-            echo "✅ Formatting check passed" || \
-            exit 1
-
-  clippy-and-tests:
-    name: Clippy and Cargo Tests
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Setup LLVM
-        uses: ./.forgejo/actions/setup-llvm-with-apt
-        with:
-          extra-packages: liburing-dev liburing2
-
-      - name: Setup Rust with caching
-        uses: ./.forgejo/actions/setup-rust
-        with:
-          github-token: ${{ secrets.GH_PUBLIC_RO }}
-
-      - name: Run Clippy lints
-        run: |
-          cargo clippy \
-            --workspace \
-            --features full \
-            --locked \
-            --no-deps \
-            --profile test \
-            -- \
-            -D warnings
-
-      - name: Run Cargo tests
-        run: |
-          cargo test \
-            --workspace \
-            --features full \
-            --locked \
-            --profile test \
-            --all-targets \
-            --no-fail-fast
Release-image workflow

@@ -3,25 +3,15 @@ concurrency:
   group: "release-image-${{ github.ref }}"

 on:
-  pull_request:
-    paths-ignore:
-      - "*.md"
-      - "**/*.md"
-      - ".gitlab-ci.yml"
-      - ".gitignore"
-      - "renovate.json"
-      - "pkg/**"
-      - "docs/**"
   push:
-    branches:
-      - main
     paths-ignore:
       - "*.md"
       - "**/*.md"
       - ".gitlab-ci.yml"
       - ".gitignore"
       - "renovate.json"
-      - "pkg/**"
+      - "debian/**"
+      - "docker/**"
       - "docs/**"
   # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:

@@ -53,9 +43,6 @@ jobs:
             let images = []
             if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
              images.push(builtinImage)
-            } else {
-              // Fallback to official registry for forks/PRs without credentials
-              images.push('forgejo.ellis.link/continuwuation/continuwuity')
            }
            core.setOutput('images', images.join("\n"))
            core.setOutput('images_list', images.join(","))

@@ -101,22 +88,15 @@
        with:
          persist-credentials: false
      - name: Install rust
-        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
        id: rust-toolchain
        uses: ./.forgejo/actions/rust-toolchain

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
-        with:
-          # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
-          driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
-          endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
      - name: Set up QEMU
-        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
        uses: docker/setup-qemu-action@v3
      # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Login to builtin registry
-        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: docker/login-action@v3
        with:
          registry: ${{ env.BUILTIN_REGISTRY }}

@@ -142,21 +122,15 @@
        run: |
          calculatedSha=$(git rev-parse --short ${{ github.sha }})
          echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
-          echo "Short SHA: $calculatedSha"
      - name: Get Git commit timestamps
-        run: |
-          timestamp=$(git log -1 --pretty=%ct)
-          echo "TIMESTAMP=$timestamp" >> $GITHUB_ENV
-          echo "Commit timestamp: $timestamp"
+        run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV

      - uses: ./.forgejo/actions/timelord
-        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
        with:
          key: timelord-v0
          path: .

      - name: Cache Rust registry
-        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
        uses: actions/cache@v3
        with:
          path: |

@@ -166,7 +140,6 @@
            .cargo/registry/src
          key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
      - name: Cache cargo target
-        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
        id: cache-cargo-target
        uses: actions/cache@v3
        with:

@@ -174,7 +147,6 @@
            cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
          key: cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
      - name: Cache apt cache
-        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
        id: cache-apt
        uses: actions/cache@v3
        with:

@@ -182,7 +154,6 @@
            var-cache-apt-${{ matrix.slug }}
          key: var-cache-apt-${{ matrix.slug }}
      - name: Cache apt lib
-        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
        id: cache-apt-lib
        uses: actions/cache@v3
        with:

@@ -190,8 +161,7 @@
            var-lib-apt-${{ matrix.slug }}
          key: var-lib-apt-${{ matrix.slug }}
      - name: inject cache into docker
-        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
-        uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
+        uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0
        with:
          cache-map: |
            {

@@ -213,7 +183,7 @@
          context: .
          file: "docker/Dockerfile"
          build-args: |
-            GIT_COMMIT_HASH=${{ github.sha }}
+            GIT_COMMIT_HASH=${{ github.sha }})
            GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
            GIT_REMOTE_URL=${{github.event.repository.html_url }}
            GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}

@@ -223,23 +193,27 @@
          cache-from: type=gha
          # cache-to: type=gha,mode=max
          sbom: true
-          outputs: |
-            ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', needs.define-variables.outputs.images_list) || format('type=image,"name={0}",push=false', needs.define-variables.outputs.images_list) }}
-            type=local,dest=/tmp/binaries
+          outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
        env:
          SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}

      # For publishing multi-platform manifests
      - name: Export digest
-        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

-      # Binary extracted via local output for all builds
-      - name: Rename extracted binary
-        run: mv /tmp/binaries/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
+      - name: Extract binary from container (image)
+        id: extract-binary-image
+        run: |
+          mkdir -p /tmp/binaries
+          digest="${{ steps.build.outputs.digest }}"
+          echo "container_id=$(docker create --platform ${{ matrix.platform }} ${{ needs.define-variables.outputs.images_list }}@$digest)" >> $GITHUB_OUTPUT
+      - name: Extract binary from container (copy)
+        run: docker cp ${{ steps.extract-binary-image.outputs.container_id }}:/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
+      - name: Extract binary from container (cleanup)
+        run: docker rm ${{ steps.extract-binary-image.outputs.container_id }}

      - name: Upload binary artifact
        uses: forgejo/upload-artifact@v4

@@ -249,7 +223,6 @@
          if-no-files-found: error

      - name: Upload digest
-        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: forgejo/upload-artifact@v4
        with:
          name: digests-${{ matrix.slug }}

@@ -262,7 +235,6 @@
    needs: [define-variables, build-image]
    steps:
      - name: Download digests
-        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: forgejo/download-artifact@v4
        with:
          path: /tmp/digests

@@ -270,7 +242,6 @@
          merge-multiple: true
      # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Login to builtin registry
-        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: docker/login-action@v3
        with:
          registry: ${{ env.BUILTIN_REGISTRY }}

@@ -278,15 +249,9 @@
          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
-        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: docker/setup-buildx-action@v3
-        with:
-          # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
-          driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
-          endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}

      - name: Extract metadata (tags) for Docker
-        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        id: meta
        uses: docker/metadata-action@v5
        with:

@@ -304,7 +269,6 @@
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index

      - name: Create manifest list and push
-        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        working-directory: /tmp/digests
        env:
          IMAGES: ${{needs.define-variables.outputs.images}}

@@ -322,7 +286,6 @@
          done

      - name: Inspect image
-        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        env:
          IMAGES: ${{needs.define-variables.outputs.images}}
        shell: bash
Deleted file: .forgejo/workflows/renovate.yml (111 lines)

@@ -1,111 +0,0 @@
-name: Maintenance / Renovate
-
-on:
-  schedule:
-    # Run at 5am UTC daily to avoid late-night dev
-    - cron: '0 5 * * *'
-
-  workflow_dispatch:
-    inputs:
-      dryRun:
-        description: 'Dry run mode'
-        required: false
-        default: null
-        type: choice
-        options:
-          - null
-          - 'extract'
-          - 'lookup'
-          - 'full'
-      logLevel:
-        description: 'Log level'
-        required: false
-        default: 'info'
-        type: choice
-        options:
-          - 'info'
-          - 'warning'
-          - 'critical'
-
-  push:
-    branches:
-      - main
-    paths:
-      # Re-run when config changes
-      - '.forgejo/workflows/renovate.yml'
-      - 'renovate.json'
-
-jobs:
-  renovate:
-    name: Renovate
-    runs-on: ubuntu-latest
-    container:
-      image: ghcr.io/renovatebot/renovate:41
-      options: --tmpfs /tmp:exec
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          show-progress: false
-
-      - name: print node heap
-        run: /usr/local/renovate/node -e 'console.log(`node heap limit = ${require("v8").getHeapStatistics().heap_size_limit / (1024 * 1024)} Mb`)'
-
-      - name: Restore renovate repo cache
-        uses: https://github.com/actions/cache@v4
-        with:
-          path: |
-            /tmp/renovate/cache/renovate/repository
-          key: repo-cache-${{ github.run_id }}
-          restore-keys: |
-            repo-cache-
-
-      - name: Restore renovate package cache
-        uses: https://github.com/actions/cache@v4
-        with:
-          path: |
-            /tmp/renovate/cache/renovate/renovate-cache-sqlite
-          key: package-cache-${{ github.run_id }}
-          restore-keys: |
-            package-cache-
-
-      - name: Self-hosted Renovate
-        uses: https://github.com/renovatebot/github-action@v43.0.9
-        env:
-          LOG_LEVEL: ${{ inputs.logLevel || 'info' }}
-          RENOVATE_DRY_RUN: ${{ inputs.dryRun || 'false' }}
-
-          RENOVATE_PLATFORM: forgejo
-          RENOVATE_ENDPOINT: ${{ github.server_url }}
-          RENOVATE_AUTODISCOVER: 'false'
-          RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'
-
-          RENOVATE_GIT_TIMEOUT: 60000
-
-          RENOVATE_REQUIRE_CONFIG: 'required'
-          RENOVATE_ONBOARDING: 'false'
-
-          RENOVATE_PR_COMMITS_PER_RUN_LIMIT: 3
-
-          RENOVATE_GITHUB_TOKEN_WARN: 'false'
-          RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}
-          GITHUB_COM_TOKEN: ${{ secrets.GH_PUBLIC_RO }}
-
-          RENOVATE_REPOSITORY_CACHE: 'enabled'
-          RENOVATE_X_SQLITE_PACKAGE_CACHE: true
-
-      - name: Save renovate repo cache
-        if: always() && env.RENOVATE_DRY_RUN != 'full'
-        uses: https://github.com/actions/cache@v4
-        with:
-          path: |
-            /tmp/renovate/cache/renovate/repository
-          key: repo-cache-${{ github.run_id }}
-
-      - name: Save renovate package cache
-        if: always() && env.RENOVATE_DRY_RUN != 'full'
-        uses: https://github.com/actions/cache@v4
-        with:
-          path: |
-            /tmp/renovate/cache/renovate/renovate-cache-sqlite
-          key: package-cache-${{ github.run_id }}
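Because the job is driven entirely by RENOVATE_* environment variables, a comparable run can be reproduced from a workstation with the published container image. A sketch, assuming a valid Forgejo API token; the endpoint and repository are taken from this repository but stand in as placeholders:

    docker run --rm \
      -e RENOVATE_PLATFORM=forgejo \
      -e RENOVATE_ENDPOINT=https://forgejo.ellis.link \
      -e RENOVATE_TOKEN=<your-forgejo-token> \
      -e RENOVATE_REPOSITORIES='["continuwuation/continuwuity"]' \
      -e RENOVATE_DRY_RUN=full \
      ghcr.io/renovatebot/renovate:41   # dry run: logs planned updates without pushing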
New file: .forgejo/workflows/rust-checks.yml (144 lines)

@@ -0,0 +1,144 @@
+name: Checks / Rust
+
+on:
+  push:
+
+jobs:
+  format:
+    name: Format
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install rust
+        uses: ./.forgejo/actions/rust-toolchain
+        with:
+          toolchain: "nightly"
+          components: "rustfmt"
+
+      - name: Check formatting
+        run: |
+          cargo +nightly fmt --all -- --check
+
+  clippy:
+    name: Clippy
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install rust
+        uses: ./.forgejo/actions/rust-toolchain
+
+      - uses: https://github.com/actions/create-github-app-token@v2
+        id: app-token
+        with:
+          app-id: ${{ vars.GH_APP_ID }}
+          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
+          github-api-url: https://api.github.com
+          owner: ${{ vars.GH_APP_OWNER }}
+          repositories: ""
+      - name: Install sccache
+        uses: ./.forgejo/actions/sccache
+        with:
+          token: ${{ steps.app-token.outputs.token }}
+      - run: sudo apt-get update
+      - name: Install system dependencies
+        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
+        with:
+          packages: clang liburing-dev
+          version: 1
+      - name: Cache Rust registry
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/git
+            !~/.cargo/git/checkouts
+            ~/.cargo/registry
+            !~/.cargo/registry/src
+          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
+      - name: Timelord
+        uses: ./.forgejo/actions/timelord
+        with:
+          key: sccache-v0
+          path: .
+      - name: Clippy
+        run: |
+          cargo clippy \
+            --workspace \
+            --features full \
+            --locked \
+            --no-deps \
+            --profile test \
+            -- \
+            -D warnings
+
+      - name: Show sccache stats
+        if: always()
+        run: sccache --show-stats
+
+  cargo-test:
+    name: Cargo Test
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install rust
+        uses: ./.forgejo/actions/rust-toolchain
+
+      - uses: https://github.com/actions/create-github-app-token@v2
+        id: app-token
+        with:
+          app-id: ${{ vars.GH_APP_ID }}
+          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
+          github-api-url: https://api.github.com
+          owner: ${{ vars.GH_APP_OWNER }}
+          repositories: ""
+      - name: Install sccache
+        uses: ./.forgejo/actions/sccache
+        with:
+          token: ${{ steps.app-token.outputs.token }}
+      - run: sudo apt-get update
+      - name: Install system dependencies
+        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
+        with:
+          packages: clang liburing-dev
+          version: 1
+      - name: Cache Rust registry
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/git
+            !~/.cargo/git/checkouts
+            ~/.cargo/registry
+            !~/.cargo/registry/src
+          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
+      - name: Timelord
+        uses: ./.forgejo/actions/timelord
+        with:
+          key: sccache-v0
+          path: .
+      - name: Cargo Test
+        run: |
+          cargo test \
+            --workspace \
+            --features full \
+            --locked \
+            --profile test \
+            --all-targets \
+            --no-fail-fast
+
+      - name: Show sccache stats
+        if: always()
+        run: sccache --show-stats
.mailmap (1 changed line)

@@ -13,4 +13,3 @@ Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
 Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
 Timo Kösters <timo@koesters.xyz>
 x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
-Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>
.pre-commit-config.yaml

@@ -9,7 +9,7 @@ repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v5.0.0
     hooks:
-      - id: fix-byte-order-marker
+      - id: check-byte-order-marker
      - id: check-case-conflict
      - id: check-symlinks
      - id: destroyed-symlinks
Cargo.lock (generated, 1265 changed lines): diff not shown because it is too large.
Cargo.toml (66 changed lines)

@@ -48,15 +48,15 @@ features = ["ffi", "std", "union"]
 version = "0.6.2"

 [workspace.dependencies.ctor]
-version = "0.5.0"
+version = "0.2.9"

 [workspace.dependencies.cargo_toml]
-version = "0.22"
+version = "0.21"
 default-features = false
 features = ["features"]

 [workspace.dependencies.toml]
-version = "0.9.5"
+version = "0.8.14"
 default-features = false
 features = ["parse"]

@@ -352,7 +352,7 @@ version = "0.1.2"
 [workspace.dependencies.ruma]
 git = "https://forgejo.ellis.link/continuwuation/ruwuma"
 #branch = "conduwuit-changes"
-rev = "8fb268fa2771dfc3a1c8075ef1246e7c9a0a53fd"
+rev = "b753738047d1f443aca870896ef27ecaacf027da"
 features = [
     "compat",
     "rand",

@@ -411,28 +411,25 @@ default-features = false

 # optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
 [workspace.dependencies.opentelemetry]
-version = "0.30.0"
+version = "0.21.0"

 [workspace.dependencies.tracing-flame]
 version = "0.2.0"

 [workspace.dependencies.tracing-opentelemetry]
-version = "0.31.0"
+version = "0.22.0"

 [workspace.dependencies.opentelemetry_sdk]
-version = "0.30.0"
+version = "0.21.2"
 features = ["rt-tokio"]

-[workspace.dependencies.opentelemetry-otlp]
-version = "0.30.0"
-features = ["http", "trace", "logs", "metrics"]
-
-[workspace.dependencies.opentelemetry-jaeger-propagator]
-version = "0.30.0"
+[workspace.dependencies.opentelemetry-jaeger]
+version = "0.20.0"
+features = ["rt-tokio"]

 # optional sentry metrics for crash/panic reporting
 [workspace.dependencies.sentry]
-version = "0.42.0"
+version = "0.37.0"
 default-features = false
 features = [
     "backtrace",

@@ -448,9 +445,9 @@ features = [
 ]

 [workspace.dependencies.sentry-tracing]
-version = "0.42.0"
+version = "0.37.0"
 [workspace.dependencies.sentry-tower]
-version = "0.42.0"
+version = "0.37.0"

 # jemalloc usage
 [workspace.dependencies.tikv-jemalloc-sys]

@@ -479,7 +476,7 @@ features = ["use_std"]
 version = "0.4"

 [workspace.dependencies.nix]
-version = "0.30.1"
+version = "0.29.0"
 default-features = false
 features = ["resource"]

@@ -501,7 +498,7 @@ version = "0.4.3"
 default-features = false

 [workspace.dependencies.termimad]
-version = "0.34.0"
+version = "0.31.2"
 default-features = false

 [workspace.dependencies.checked_ops]

@@ -539,21 +536,16 @@ version = "0.2"
 version = "0.2"

 [workspace.dependencies.minicbor]
-version = "2.1.1"
+version = "0.26.3"
 features = ["std"]

 [workspace.dependencies.minicbor-serde]
-version = "0.6.0"
+version = "0.4.1"
 features = ["std"]

 [workspace.dependencies.maplit]
 version = "1.0.2"

-[workspace.dependencies.ldap3]
-version = "0.11.5"
-default-features = false
-features = ["sync", "tls-rustls"]
-
 #
 # Patches
 #

@@ -767,6 +759,25 @@ incremental = true

 [profile.dev.package.conduwuit_core]
 inherits = "dev"
+#rustflags = [
+#	'--cfg', 'conduwuit_mods',
+#	'-Ztime-passes',
+#	'-Zmir-opt-level=0',
+#	'-Ztls-model=initial-exec',
+#	'-Cprefer-dynamic=true',
+#	'-Zstaticlib-prefer-dynamic=true',
+#	'-Zstaticlib-allow-rdylib-deps=true',
+#	'-Zpacked-bundled-libs=false',
+#	'-Zplt=true',
+#	'-Clink-arg=-Wl,--as-needed',
+#	'-Clink-arg=-Wl,--allow-shlib-undefined',
+#	'-Clink-arg=-Wl,-z,lazy',
+#	'-Clink-arg=-Wl,-z,unique',
+#	'-Clink-arg=-Wl,-z,nodlopen',
+#	'-Clink-arg=-Wl,-z,nodelete',
+#]
+[profile.dev.package.xtask-generate-commands]
+inherits = "dev"
 [profile.dev.package.conduwuit]
 inherits = "dev"
 #rustflags = [

@@ -856,7 +867,7 @@ unused-qualifications = "warn"
 #unused-results = "warn" # TODO

 ## some sadness
-mismatched_lifetime_syntaxes = "allow" # TODO!
+elided_named_lifetimes = "allow" # TODO!
 let_underscore_drop = "allow"
 missing_docs = "allow"
 # cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g.

@@ -995,6 +1006,3 @@ literal_string_with_formatting_args = { level = "allow", priority = 1 }


 needless_raw_string_hashes = "allow"
-
-# TODO: Enable this lint & fix all instances
-collapsible_if = "allow"
arch/conduwuit.service (new file, 83 lines)

@@ -0,0 +1,83 @@
+[Unit]
+
+Description=Continuwuity - Matrix homeserver
+Wants=network-online.target
+After=network-online.target
+Documentation=https://continuwuity.org/
+RequiresMountsFor=/var/lib/private/conduwuit
+Alias=matrix-conduwuit.service
+
+[Service]
+DynamicUser=yes
+Type=notify-reload
+ReloadSignal=SIGUSR1
+
+TTYPath=/dev/tty25
+DeviceAllow=char-tty
+StandardInput=tty-force
+StandardOutput=tty
+StandardError=journal+console
+
+Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
+Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
+
+TTYReset=yes
+# uncomment to allow buffer to be cleared every restart
+TTYVTDisallocate=no
+
+TTYColumns=120
+TTYRows=40
+
+AmbientCapabilities=
+CapabilityBoundingSet=
+
+DevicePolicy=closed
+LockPersonality=yes
+MemoryDenyWriteExecute=yes
+NoNewPrivileges=yes
+#ProcSubset=pid
+ProtectClock=yes
+ProtectControlGroups=yes
+ProtectHome=yes
+ProtectHostname=yes
+ProtectKernelLogs=yes
+ProtectKernelModules=yes
+ProtectKernelTunables=yes
+ProtectProc=invisible
+ProtectSystem=strict
+PrivateDevices=yes
+PrivateMounts=yes
+PrivateTmp=yes
+PrivateUsers=yes
+PrivateIPC=yes
+RemoveIPC=yes
+RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
+RestrictNamespaces=yes
+RestrictRealtime=yes
+RestrictSUIDSGID=yes
+SystemCallArchitectures=native
+SystemCallFilter=@system-service @resources
+SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
+SystemCallErrorNumber=EPERM
+StateDirectory=conduwuit
+
+RuntimeDirectory=conduwuit
+RuntimeDirectoryMode=0750
+
+Environment=CONTINUWUITY_CONFIG=${CREDENTIALS_DIRECTORY}/config.toml
+LoadCredential=config.toml:/etc/conduwuit/conduwuit.toml
+BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
+BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit
+
+ExecStart=/usr/bin/conduwuit
+Restart=on-failure
+RestartSec=5
+
+TimeoutStopSec=4m
+TimeoutStartSec=4m
+
+StartLimitInterval=1m
+StartLimitBurst=5
+
+[Install]
+WantedBy=multi-user.target
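This new unit uses `Type=notify-reload` with `ReloadSignal=SIGUSR1`, which means systemd expects the daemon itself to report readiness and reload progress over the notify socket. A minimal sketch of the service-side protocol, using the third-party `sd-notify` crate purely for illustration; continuwuity's actual startup code may wire this differently:

```rust
use sd_notify::NotifyState;

fn on_startup_complete() -> std::io::Result<()> {
	// Type=notify: systemd keeps the unit in "activating" until READY=1 arrives.
	sd_notify::notify(false, &[NotifyState::Ready])
}

fn on_sigusr1() -> std::io::Result<()> {
	// Type=notify-reload: on the configured ReloadSignal the service announces
	// the reload, re-reads its config, then reports ready again. Note that
	// newer systemd also expects a MONOTONIC_USEC= timestamp with RELOADING=1.
	sd_notify::notify(false, &[NotifyState::Reloading])?;
	// ... re-load configuration here ...
	sd_notify::notify(false, &[NotifyState::Ready])
}

fn main() -> std::io::Result<()> {
	on_startup_complete()
}
```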
conduwuit-example.toml

@@ -79,11 +79,9 @@
 # This is the only directory where continuwuity will save its data,
 # including media. Note: this was previously "/var/lib/matrix-conduit".
 #
-# YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
-# `systemd` service. The service file sets it to `/var/lib/conduwuit`
-# using an environment variable and also grants write access.
+# YOU NEED TO EDIT THIS.
 #
-# example: "/var/lib/conduwuit"
+# example: "/var/lib/continuwuity"
 #
 #database_path =
 
@@ -591,19 +589,13 @@
 #
 #default_room_version = 11
 
-# Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
-# Jaeger exporter. Traces will be sent via OTLP to a collector (such as
-# Jaeger) that supports the OpenTelemetry Protocol.
+# This item is undocumented. Please contribute documentation for it.
 #
-# Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
-# environment variable (defaults to http://localhost:4318).
-#
-#allow_otlp = false
+#allow_jaeger = false
 
-# Filter for OTLP tracing spans. This controls which spans are exported
-# to the OTLP collector.
+# This item is undocumented. Please contribute documentation for it.
 #
-#otlp_filter = "info"
+#jaeger_filter = "info"
 
 # If the 'perf_measurements' compile-time feature is enabled, enables
 # collecting folded stack trace profile of tracing spans using
@@ -1704,10 +1696,6 @@
 #
 #config_reload_signal = true
 
-# This item is undocumented. Please contribute documentation for it.
-#
-#ldap = false
-
 [global.tls]
 
 # Path to a valid TLS certificate file.
@@ -1786,91 +1774,3 @@
 # is 33.55MB. Setting it to 0 disables blurhashing.
 #
 #blurhash_max_raw_size = 33554432
-
-[global.ldap]
-
-# Whether to enable LDAP login.
-#
-# example: "true"
-#
-#enable = false
-
-# Whether to force LDAP authentication or authorize classical password
-# login.
-#
-# example: "true"
-#
-#ldap_only = false
-
-# URI of the LDAP server.
-#
-# example: "ldap://ldap.example.com:389"
-#
-#uri = ""
-
-# Root of the searches.
-#
-# example: "ou=users,dc=example,dc=org"
-#
-#base_dn = ""
-
-# Bind DN if anonymous search is not enabled.
-#
-# You can use the variable `{username}` that will be replaced by the
-# entered username. In such case, the password used to bind will be the
-# one provided for the login and not the one given by
-# `bind_password_file`. Beware: automatically granting admin rights will
-# not work if you use this direct bind instead of a LDAP search.
-#
-# example: "cn=ldap-reader,dc=example,dc=org" or
-# "cn={username},ou=users,dc=example,dc=org"
-#
-#bind_dn = ""
-
-# Path to a file on the system that contains the password for the
-# `bind_dn`.
-#
-# The server must be able to access the file, and it must not be empty.
-#
-#bind_password_file = ""
-
-# Search filter to limit user searches.
-#
-# You can use the variable `{username}` that will be replaced by the
-# entered username for more complex filters.
-#
-# example: "(&(objectClass=person)(memberOf=matrix))"
-#
-#filter = "(objectClass=*)"
-
-# Attribute to use to uniquely identify the user.
-#
-# example: "uid" or "cn"
-#
-#uid_attribute = "uid"
-
-# Attribute containing the display name of the user.
-#
-# example: "givenName" or "sn"
-#
-#name_attribute = "givenName"
-
-# Root of the searches for admin users.
-#
-# Defaults to `base_dn` if empty.
-#
-# example: "ou=admins,dc=example,dc=org"
-#
-#admin_base_dn = ""
-
-# The LDAP search filter to find administrative users for continuwuity.
-#
-# If left blank, administrative state must be configured manually for each
-# user.
-#
-# You can use the variable `{username}` that will be replaced by the
-# entered username for more complex filters.
-#
-# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
-#
-#admin_filter = ""
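For context on the `*_filter = "info"` options above: these strings are tracing-subscriber filter directives, the same syntax `RUST_LOG` uses. A minimal sketch of what such a directive controls, assuming the `tracing` and `tracing-subscriber` crates with the `env-filter` feature enabled; the server's actual subscriber setup is more involved:

```rust
use tracing_subscriber::EnvFilter;

fn main() {
	// A bare level applies globally; `target=level` pairs narrow it down.
	let filter = EnvFilter::new("info,conduwuit=debug");
	tracing_subscriber::fmt().with_env_filter(filter).init();

	tracing::info!("kept: at or above the `info` floor");
	tracing::trace!("dropped: below the configured level");
}
```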
pkg/debian/README.md → debian/README.md (renamed, 0 changes, vendored)

pkg/conduwuit.service → debian/conduwuit.service (renamed, 18 changes, vendored)

@@ -1,24 +1,25 @@
 [Unit]
 
 Description=Continuwuity - Matrix homeserver
-Documentation=https://continuwuity.org/
 Wants=network-online.target
 After=network-online.target
+Documentation=https://continuwuity.org/
 Alias=matrix-conduwuit.service
 
 [Service]
 DynamicUser=yes
 User=conduwuit
 Group=conduwuit
-Type=notify-reload
-ReloadSignal=SIGUSR1
+Type=notify
 
 Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
 
 Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
 Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
-Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"
 
-ExecStart=/usr/bin/conduwuit
+ExecStart=/usr/sbin/conduwuit
+
+ReadWritePaths=/var/lib/conduwuit /etc/conduwuit
 
 AmbientCapabilities=
 CapabilityBoundingSet=
@@ -51,17 +52,16 @@ SystemCallArchitectures=native
 SystemCallFilter=@system-service @resources
 SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
 SystemCallErrorNumber=EPERM
+#StateDirectory=conduwuit
 
-StateDirectory=conduwuit
-ConfigurationDirectory=conduwuit
 RuntimeDirectory=conduwuit
 RuntimeDirectoryMode=0750
 
 Restart=on-failure
 RestartSec=5
 
-TimeoutStopSec=4m
-TimeoutStartSec=4m
+TimeoutStopSec=2m
+TimeoutStartSec=2m
 
 StartLimitInterval=1m
 StartLimitBurst=5
pkg/debian/config → debian/config (renamed, 0 changes, vendored)

debian/postinst (new file, 44 lines, vendored)

@@ -0,0 +1,44 @@
+#!/bin/sh
+set -e
+
+# TODO: implement debconf support that is maintainable without duplicating the config
+#. /usr/share/debconf/confmodule
+
+CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
+CONDUWUIT_CONFIG_PATH=/etc/conduwuit
+
+case "$1" in
+	configure)
+		# Create the `conduwuit` user if it does not exist yet.
+		if ! getent passwd conduwuit > /dev/null ; then
+			echo 'Adding system user for the conduwuit Matrix homeserver' 1>&2
+			adduser --system --group --quiet \
+				--home "$CONDUWUIT_DATABASE_PATH" \
+				--disabled-login \
+				--shell "/usr/sbin/nologin" \
+				conduwuit
+		fi
+
+		# Create the database path if it does not exist yet and fix up ownership
+		# and permissions for the config.
+		mkdir -v -p "$CONDUWUIT_DATABASE_PATH"
+
+		# symlink the previous location for compatibility if it does not exist yet.
+		if ! test -L "/var/lib/matrix-conduit" ; then
+			ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
+		fi
+
+		chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH"
+		chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH"
+
+		chmod -v 740 "$CONDUWUIT_DATABASE_PATH"
+
+		echo ''
+		echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
+		echo 'To start the server, run: systemctl start conduwuit.service'
+		echo ''
+
+		;;
+esac
+
+#DEBHELPER#
pkg/debian/postrm → debian/postrm (renamed, 10 changes, vendored)

@@ -20,18 +20,24 @@ case $1 in
 
 		if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then
 			if test -L "$CONDUWUIT_CONFIG_PATH"; then
-				echo "Deleting continuwuity configuration files"
+				echo "Deleting conduwuit configuration files"
 				rm -v -r "$CONDUWUIT_CONFIG_PATH"
 			fi
 		fi
 
 		if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then
 			if test -L "$CONDUWUIT_DATABASE_PATH"; then
-				echo "Deleting continuwuity database directory"
+				echo "Deleting conduwuit database directory"
 				rm -r "$CONDUWUIT_DATABASE_PATH"
 			fi
 		fi
 
+		if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then
+			if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then
+				echo "Removing matrix-conduit symlink"
+				rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
+			fi
+		fi
 		;;
 esac
@@ -199,57 +199,32 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
 EOF
 
 # Extract dynamically linked dependencies
-RUN <<'DEPS_EOF'
+RUN <<EOF
 set -o xtrace
-mkdir /out/libs /out/libs-root
-# Process each binary
+mkdir /out/libs
+mkdir /out/libs-root
 for BINARY in /out/sbin/*; do
-  if lddtree_output=$(lddtree "$BINARY" 2>/dev/null) && [ -n "$lddtree_output" ]; then
-    echo "$lddtree_output" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | \
-      awk '{dest = ($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2; print "install -D " $1 " " dest}' | \
-      while read cmd; do eval "$cmd"; done
-  fi
+  lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
 done
-# Show what will be copied to runtime
-echo "=== Libraries being copied to runtime image:"
-find /out/libs* -type f 2>/dev/null | sort || echo "No libraries found"
-DEPS_EOF
-
-FROM ubuntu:latest AS prepper
-
-# Create layer structure
-RUN mkdir -p /layer1/etc/ssl/certs \
-    /layer2/usr/lib \
-    /layer3/sbin /layer3/sbom
-
-# Copy SSL certs and root-path libraries to layer1 (ultra-stable)
-COPY --from=base /etc/ssl/certs /layer1/etc/ssl/certs
-COPY --from=builder /out/libs-root/ /layer1/
-
-# Copy application libraries to layer2 (semi-stable)
-COPY --from=builder /out/libs/ /layer2/usr/lib/
-
-# Copy binaries and SBOM to layer3 (volatile)
-COPY --from=builder /out/sbin/ /layer3/sbin/
-COPY --from=builder /out/sbom/ /layer3/sbom/
-
-# Fix permissions after copying
-RUN chmod -R 755 /layer1 /layer2 /layer3
+EOF
 
 FROM scratch
 
 WORKDIR /
 
-# Copy ultra-stable layer (SSL certs, system libraries)
-COPY --from=prepper /layer1/ /
+# Copy root certs for tls into image
+# You can also mount the certs from the host
+# --volume /etc/ssl/certs:/etc/ssl/certs:ro
+COPY --from=base /etc/ssl/certs /etc/ssl/certs
 
-# Copy semi-stable layer (application libraries)
-COPY --from=prepper /layer2/ /
+# Copy our build
+COPY --from=builder /out/sbin/ /sbin/
+# Copy SBOM
+COPY --from=builder /out/sbom/ /sbom/
 
-# Copy volatile layer (binaries, SBOM)
-COPY --from=prepper /layer3/ /
+# Copy dynamic libraries to root
+COPY --from=builder /out/libs-root/ /
+COPY --from=builder /out/libs/ /usr/lib/
 
 # Inform linker where to find libraries
 ENV LD_LIBRARY_PATH=/usr/lib
@@ -21,7 +21,6 @@ This document contains the help content for the `admin` command-line program.
 * [`admin users list-joined-rooms`↴](#admin-users-list-joined-rooms)
 * [`admin users force-join-room`↴](#admin-users-force-join-room)
 * [`admin users force-leave-room`↴](#admin-users-force-leave-room)
-* [`admin users force-leave-remote-room`↴](#admin-users-force-leave-remote-room)
 * [`admin users force-demote`↴](#admin-users-force-demote)
 * [`admin users make-user-admin`↴](#admin-users-make-user-admin)
 * [`admin users put-room-tag`↴](#admin-users-put-room-tag)
@@ -296,7 +295,6 @@ You can find the ID using the `list-appservices` command.
 * `list-joined-rooms` — - Lists all the rooms (local and remote) that the specified user is joined in
 * `force-join-room` — - Manually join a local user to a room
 * `force-leave-room` — - Manually leave a local user from a room
-* `force-leave-remote-room` — - Manually leave a remote room for a local user
 * `force-demote` — - Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits
 * `make-user-admin` — - Grant server-admin privileges to a user
 * `put-room-tag` — - Puts a room tag for the specified user and room ID
@@ -451,19 +449,6 @@ Reverses the effects of the `suspend` command, allowing the user to send messages
 
 
 
-## `admin users force-leave-remote-room`
-
-- Manually leave a remote room for a local user
-
-**Usage:** `admin users force-leave-remote-room <USER_ID> <ROOM_ID>`
-
-###### **Arguments:**
-
-* `<USER_ID>`
-* `<ROOM_ID>`
-
-
-
 ## `admin users force-demote`
 
 - Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits
@@ -9,11 +9,24 @@
 
 </details>
 
-## systemd unit file
+## Debian systemd unit file
 
 <details>
-<summary>systemd unit file</summary>
+<summary>Debian systemd unit file</summary>
 
 ```
-{{#include ../../pkg/conduwuit.service}}
+{{#include ../../debian/conduwuit.service}}
 ```
+
+</details>
+
+## Arch Linux systemd unit file
+
+<details>
+<summary>Arch Linux systemd unit file</summary>
+
+```
+{{#include ../../arch/conduwuit.service}}
+```
 
 </details>

@@ -1 +1 @@
-{{#include ../../pkg/debian/README.md}}
+{{#include ../../debian/README.md}}
flake.lock (generated, 32 changes)

@@ -153,11 +153,11 @@
         "rust-analyzer-src": "rust-analyzer-src"
       },
       "locked": {
-        "lastModified": 1755585599,
-        "narHash": "sha256-tl/0cnsqB/Yt7DbaGMel2RLa7QG5elA8lkaOXli6VdY=",
+        "lastModified": 1751525020,
+        "narHash": "sha256-oDO6lCYS5Bf4jUITChj9XV7k3TP38DE0Ckz5n5ORCME=",
         "owner": "nix-community",
         "repo": "fenix",
-        "rev": "6ed03ef4c8ec36d193c18e06b9ecddde78fb7e42",
+        "rev": "a1a5f92f47787e7df9f30e5e5ac13e679215aa1e",
         "type": "github"
       },
       "original": {
@@ -513,6 +513,23 @@
         "type": "github"
       }
     },
+    "rocksdb": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1741308171,
+        "narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
+        "ref": "v9.11.1",
+        "rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
+        "revCount": 13177,
+        "type": "git",
+        "url": "https://forgejo.ellis.link/continuwuation/rocksdb"
+      },
+      "original": {
+        "ref": "v9.11.1",
+        "type": "git",
+        "url": "https://forgejo.ellis.link/continuwuation/rocksdb"
+      }
+    },
     "root": {
       "inputs": {
         "attic": "attic",
@@ -522,17 +539,18 @@
         "flake-compat": "flake-compat_3",
         "flake-utils": "flake-utils",
         "nix-filter": "nix-filter",
-        "nixpkgs": "nixpkgs_5"
+        "nixpkgs": "nixpkgs_5",
+        "rocksdb": "rocksdb"
       }
     },
     "rust-analyzer-src": {
       "flake": false,
       "locked": {
-        "lastModified": 1755504847,
-        "narHash": "sha256-VX0B9hwhJypCGqncVVLC+SmeMVd/GAYbJZ0MiiUn2Pk=",
+        "lastModified": 1751433876,
+        "narHash": "sha256-IsdwOcvLLDDlkFNwhdD5BZy20okIQL01+UQ7Kxbqh8s=",
         "owner": "rust-lang",
         "repo": "rust-analyzer",
-        "rev": "a905e3b21b144d77e1b304e49f3264f6f8d4db75",
+        "rev": "11d45c881389dae90b0da5a94cde52c79d0fc7ef",
         "type": "github"
       },
       "original": {
flake.nix (30 changes)

@@ -16,6 +16,10 @@
     flake-utils.url = "github:numtide/flake-utils?ref=main";
     nix-filter.url = "github:numtide/nix-filter?ref=main";
     nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
+    rocksdb = {
+      url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=v9.11.1";
+      flake = false;
+    };
   };
 
   outputs =
@@ -27,24 +31,20 @@
         inherit system;
       };
 
-      fnx = inputs.fenix.packages.${system};
       # The Rust toolchain to use
-      toolchain = fnx.combine [
-        (fnx.fromToolchainFile {
-          file = ./rust-toolchain.toml;
+      toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
+        file = ./rust-toolchain.toml;
 
-          # See also `rust-toolchain.toml`
-          sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
-        })
-        fnx.complete.rustfmt
-      ];
+        # See also `rust-toolchain.toml`
+        sha256 = "sha256-KUm16pHj+cRedf8vxs/Hd2YWxpOrWZ7UOrwhILdSJBU=";
+      };
 
       mkScope =
         pkgs:
        pkgs.lib.makeScope pkgs.newScope (self: {
          inherit pkgs inputs;
          craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
-         main = self.callPackage ./pkg/nix/pkgs/main { };
+         main = self.callPackage ./nix/pkgs/main { };
          liburing = pkgs.liburing.overrideAttrs {
            # Tests weren't building
            outputs = [
@@ -61,14 +61,8 @@
             inherit (self) liburing;
           }).overrideAttrs
             (old: {
-              src = pkgsHost.fetchFromGitea {
-                domain = "forgejo.ellis.link";
-                owner = "continuwuation";
-                repo = "rocksdb";
-                rev = "10.4.fb";
-                sha256 = "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=";
-              };
-              version = "v10.4.fb";
+              src = inputs.rocksdb;
+              version = "v9.11.1";
               cmakeFlags =
                 pkgs.lib.subtractLists [
                   # No real reason to have snappy or zlib, no one uses this
@@ -1,20 +0,0 @@
-#!/bin/sh
-set -e
-
-# TODO: implement debconf support that is maintainable without duplicating the config
-#. /usr/share/debconf/confmodule
-
-CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
-CONDUWUIT_CONFIG_PATH=/etc/conduwuit
-
-case "$1" in
-	configure)
-		echo ''
-		echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
-		echo 'To start the server, run: systemctl start conduwuit.service'
-		echo ''
-
-		;;
-esac
-
-#DEBHELPER#
@@ -1,80 +0,0 @@
-# This should be run using rpkg-util: https://docs.pagure.org/rpkg-util
-# it requires Internet access and is not suitable for Fedora main repos
-# TODO: rpkg-util is no longer maintained, find a replacement
-
-Name:           continuwuity
-Version:        {{{ git_repo_version }}}
-Release:        1%{?dist}
-Summary:        Very cool Matrix chat homeserver written in Rust
-
-License:        Apache-2.0 AND MIT
-
-URL:            https://continuwuity.org
-VCS:            {{{ git_repo_vcs }}}
-Source:         {{{ git_repo_pack }}}
-
-BuildRequires:  cargo-rpm-macros >= 25
-BuildRequires:  systemd-rpm-macros
-# Needed to build rust-librocksdb-sys
-BuildRequires:  clang
-BuildRequires:  liburing-devel
-
-Requires:       liburing
-Requires:       glibc
-Requires:       libstdc++
-
-%global _description %{expand:
-A cool hard fork of Conduit, a Matrix homeserver written in Rust}
-
-%description %{_description}
-
-%prep
-{{{ git_repo_setup_macro }}}
-%cargo_prep -N
-# Perform an online build so Git dependencies can be retrieved
-sed -i 's/^offline = true$//' .cargo/config.toml
-
-%build
-%cargo_build
-
-# Here's the one legally required mystery incantation in this file.
-# Some of our dependencies have source files which are (for some reason) marked as executable.
-# Files in .cargo/registry/ are copied into /usr/src/ by the debuginfo machinery
-# at the end of the build step, and then the BRP shebang mangling script checks
-# the entire buildroot to find executable files, and fails the build because
-# it thinks Rust's file attributes are shebangs because they start with `#!`.
-# So we have to clear the executable bit on all of them before that happens.
-find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
-
-# TODO: this fails currently because it's forced to run in offline mode
-# {cargo_license -- --no-dev} > LICENSE.dependencies
-
-%install
-install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
-install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
-install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
-
-%files
-%license LICENSE
-%license src/core/matrix/state_res/LICENSE
-%doc CODE_OF_CONDUCT.md
-%doc CONTRIBUTING.md
-%doc README.md
-%doc SECURITY.md
-%config %{_sysconfdir}/conduwuit/conduwuit.toml
-
-%{_bindir}/conduwuit
-%{_unitdir}/conduwuit.service
-# Do not create /var/lib/conduwuit, systemd will create it if necessary
-
-%post
-%systemd_post conduwuit.service
-
-%preun
-%systemd_preun conduwuit.service
-
-%postun
-%systemd_postun_with_restart conduwuit.service
-
-%changelog
-{{{ git_repo_changelog }}}
renovate.json

@@ -1,59 +1,26 @@
 {
   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
-  "extends": ["config:recommended"],
-  "lockFileMaintenance": {
-    "enabled": true,
-    "schedule": ["at any time"]
-  },
-  "nix": {
-    "enabled": true
-  },
-  "labels": ["Dependencies", "Dependencies/Renovate"],
-  "ignoreDeps": [
-    "tikv-jemallocator",
-    "tikv-jemalloc-sys",
-    "tikv-jemalloc-ctl",
-    "opentelemetry",
-    "opentelemetry_sdk",
-    "opentelemetry-jaeger",
-    "tracing-opentelemetry"
-  ],
-  "github-actions": {
-    "enabled": true,
-    "managerFilePatterns": [
-      "/(^|/)\\.forgejo/workflows/[^/]+\\.ya?ml$/",
-      "/(^|/)\\.forgejo/actions/[^/]+/action\\.ya?ml$/",
-      "/(^|/)\\.github/workflows/[^/]+\\.ya?ml$/",
-      "/(^|/)\\.github/actions/[^/]+/action\\.ya?ml$/"
-    ]
-  },
-  "packageRules": [
-    {
-      "description": "Batch minor and patch GitHub Actions updates",
-      "matchManagers": ["github-actions"],
-      "matchUpdateTypes": ["minor", "patch"],
-      "groupName": "github-actions-non-major"
-    },
-    {
-      "description": "Group Rust toolchain updates into a single PR",
-      "matchManagers": ["custom.regex"],
-      "matchPackageNames": ["rust", "rustc", "cargo"],
-      "groupName": "rust-toolchain"
-    },
-    {
-      "description": "Group lockfile updates into a single PR",
-      "matchUpdateTypes": ["lockFileMaintenance"],
-      "groupName": "lockfile-maintenance"
-    },
-    {
-      "description": "Batch patch-level Rust dependency updates",
-      "matchManagers": ["cargo"],
-      "matchUpdateTypes": ["patch"],
-      "groupName": "rust-patch-updates"
-    },
-    {
-      "matchManagers": ["cargo"],
-      "prConcurrentLimit": 5
-    }
-  ]
+  "extends": [
+    "config:recommended"
+  ],
+  "lockFileMaintenance": {
+    "enabled": true,
+    "schedule": [
+      "at any time"
+    ]
+  },
+  "nix": {
+    "enabled": true
+  },
+  "labels": [
+    "dependencies",
+    "github_actions"
+  ],
+  "ignoreDeps": [
+    "tikv-jemllocator",
+    "tikv-jemalloc-sys",
+    "tikv-jemalloc-ctl",
+    "opentelemetry-rust",
+    "tracing-opentelemetry"
+  ]
 }
rust-toolchain.toml

@@ -9,16 +9,13 @@
 # If you're having trouble making the relevant changes, bug a maintainer.
 
 [toolchain]
+channel = "1.87.0"
 profile = "minimal"
-channel = "1.89.0"
 components = [
     # For rust-analyzer
     "rust-src",
     "rust-analyzer",
     # For CI and editors
+    "rustfmt",
     "clippy",
-    # you have to install rustfmt nightly yourself (if you're not on NixOS)
-    #
-    # The rust-toolchain.toml file doesn't provide any syntax for specifying components from different toolchains
-    # "rustfmt"
 ]
@@ -89,7 +89,6 @@ serde_yaml.workspace = true
 tokio.workspace = true
 tracing-subscriber.workspace = true
 tracing.workspace = true
-ctor.workspace = true
 
 [lints]
 workspace = true
@@ -29,8 +29,6 @@ pub(crate) use crate::{context::Context, utils::get_room_info};
 
 pub(crate) const PAGE_SIZE: usize = 100;
 
-use ctor::{ctor, dtor};
-
 conduwuit::mod_ctor! {}
 conduwuit::mod_dtor! {}
 conduwuit::rustc_flags_capture! {}
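For background on the `ctor` lines removed here (and on the `ctor::ctor` to `conduwuit::ctor` swap in a later hunk): these register module constructors, functions that run during program load, before `main`. A minimal illustration with the third-party `ctor` crate, assumed here for illustration only:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

use ctor::ctor;

static EARLY: AtomicBool = AtomicBool::new(false);

#[ctor]
fn init() {
	// Runs during program load, before `main` is entered.
	EARLY.store(true, Ordering::Relaxed);
}

fn main() {
	assert!(EARLY.load(Ordering::Relaxed));
}
```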
@@ -1,8 +1,8 @@
 use std::{collections::BTreeMap, fmt::Write as _};
 
 use api::client::{
-	full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room,
-	update_avatar_url, update_displayname,
+	full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, update_avatar_url,
+	update_displayname,
 };
 use conduwuit::{
 	Err, Result, debug, debug_warn, error, info, is_equal_to,
@@ -68,8 +68,7 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
 	// Create user
 	self.services
 		.users
-		.create(&user_id, Some(password.as_str()), None)
-		.await?;
+		.create(&user_id, Some(password.as_str()))?;
 
 	// Default to pretty displayname
 	let mut displayname = user_id.localpart().to_owned();
@@ -285,7 +284,6 @@ pub(super) async fn reset_password(&self, username: String, password: Option<Str
 		.services
 		.users
 		.set_password(&user_id, Some(new_password.as_str()))
-		.await
 	{
 		| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
 		| Ok(()) => {
@@ -926,29 +924,3 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
 	))
 	.await
 }
-
-#[admin_command]
-pub(super) async fn force_leave_remote_room(
-	&self,
-	user_id: String,
-	room_id: OwnedRoomOrAliasId,
-) -> Result {
-	let user_id = parse_local_user_id(self.services, &user_id)?;
-	let (room_id, _) = self
-		.services
-		.rooms
-		.alias
-		.resolve_with_servers(&room_id, None)
-		.await?;
-
-	assert!(
-		self.services.globals.user_is_local(&user_id),
-		"Parsed user_id must be a local user"
-	);
-	remote_leave_room(self.services, &user_id, &room_id, None)
-		.boxed()
-		.await?;
-
-	self.write_str(&format!("{user_id} has been joined to {room_id}.",))
-		.await
-}
@@ -103,12 +103,6 @@ pub enum UserCommand {
 		room_id: OwnedRoomOrAliasId,
 	},
 
-	/// - Manually leave a remote room for a local user.
-	ForceLeaveRemoteRoom {
-		user_id: String,
-		room_id: OwnedRoomOrAliasId,
-	},
-
 	/// - Forces the specified user to drop their power levels to the room
 	///   default, if their permissions allow and the auth check permits
 	ForceDemote {
@@ -49,9 +49,6 @@ jemalloc_stats = [
 	"conduwuit-core/jemalloc_stats",
 	"conduwuit-service/jemalloc_stats",
 ]
-ldap = [
-	"conduwuit-service/ldap"
-]
 release_max_log_level = [
 	"conduwuit-core/release_max_log_level",
 	"conduwuit-service/release_max_log_level",
@@ -93,7 +90,6 @@ serde.workspace = true
 sha1.workspace = true
 tokio.workspace = true
 tracing.workspace = true
-ctor.workspace = true
 
 [lints]
 workspace = true
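The removed `ldap` entry is a cascading cargo feature: enabling it on this crate only turns on the same-named feature in `conduwuit-service`, where the optional code lives behind a `cfg` gate. A schematic of the pattern, with names that are illustrative rather than the real module layout:

```rust
// In the crate that owns the optional code, items are compiled only when the
// feature is enabled, so dropping `ldap` removes the code and its deps entirely.
#[cfg(feature = "ldap")]
pub fn ldap_login(user: &str) -> Result<(), String> {
	// bind to the directory, search for `user`, verify credentials ...
	let _ = user;
	Ok(())
}

#[cfg(not(feature = "ldap"))]
pub fn ldap_login(_user: &str) -> Result<(), String> {
	Err("built without the `ldap` feature".to_owned())
}
```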
@@ -373,7 +373,7 @@ pub(crate) async fn register_route(
 	let password = if is_guest { None } else { body.password.as_deref() };
 
 	// Create user
-	services.users.create(&user_id, password, None).await?;
+	services.users.create(&user_id, password)?;
 
 	// Default to pretty displayname
 	let mut displayname = user_id.localpart().to_owned();
@@ -659,8 +659,7 @@ pub(crate) async fn change_password_route(
 
 	services
 		.users
-		.set_password(sender_user, Some(&body.new_password))
-		.await?;
+		.set_password(sender_user, Some(&body.new_password))?;
 
 	if body.logout_devices {
 		// Logout all devices except the current one
@@ -1,3 +0,0 @@
-mod suspend;
-
-pub(crate) use self::suspend::*;
@@ -1,89 +0,0 @@
-use axum::extract::State;
-use conduwuit::{Err, Result};
-use futures::future::{join, join3};
-use ruma::api::client::admin::{get_suspended, set_suspended};
-
-use crate::Ruma;
-
-/// # `GET /_matrix/client/v1/admin/suspend/{userId}`
-///
-/// Check the suspension status of a target user
-pub(crate) async fn get_suspended_status(
-	State(services): State<crate::State>,
-	body: Ruma<get_suspended::v1::Request>,
-) -> Result<get_suspended::v1::Response> {
-	let sender_user = body.sender_user();
-
-	let (admin, active) =
-		join(services.users.is_admin(sender_user), services.users.is_active(&body.user_id)).await;
-	if !admin {
-		return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
-	}
-	if !services.globals.user_is_local(&body.user_id) {
-		return Err!(Request(InvalidParam("Can only check the suspended status of local users")));
-	}
-	if !active {
-		return Err!(Request(NotFound("Unknown user")));
-	}
-	Ok(get_suspended::v1::Response::new(
-		services.users.is_suspended(&body.user_id).await?,
-	))
-}
-
-/// # `PUT /_matrix/client/v1/admin/suspend/{userId}`
-///
-/// Set the suspension status of a target user
-pub(crate) async fn put_suspended_status(
-	State(services): State<crate::State>,
-	body: Ruma<set_suspended::v1::Request>,
-) -> Result<set_suspended::v1::Response> {
-	let sender_user = body.sender_user();
-
-	let (sender_admin, active, target_admin) = join3(
-		services.users.is_admin(sender_user),
-		services.users.is_active(&body.user_id),
-		services.users.is_admin(&body.user_id),
-	)
-	.await;
-
-	if !sender_admin {
-		return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
-	}
-	if !services.globals.user_is_local(&body.user_id) {
-		return Err!(Request(InvalidParam("Can only set the suspended status of local users")));
-	}
-	if !active {
-		return Err!(Request(NotFound("Unknown user")));
-	}
-	if body.user_id == *sender_user {
-		return Err!(Request(Forbidden("You cannot suspend yourself")));
-	}
-	if target_admin {
-		return Err!(Request(Forbidden("You cannot suspend another server administrator")));
-	}
-	if services.users.is_suspended(&body.user_id).await? == body.suspended {
-		// No change
-		return Ok(set_suspended::v1::Response::new(body.suspended));
-	}
-
-	let action = if body.suspended {
-		services
-			.users
-			.suspend_account(&body.user_id, sender_user)
-			.await;
-		"suspended"
-	} else {
-		services.users.unsuspend_account(&body.user_id).await;
-		"unsuspended"
-	};
-
-	if services.config.admin_room_notices {
-		// Notify the admin room that an account has been un/suspended
-		services
-			.admin
-			.send_text(&format!("{} has been {} by {}.", body.user_id, action, sender_user))
-			.await;
-	}
-
-	Ok(set_suspended::v1::Response::new(body.suspended))
-}
@@ -19,7 +19,7 @@ use crate::Ruma;
 /// of this server.
 pub(crate) async fn get_capabilities_route(
 	State(services): State<crate::State>,
-	body: Ruma<get_capabilities::v3::Request>,
+	_body: Ruma<get_capabilities::v3::Request>,
 ) -> Result<get_capabilities::v3::Response> {
 	let available: BTreeMap<RoomVersionId, RoomVersionStability> =
 		Server::available_room_versions().collect();
@@ -45,14 +45,5 @@ pub(crate) async fn get_capabilities_route(
 		json!({"enabled": services.config.forget_forced_upon_leave}),
 	)?;
 
-	if services
-		.users
-		.is_admin(body.sender_user.as_ref().unwrap())
-		.await
-	{
-		// Advertise suspension API
-		capabilities.set("uk.timedout.msc4323", json!({"suspend":true, "lock": false}))?;
-	}
-
 	Ok(get_capabilities::v3::Response { capabilities })
 }
@@ -156,34 +156,31 @@ pub(crate) async fn join_room_by_id_or_alias_route(
 	.await?;
 
 	let mut servers = body.via.clone();
-	if servers.is_empty() {
-		debug!("No via servers provided for join, injecting some.");
-		servers.extend(
-			services
-				.rooms
-				.state_cache
-				.servers_invite_via(&room_id)
-				.map(ToOwned::to_owned)
-				.collect::<Vec<_>>()
-				.await,
-		);
-
-		servers.extend(
-			services
-				.rooms
-				.state_cache
-				.invite_state(sender_user, &room_id)
-				.await
-				.unwrap_or_default()
-				.iter()
-				.filter_map(|event| event.get_field("sender").ok().flatten())
-				.filter_map(|sender: &str| UserId::parse(sender).ok())
-				.map(|user| user.server_name().to_owned()),
-		);
-
-		if let Some(server) = room_id.server_name() {
-			servers.push(server.to_owned());
-		}
-	}
+	servers.extend(
+		services
+			.rooms
+			.state_cache
+			.servers_invite_via(&room_id)
+			.map(ToOwned::to_owned)
+			.collect::<Vec<_>>()
+			.await,
+	);
+
+	servers.extend(
+		services
+			.rooms
+			.state_cache
+			.invite_state(sender_user, &room_id)
+			.await
+			.unwrap_or_default()
+			.iter()
+			.filter_map(|event| event.get_field("sender").ok().flatten())
+			.filter_map(|sender: &str| UserId::parse(sender).ok())
+			.map(|user| user.server_name().to_owned()),
+	);
+
+	if let Some(server) = room_id.server_name() {
+		servers.push(server.to_owned());
+	}
 
 	servers.sort_unstable();
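The behavioural difference in this hunk is easy to miss under the re-indentation: the removed side only derives fallback `via` servers when the client supplied none, while the added side always merges the derived candidates into the client's list. Reduced to a toy, with hypothetical names:

```rust
fn candidate_servers(mut via: Vec<String>, derived: Vec<String>, guarded: bool) -> Vec<String> {
	// `guarded = true` models the removed behaviour; `false` the added one.
	if !guarded || via.is_empty() {
		via.extend(derived);
	}
	via.sort_unstable();
	via.dedup(); // toy simplification; the real code only sorts
	via
}

fn main() {
	let derived = vec!["matrix.org".to_owned()];
	// With the guard, a client-supplied list suppresses the derived servers.
	assert_eq!(candidate_servers(vec!["example.com".into()], derived.clone(), true).len(), 1);
	// Without it, both are offered for the join attempt.
	assert_eq!(candidate_servers(vec!["example.com".into()], derived, false).len(), 2);
}
```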
@@ -215,7 +215,7 @@ pub async fn leave_room(
 	Ok(())
 }
 
-pub async fn remote_leave_room(
+async fn remote_leave_room(
 	services: &Services,
 	user_id: &UserId,
 	room_id: &RoomId,
@@ -29,7 +29,7 @@ pub(crate) use self::{
 };
 pub use self::{
 	join::join_room_by_id_helper,
-	leave::{leave_all_rooms, leave_room, remote_leave_room},
+	leave::{leave_all_rooms, leave_room},
 };
 use crate::{Ruma, client::full_user_deactivate};
 
@@ -8,7 +8,7 @@ use conduwuit::{
 	ref_at,
 	utils::{
 		IterStream, ReadyExt,
-		result::LogErr,
+		result::{FlatOk, LogErr},
 		stream::{BroadbandExt, TryIgnore, WidebandExt},
 	},
 };
@@ -35,7 +35,6 @@ use ruma::{
 };
 use tracing::warn;
 
-use super::utils::{count_to_token, parse_pagination_token as parse_token};
 use crate::Ruma;
 
 /// list of safe and common non-state events to ignore if the user is ignored
@@ -85,14 +84,14 @@ pub(crate) async fn get_message_events_route(
 	let from: PduCount = body
 		.from
 		.as_deref()
-		.map(parse_token)
+		.map(str::parse)
 		.transpose()?
 		.unwrap_or_else(|| match body.dir {
 			| Direction::Forward => PduCount::min(),
 			| Direction::Backward => PduCount::max(),
 		});
 
-	let to: Option<PduCount> = body.to.as_deref().map(parse_token).transpose()?;
+	let to: Option<PduCount> = body.to.as_deref().map(str::parse).flat_ok();
 
 	let limit: usize = body
 		.limit
@@ -181,8 +180,8 @@ pub(crate) async fn get_message_events_route(
 		.collect();
 
 	Ok(get_message_events::v3::Response {
-		start: count_to_token(from),
-		end: next_token.map(count_to_token),
+		start: from.to_string(),
+		end: next_token.as_ref().map(ToString::to_string),
 		chunk,
 		state,
 	})
@@ -321,7 +320,7 @@ pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Opti
 	filter.matches(pdu).then_some(item)
 }
 
-#[cfg_attr(debug_assertions, ctor::ctor)]
+#[cfg_attr(debug_assertions, conduwuit::ctor)]
 fn _is_sorted() {
 	debug_assert!(
 		IGNORED_MESSAGE_TYPES.is_sorted(),
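The token changes above swap the bespoke `parse_token`/`count_to_token` helpers for plain `str::parse` and `ToString`, which works because the count type round-trips through its `FromStr`/`Display` implementations. A self-contained sketch with a stand-in type; `Count` here is hypothetical, not the real `PduCount`:

```rust
use std::{fmt, str::FromStr};

#[derive(Clone, Copy, Debug, PartialEq)]
struct Count(u64);

impl FromStr for Count {
	type Err = std::num::ParseIntError;

	fn from_str(s: &str) -> Result<Self, Self::Err> { s.parse().map(Count) }
}

impl fmt::Display for Count {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) }
}

fn main() {
	// Parse an incoming pagination token, exactly as `.map(str::parse).transpose()?` does.
	let from: Option<Count> = Some("42").map(str::parse).transpose().unwrap();
	assert_eq!(from, Some(Count(42)));
	// Emit the token back to the client via Display.
	assert_eq!(from.unwrap().to_string(), "42");
}
```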
@@ -1,6 +1,5 @@
 pub(super) mod account;
 pub(super) mod account_data;
-pub(super) mod admin;
 pub(super) mod alias;
 pub(super) mod appservice;
 pub(super) mod backup;
@@ -37,14 +36,12 @@ pub(super) mod typing;
 pub(super) mod unstable;
 pub(super) mod unversioned;
 pub(super) mod user_directory;
-pub(super) mod utils;
 pub(super) mod voip;
 pub(super) mod well_known;
 
 pub use account::full_user_deactivate;
 pub(super) use account::*;
 pub(super) use account_data::*;
-pub(super) use admin::*;
 pub(super) use alias::*;
 pub(super) use appservice::*;
 pub(super) use backup::*;
@@ -57,7 +54,7 @@ pub(super) use keys::*;
 pub(super) use media::*;
 pub(super) use media_legacy::*;
 pub(super) use membership::*;
-pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room};
+pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room};
 pub(super) use message::*;
 pub(super) use openid::*;
 pub(super) use presence::*;
@@ -90,7 +90,7 @@ pub(crate) async fn get_displayname_route(
 	.await
 	{
 		if !services.users.exists(&body.user_id).await {
-			services.users.create(&body.user_id, None, None).await?;
+			services.users.create(&body.user_id, None)?;
 		}
 
 		services
@@ -189,7 +189,7 @@ pub(crate) async fn get_avatar_url_route(
 	.await
 	{
 		if !services.users.exists(&body.user_id).await {
-			services.users.create(&body.user_id, None, None).await?;
+			services.users.create(&body.user_id, None)?;
 		}
 
 		services
@@ -248,7 +248,7 @@ pub(crate) async fn get_profile_route(
 	.await
 	{
 		if !services.users.exists(&body.user_id).await {
-			services.users.create(&body.user_id, None, None).await?;
+			services.users.create(&body.user_id, None)?;
 		}
 
 		services
@@ -18,7 +18,6 @@ use ruma::{
     events::{TimelineEventType, relation::RelationType},
 };

-use super::utils::{count_to_token, parse_pagination_token as parse_token};
 use crate::Ruma;

 /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`

@@ -111,14 +110,14 @@ async fn paginate_relations_with_filter(
     dir: Direction,
 ) -> Result<get_relating_events::v1::Response> {
     let start: PduCount = from
-        .map(parse_token)
+        .map(str::parse)
         .transpose()?
         .unwrap_or_else(|| match dir {
             | Direction::Forward => PduCount::min(),
             | Direction::Backward => PduCount::max(),
         });

-    let to: Option<PduCount> = to.map(parse_token).transpose()?;
+    let to: Option<PduCount> = to.map(str::parse).flat_ok();

     // Use limit or else 30, with maximum 100
     let limit: usize = limit

@@ -130,11 +129,6 @@ async fn paginate_relations_with_filter(
     // Spec (v1.10) recommends depth of at least 3
     let depth: u8 = if recurse { 3 } else { 1 };

-    // Check if this is a thread request
-    let is_thread = filter_rel_type
-        .as_ref()
-        .is_some_and(|rel| *rel == RelationType::Thread);
-
     let events: Vec<_> = services
         .rooms
         .pdu_metadata

@@ -158,58 +152,23 @@ async fn paginate_relations_with_filter(
         .collect()
         .await;

-    // For threads, check if we should include the root event
-    let mut root_event = None;
-    if is_thread && dir == Direction::Backward {
-        // Check if we've reached the beginning of the thread
-        // (fewer events than requested means we've exhausted the thread)
-        if events.len() < limit {
-            // Try to get the thread root event
-            if let Ok(root_pdu) = services.rooms.timeline.get_pdu(target).await {
-                // Check visibility
-                if services
-                    .rooms
-                    .state_accessor
-                    .user_can_see_event(sender_user, room_id, target)
-                    .await
-                {
-                    // Store the root event to add to the response
-                    root_event = Some(root_pdu);
-                }
-            }
-        }
+    let next_batch = match dir {
+        | Direction::Forward => events.last(),
+        | Direction::Backward => events.first(),
     }
-
-    // Determine if there are more events to fetch
-    let has_more = if root_event.is_some() {
-        false // We've included the root, no more events
-    } else {
-        // Check if we got a full page of results (might be more)
-        events.len() >= limit
-    };
-
-    let next_batch = if has_more {
-        match dir {
-            | Direction::Forward => events.last(),
-            | Direction::Backward => events.first(),
-        }
-        .map(|(count, _)| count_to_token(*count))
-    } else {
-        None
-    };
-
-    // Build the response chunk with thread root if needed
-    let chunk: Vec<_> = root_event
-        .into_iter()
-        .map(Event::into_format)
-        .chain(events.into_iter().map(at!(1)).map(Event::into_format))
-        .collect();
+    .map(at!(0))
+    .as_ref()
+    .map(ToString::to_string);

     Ok(get_relating_events::v1::Response {
         next_batch,
         prev_batch: from.map(Into::into),
         recursion_depth: recurse.then_some(depth.into()),
-        chunk,
+        chunk: events
+            .into_iter()
+            .map(at!(1))
+            .map(Event::into_format)
+            .collect(),
     })
 }
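A note on the `next_batch` selection above: forward pagination tokenises the count of the last event returned, backward pagination the first. A self-contained sketch of that rule, with simplified `(count, event)` tuples standing in for PDUs (names here are illustrative, not this crate's API):

```rust
// Sketch: pick the pagination token from whichever end of the page the
// client will continue from.
#[derive(Clone, Copy)]
enum Direction {
    Forward,
    Backward,
}

fn next_batch(events: &[(u64, &str)], dir: Direction) -> Option<String> {
    match dir {
        Direction::Forward => events.last(),
        Direction::Backward => events.first(),
    }
    .map(|(count, _)| count.to_string())
}

fn main() {
    let events = [(10, "a"), (11, "b"), (12, "c")];
    assert_eq!(next_batch(&events, Direction::Forward).as_deref(), Some("12"));
    assert_eq!(next_batch(&events, Direction::Backward).as_deref(), Some("10"));
}
```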
@@ -63,9 +63,8 @@ pub(crate) async fn report_room_route(
         .server_in_room(&services.server.name, &body.room_id)
         .await
     {
-        return Err!(Request(NotFound(
-            "Room does not exist to us, no local users have joined at all"
-        )));
+        // return 200 as to not reveal if the room exists, preventing enumeration.
+        return Ok(report_room::v3::Response {});
     }

     let report = Report {

@@ -101,7 +100,12 @@ pub(crate) async fn report_event_route(

     // check if we know about the reported event ID or if it's invalid
     let Ok(pdu) = services.rooms.timeline.get_pdu(&body.event_id).await else {
-        return Err!(Request(NotFound("Event ID is not known to us or Event ID is invalid")));
+        info!(
+            "Received event report by user {sender_user} for room {} and event ID {}, but the \
+             event ID is not known to us or invalid.",
+            body.room_id, body.event_id
+        );
+        return Ok(report_content::v3::Response {});
     };

     is_event_report_valid(

@@ -197,6 +201,8 @@ async fn is_event_report_valid(
         valid"
     );

+    // Followup(MSC4277): Should we return 200 regardless in this check? but just
+    // log the warning?
     if room_id != pdu.room_id {
         return Err!(Request(NotFound("Event ID does not belong to the reported room",)));
     }
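Both report hunks above switch from a 404 to an empty 200 so callers cannot probe which rooms or events the server knows about. A minimal, self-contained sketch of that anti-enumeration pattern (the handler and response names are placeholders, not this crate's real types):

```rust
// Sketch: respond identically whether or not the reported target exists,
// so a malicious reporter learns nothing from the status code.
#[derive(Debug)]
struct EmptyResponse;

fn handle_report(target_known: bool) -> EmptyResponse {
    if !target_known {
        // Log for operators only; the caller sees the same response either way.
        eprintln!("report for unknown room/event; replying 200 anyway");
    }
    EmptyResponse // HTTP 200 in both branches
}

fn main() {
    let _ = handle_report(true);
    let _ = handle_report(false);
}
```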
@@ -3,14 +3,13 @@ use std::time::Duration;
 use axum::extract::State;
 use axum_client_ip::InsecureClientIp;
 use conduwuit::{
-    Err, Error, Result, debug, err, info,
-    utils::{self, ReadyExt, hash},
+    Err, Error, Result, debug, err, info, utils,
+    utils::{ReadyExt, hash},
 };
-use conduwuit_core::{debug_error, debug_warn};
-use conduwuit_service::{Services, uiaa::SESSION_ID_LENGTH};
+use conduwuit_service::uiaa::SESSION_ID_LENGTH;
 use futures::StreamExt;
 use ruma::{
-    OwnedUserId, UserId,
+    UserId,
     api::client::{
         session::{
             get_login_token,

@@ -50,154 +49,6 @@ pub(crate) async fn get_login_types_route(
     ]))
 }

-/// Authenticates the given user by its ID and its password.
-///
-/// Returns the user ID if successful, and an error otherwise.
-#[tracing::instrument(skip_all, fields(%user_id), name = "password")]
-pub(crate) async fn password_login(
-    services: &Services,
-    user_id: &UserId,
-    lowercased_user_id: &UserId,
-    password: &str,
-) -> Result<OwnedUserId> {
-    // Restrict login to accounts only of type 'password', including untyped
-    // legacy accounts which are equivalent to 'password'.
-    if services
-        .users
-        .origin(user_id)
-        .await
-        .is_ok_and(|origin| origin != "password")
-    {
-        return Err!(Request(Forbidden("Account does not permit password login.")));
-    }
-
-    let (hash, user_id) = match services.users.password_hash(user_id).await {
-        | Ok(hash) => (hash, user_id),
-        | Err(_) => services
-            .users
-            .password_hash(lowercased_user_id)
-            .await
-            .map(|hash| (hash, lowercased_user_id))
-            .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?,
-    };
-
-    if hash.is_empty() {
-        return Err!(Request(UserDeactivated("The user has been deactivated")));
-    }
-
-    hash::verify_password(password, &hash)
-        .inspect_err(|e| debug_error!("{e}"))
-        .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
-
-    Ok(user_id.to_owned())
-}
-
-/// Authenticates the given user through the configured LDAP server.
-///
-/// Creates the user if the user is found in the LDAP and do not already have an
-/// account.
-#[tracing::instrument(skip_all, fields(%user_id), name = "ldap")]
-pub(super) async fn ldap_login(
-    services: &Services,
-    user_id: &UserId,
-    lowercased_user_id: &UserId,
-    password: &str,
-) -> Result<OwnedUserId> {
-    let (user_dn, is_ldap_admin) = match services.config.ldap.bind_dn.as_ref() {
-        | Some(bind_dn) if bind_dn.contains("{username}") =>
-            (bind_dn.replace("{username}", lowercased_user_id.localpart()), false),
-        | _ => {
-            debug!("Searching user in LDAP");
-
-            let dns = services.users.search_ldap(user_id).await?;
-            if dns.len() >= 2 {
-                return Err!(Ldap("LDAP search returned two or more results"));
-            }
-
-            let Some((user_dn, is_admin)) = dns.first() else {
-                return password_login(services, user_id, lowercased_user_id, password).await;
-            };
-
-            (user_dn.clone(), *is_admin)
-        },
-    };
-
-    let user_id = services
-        .users
-        .auth_ldap(&user_dn, password)
-        .await
-        .map(|()| lowercased_user_id.to_owned())?;
-
-    // LDAP users are automatically created on first login attempt. This is a very
-    // common feature that can be seen on many services using a LDAP provider for
-    // their users (synapse, Nextcloud, Jellyfin, ...).
-    //
-    // LDAP users are crated with a dummy password but non empty because an empty
-    // password is reserved for deactivated accounts. The conduwuit password field
-    // will never be read to login a LDAP user so it's not an issue.
-    if !services.users.exists(lowercased_user_id).await {
-        services
-            .users
-            .create(lowercased_user_id, Some("*"), Some("ldap"))
-            .await?;
-    }
-
-    let is_conduwuit_admin = services.admin.user_is_admin(lowercased_user_id).await;
-
-    if is_ldap_admin && !is_conduwuit_admin {
-        services.admin.make_user_admin(lowercased_user_id).await?;
-    } else if !is_ldap_admin && is_conduwuit_admin {
-        services.admin.revoke_admin(lowercased_user_id).await?;
-    }
-
-    Ok(user_id)
-}
-
-pub(crate) async fn handle_login(
-    services: &Services,
-    body: &Ruma<login::v3::Request>,
-    identifier: Option<&uiaa::UserIdentifier>,
-    password: &str,
-    user: Option<&String>,
-) -> Result<OwnedUserId> {
-    debug!("Got password login type");
-    let user_id =
-        if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
-            UserId::parse_with_server_name(user_id, &services.config.server_name)
-        } else if let Some(user) = user {
-            UserId::parse_with_server_name(user, &services.config.server_name)
-        } else {
-            return Err!(Request(Unknown(
-                debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
-            )));
-        }
-        .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;
-
-    let lowercased_user_id = UserId::parse_with_server_name(
-        user_id.localpart().to_lowercase(),
-        &services.config.server_name,
-    )?;
-
-    if !services.globals.user_is_local(&user_id)
-        || !services.globals.user_is_local(&lowercased_user_id)
-    {
-        return Err!(Request(Unknown("User ID does not belong to this homeserver")));
-    }
-
-    if cfg!(feature = "ldap") && services.config.ldap.enable {
-        match Box::pin(ldap_login(services, &user_id, &lowercased_user_id, password)).await {
-            | Ok(user_id) => Ok(user_id),
-            | Err(err) if services.config.ldap.ldap_only => Err(err),
-            | Err(err) => {
-                debug_warn!("{err}");
-                password_login(services, &user_id, &lowercased_user_id, password).await
-            },
-        }
-    } else {
-        password_login(services, &user_id, &lowercased_user_id, password).await
-    }
-}
-
 /// # `POST /_matrix/client/v3/login`
 ///
 /// Authenticates the user and returns an access token it can use in subsequent

@@ -229,7 +80,70 @@ pub(crate) async fn login_route(
             password,
             user,
             ..
-        }) => handle_login(&services, &body, identifier.as_ref(), password, user.as_ref()).await?,
+        }) => {
+            debug!("Got password login type");
+            let user_id =
+                if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
+                    UserId::parse_with_server_name(user_id, &services.config.server_name)
+                } else if let Some(user) = user {
+                    UserId::parse_with_server_name(user, &services.config.server_name)
+                } else {
+                    return Err!(Request(Unknown(
+                        debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
+                    )));
+                }
+                .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;
+
+            let lowercased_user_id = UserId::parse_with_server_name(
+                user_id.localpart().to_lowercase(),
+                &services.config.server_name,
+            )?;
+
+            if !services.globals.user_is_local(&user_id)
+                || !services.globals.user_is_local(&lowercased_user_id)
+            {
+                return Err!(Request(Unknown("User ID does not belong to this homeserver")));
+            }
+
+            // first try the username as-is
+            let hash = services
+                .users
+                .password_hash(&user_id)
+                .await
+                .inspect_err(|e| debug!("{e}"));
+
+            match hash {
+                | Ok(hash) => {
+                    if hash.is_empty() {
+                        return Err!(Request(UserDeactivated("The user has been deactivated")));
+                    }
+
+                    hash::verify_password(password, &hash)
+                        .inspect_err(|e| debug!("{e}"))
+                        .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
+
+                    user_id
+                },
+                | Err(_e) => {
+                    let hash_lowercased_user_id = services
+                        .users
+                        .password_hash(&lowercased_user_id)
+                        .await
+                        .inspect_err(|e| debug!("{e}"))
+                        .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
+
+                    if hash_lowercased_user_id.is_empty() {
+                        return Err!(Request(UserDeactivated("The user has been deactivated")));
+                    }
+
+                    hash::verify_password(password, &hash_lowercased_user_id)
+                        .inspect_err(|e| debug!("{e}"))
+                        .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
+
+                    lowercased_user_id
+                },
+            }
+        },
         | login::v3::LoginInfo::Token(login::v3::Token { token }) => {
             debug!("Got token login type");
             if !services.server.config.login_via_existing_session {

@@ -284,8 +198,8 @@ pub(crate) async fn login_route(
         .clone()
         .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());

-    // Generate a new token for the device (ensuring no collisions)
-    let token = services.users.generate_unique_token().await;
+    // Generate a new token for the device
+    let token = utils::random_string(TOKEN_LENGTH);

     // Determine if device_id was provided and exists in the db for this user
     let device_exists = if body.device_id.is_some() {
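The inlined password path above looks the user up exactly as typed and only retries with a lowercased localpart when the first lookup fails. A standalone sketch of that fallback order, with a plain `HashMap` standing in for the user store (helper names are illustrative):

```rust
// Sketch: resolve a login identifier case-sensitively first, then fall back
// to the lowercased form, returning the identifier that actually matched.
use std::collections::HashMap;

fn find_hash<'a>(db: &'a HashMap<String, String>, id: &str) -> Option<(&'a str, String)> {
    let lowered = id.to_lowercase();
    db.get(id)
        .map(|h| (h.as_str(), id.to_owned()))
        .or_else(|| db.get(&lowered).map(|h| (h.as_str(), lowered)))
}

fn main() {
    let mut db = HashMap::new();
    db.insert("@alice:example.org".to_owned(), "$argon2id$example".to_owned());
    // A login typed as "@Alice:example.org" still resolves to the stored user.
    assert!(find_hash(&db, "@Alice:example.org").is_some());
}
```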
@@ -430,7 +430,7 @@ async fn handle_left_room(
     .ok();

     // Left before last sync
-    if (Some(since) >= left_count && !include_leave) || Some(next_batch) < left_count {
+    if Some(since) >= left_count {
         return Ok(None);
     }

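The `Some(since) >= left_count` guard in this hunk leans on `Option`'s derived ordering, where `None` sorts below any `Some` value; a quick demonstration:

```rust
// Option<T> ordering: None < Some(_), so an absent left_count never
// short-circuits the "left before last sync" check.
fn main() {
    let left_count: Option<u64> = None;
    assert!(Some(5u64) >= left_count);
    assert!(Some(5u64) >= Some(5));
    assert!(!(Some(4u64) >= Some(5)));
}
```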
@@ -45,7 +45,6 @@ use crate::{
 type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
 const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";

-#[allow(clippy::cognitive_complexity)]
 /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
 ///
 /// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)
@@ -292,7 +292,7 @@ pub(crate) async fn get_timezone_key_route(
     .await
 {
     if !services.users.exists(&body.user_id).await {
-        services.users.create(&body.user_id, None, None).await?;
+        services.users.create(&body.user_id, None)?;
     }

     services

@@ -352,7 +352,7 @@ pub(crate) async fn get_profile_key_route(
     .await
 {
     if !services.users.exists(&body.user_id).await {
-        services.users.create(&body.user_id, None, None).await?;
+        services.users.create(&body.user_id, None)?;
     }

     services
@@ -58,7 +58,6 @@ pub(crate) async fn get_supported_versions_route(
     ("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */
     ("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */
     ("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */
-    ("uk.timedout.msc4323".to_owned(), true), /* agnostic suspend (https://github.com/matrix-org/matrix-spec-proposals/pull/4323) */
 ]),
 };

@@ -1,28 +0,0 @@
-use conduwuit::{
-    Result, err,
-    matrix::pdu::{PduCount, ShortEventId},
-};
-
-/// Parse a pagination token, trying ShortEventId first, then falling back to
-/// PduCount
-pub(crate) fn parse_pagination_token(token: &str) -> Result<PduCount> {
-    // Try parsing as ShortEventId first
-    if let Ok(shorteventid) = token.parse::<ShortEventId>() {
-        // ShortEventId maps directly to a PduCount in our database
-        Ok(PduCount::Normal(shorteventid))
-    } else if let Ok(count) = token.parse::<u64>() {
-        // Fallback to PduCount for backwards compatibility
-        Ok(PduCount::Normal(count))
-    } else if let Ok(count) = token.parse::<i64>() {
-        // Also handle negative counts for backfilled events
-        Ok(PduCount::from_signed(count))
-    } else {
-        Err(err!(Request(InvalidParam("Invalid pagination token"))))
-    }
-}
-
-/// Convert a PduCount to a token string (using the underlying ShortEventId)
-pub(crate) fn count_to_token(count: PduCount) -> String {
-    // The PduCount's unsigned value IS the ShortEventId
-    count.into_unsigned().to_string()
-}
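For reference, the deleted helpers above round-trip a count through its string token. A self-contained sketch of that round trip, with `PduCount` reduced to a plain `u64` wrapper (the real type distinguishes normal and backfilled counts, which this sketch omits):

```rust
// Sketch of the removed pagination-token round trip.
#[derive(Debug, PartialEq)]
struct PduCount(u64);

fn parse_pagination_token(token: &str) -> Result<PduCount, &'static str> {
    // With PduCount simplified to a u64, ShortEventId and count tokens
    // parse identically.
    token
        .parse::<u64>()
        .map(PduCount)
        .map_err(|_| "Invalid pagination token")
}

fn count_to_token(count: &PduCount) -> String {
    count.0.to_string()
}

fn main() {
    let count = PduCount(42);
    let token = count_to_token(&count);
    assert_eq!(parse_pagination_token(&token).unwrap(), count);
}
```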
@@ -184,8 +184,6 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
         "/_matrix/client/unstable/im.nheko.summary/rooms/:room_id_or_alias/summary",
         get(client::get_room_summary_legacy)
     )
-    .ruma_route(&client::get_suspended_status)
-    .ruma_route(&client::put_suspended_status)
     .ruma_route(&client::well_known_support)
     .ruma_route(&client::well_known_client)
     .route("/_conduwuit/server_version", get(client::conduwuit_server_version))
@@ -5,14 +5,6 @@ use axum_extra::{
     typed_header::TypedHeaderRejectionReason,
 };
 use conduwuit::{Err, Error, Result, debug_error, err, warn};
-use futures::{
-    TryFutureExt,
-    future::{
-        Either::{Left, Right},
-        select_ok,
-    },
-    pin_mut,
-};
 use ruma::{
     CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
     api::{

@@ -62,7 +54,17 @@ pub(super) async fn auth(
     | None => request.query.access_token.as_deref(),
 };

-let token = find_token(services, token).await?;
+let token = if let Some(token) = token {
+    match services.appservice.find_from_token(token).await {
+        | Some(reg_info) => Token::Appservice(Box::new(reg_info)),
+        | _ => match services.users.find_from_token(token).await {
+            | Ok((user_id, device_id)) => Token::User((user_id, device_id)),
+            | _ => Token::Invalid,
+        },
+    }
+} else {
+    Token::None
+};

 if metadata.authentication == AuthScheme::None {
     match metadata {

@@ -340,25 +342,3 @@ async fn parse_x_matrix(request: &mut Request) -> Result<XMatrix> {

     Ok(x_matrix)
 }
-
-async fn find_token(services: &Services, token: Option<&str>) -> Result<Token> {
-    let Some(token) = token else {
-        return Ok(Token::None);
-    };
-
-    let user_token = services.users.find_from_token(token).map_ok(Token::User);
-
-    let appservice_token = services
-        .appservice
-        .find_from_token(token)
-        .map_ok(Box::new)
-        .map_ok(Token::Appservice);
-
-    pin_mut!(user_token, appservice_token);
-    // Returns Ok if either token type succeeds, Err only if both fail
-    match select_ok([Left(user_token), Right(appservice_token)]).await {
-        | Err(e) if !e.is_not_found() => Err(e),
-        | Ok((token, _)) => Ok(token),
-        | _ => Ok(Token::Invalid),
-    }
-}
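The removed `find_token` races the user and appservice lookups with `futures::future::select_ok`, taking whichever succeeds first and failing only if both fail. A minimal sketch of that pattern, assuming the `futures` and `tokio` crates (the lookup functions are placeholders):

```rust
// Sketch: race two fallible async lookups; the first Ok wins, and Err is
// returned only when every future has failed.
use futures::future::{select_ok, Either::{Left, Right}};
use futures::pin_mut;

async fn lookup_user() -> Result<&'static str, ()> { Err(()) }
async fn lookup_appservice() -> Result<&'static str, ()> { Ok("appservice") }

#[tokio::main]
async fn main() {
    let user = lookup_user();
    let appservice = lookup_appservice();
    pin_mut!(user, appservice);
    match select_ok([Left(user), Right(appservice)]).await {
        Ok((token, _remaining)) => println!("resolved: {token}"),
        Err(_) => println!("both lookups failed"),
    }
}
```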
@@ -1,4 +1,3 @@
-#![allow(clippy::doc_link_with_quotes)]
 pub mod check;
 pub mod manager;
 pub mod proxy;

@@ -126,11 +125,9 @@ pub struct Config {
     /// This is the only directory where continuwuity will save its data,
     /// including media. Note: this was previously "/var/lib/matrix-conduit".
     ///
-    /// YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
-    /// `systemd` service. The service file sets it to `/var/lib/conduwuit`
-    /// using an environment variable and also grants write access.
+    /// YOU NEED TO EDIT THIS.
     ///
-    /// example: "/var/lib/conduwuit"
+    /// example: "/var/lib/continuwuity"
     pub database_path: PathBuf,

     /// continuwuity supports online database backups using RocksDB's Backup

@@ -714,21 +711,12 @@ pub struct Config {
     #[serde(default)]
     pub well_known: WellKnownConfig,

-    /// Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
-    /// Jaeger exporter. Traces will be sent via OTLP to a collector (such as
-    /// Jaeger) that supports the OpenTelemetry Protocol.
-    ///
-    /// Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
-    /// environment variable (defaults to http://localhost:4318).
-    #[serde(default, alias = "allow_jaeger")]
-    pub allow_otlp: bool,
+    #[serde(default)]
+    pub allow_jaeger: bool,

-    /// Filter for OTLP tracing spans. This controls which spans are exported
-    /// to the OTLP collector.
-    ///
     /// default: "info"
-    #[serde(default = "default_otlp_filter", alias = "jaeger_filter")]
-    pub otlp_filter: String,
+    #[serde(default = "default_jaeger_filter")]
+    pub jaeger_filter: String,

     /// If the 'perf_measurements' compile-time feature is enabled, enables
     /// collecting folded stack trace profile of tracing spans using

@@ -1959,10 +1947,6 @@ pub struct Config {
     pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure:
         bool,

-    // external structure; separate section
-    #[serde(default)]
-    pub ldap: LdapConfig,
-
     // external structure; separate section
     #[serde(default)]
     pub blurhashing: BlurhashConfig,

@@ -2057,114 +2041,6 @@ pub struct BlurhashConfig {
     pub blurhash_max_raw_size: u64,
 }

-#[derive(Clone, Debug, Default, Deserialize)]
-#[config_example_generator(filename = "conduwuit-example.toml", section = "global.ldap")]
-pub struct LdapConfig {
-    /// Whether to enable LDAP login.
-    ///
-    /// example: "true"
-    #[serde(default)]
-    pub enable: bool,
-
-    /// Whether to force LDAP authentication or authorize classical password
-    /// login.
-    ///
-    /// example: "true"
-    #[serde(default)]
-    pub ldap_only: bool,
-
-    /// URI of the LDAP server.
-    ///
-    /// example: "ldap://ldap.example.com:389"
-    ///
-    /// default: ""
-    #[serde(default)]
-    pub uri: Option<Url>,
-
-    /// Root of the searches.
-    ///
-    /// example: "ou=users,dc=example,dc=org"
-    ///
-    /// default: ""
-    #[serde(default)]
-    pub base_dn: String,
-
-    /// Bind DN if anonymous search is not enabled.
-    ///
-    /// You can use the variable `{username}` that will be replaced by the
-    /// entered username. In such case, the password used to bind will be the
-    /// one provided for the login and not the one given by
-    /// `bind_password_file`. Beware: automatically granting admin rights will
-    /// not work if you use this direct bind instead of a LDAP search.
-    ///
-    /// example: "cn=ldap-reader,dc=example,dc=org" or
-    /// "cn={username},ou=users,dc=example,dc=org"
-    ///
-    /// default: ""
-    #[serde(default)]
-    pub bind_dn: Option<String>,
-
-    /// Path to a file on the system that contains the password for the
-    /// `bind_dn`.
-    ///
-    /// The server must be able to access the file, and it must not be empty.
-    ///
-    /// default: ""
-    #[serde(default)]
-    pub bind_password_file: Option<PathBuf>,
-
-    /// Search filter to limit user searches.
-    ///
-    /// You can use the variable `{username}` that will be replaced by the
-    /// entered username for more complex filters.
-    ///
-    /// example: "(&(objectClass=person)(memberOf=matrix))"
-    ///
-    /// default: "(objectClass=*)"
-    #[serde(default = "default_ldap_search_filter")]
-    pub filter: String,
-
-    /// Attribute to use to uniquely identify the user.
-    ///
-    /// example: "uid" or "cn"
-    ///
-    /// default: "uid"
-    #[serde(default = "default_ldap_uid_attribute")]
-    pub uid_attribute: String,
-
-    /// Attribute containing the display name of the user.
-    ///
-    /// example: "givenName" or "sn"
-    ///
-    /// default: "givenName"
-    #[serde(default = "default_ldap_name_attribute")]
-    pub name_attribute: String,
-
-    /// Root of the searches for admin users.
-    ///
-    /// Defaults to `base_dn` if empty.
-    ///
-    /// example: "ou=admins,dc=example,dc=org"
-    ///
-    /// default: ""
-    #[serde(default)]
-    pub admin_base_dn: String,
-
-    /// The LDAP search filter to find administrative users for continuwuity.
-    ///
-    /// If left blank, administrative state must be configured manually for each
-    /// user.
-    ///
-    /// You can use the variable `{username}` that will be replaced by the
-    /// entered username for more complex filters.
-    ///
-    /// example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
-    ///
-    /// default: ""
-    #[serde(default)]
-    pub admin_filter: String,
-}
-
 #[derive(Deserialize, Clone, Debug)]
 #[serde(transparent)]
 struct ListeningPort {

@@ -2376,7 +2252,7 @@ fn default_tracing_flame_filter() -> String {
     .to_owned()
 }

-fn default_otlp_filter() -> String {
+fn default_jaeger_filter() -> String {
     cfg!(debug_assertions)
         .then_some("trace,h2=off")
         .unwrap_or("info")

@@ -2554,9 +2430,3 @@ pub(super) fn default_blurhash_x_component() -> u32 { 4 }
 pub(super) fn default_blurhash_y_component() -> u32 { 3 }

 // end recommended & blurhashing defaults
-
-fn default_ldap_search_filter() -> String { "(objectClass=*)".to_owned() }
-
-fn default_ldap_uid_attribute() -> String { String::from("uid") }
-
-fn default_ldap_name_attribute() -> String { String::from("givenName") }
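For orientation, the two tracing shapes in this config diff, plus the removed LDAP section, would look roughly like this in TOML; the `[global]` section name is assumed from the example config the doc comments reference, and all values are illustrative examples taken from those comments, not a working setup:

```toml
[global]
# Removed (-) side: OTLP exporter, with the old name kept as an alias.
allow_otlp = true
otlp_filter = "info"
# Kept (+) side instead uses the older pair:
# allow_jaeger = true
# jaeger_filter = "info"

# LDAP section as documented on the removed (-) side:
[global.ldap]
enable = true
uri = "ldap://ldap.example.com:389"
base_dn = "ou=users,dc=example,dc=org"
bind_dn = "cn=ldap-reader,dc=example,dc=org"
bind_password_file = "/etc/conduwuit/ldap-password"
filter = "(objectClass=*)"
uid_attribute = "uid"
```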
@@ -100,7 +100,7 @@ pub fn trap() {

 #[must_use]
 pub fn panic_str(p: &Box<dyn Any + Send>) -> &'static str {
-    (**p).downcast_ref::<&str>().copied().unwrap_or_default()
+    p.downcast_ref::<&str>().copied().unwrap_or_default()
 }

 #[inline(always)]
@@ -110,8 +110,6 @@ pub enum Error {
     InconsistentRoomState(&'static str, ruma::OwnedRoomId),
     #[error(transparent)]
     IntoHttp(#[from] ruma::api::error::IntoHttpError),
-    #[error("{0}")]
-    Ldap(Cow<'static, str>),
     #[error(transparent)]
     Mxc(#[from] ruma::MxcUriError),
     #[error(transparent)]
@@ -18,7 +18,7 @@ pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[

 /// Experimental, partially supported room versions
 pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] =
-    &[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
+    &[RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];

 type RoomVersion = (RoomVersionId, RoomVersionStability);

@@ -66,7 +66,6 @@ serde.workspace = true
 serde_json.workspace = true
 tokio.workspace = true
 tracing.workspace = true
-ctor.workspace = true

 [lints]
 workspace = true
@@ -19,7 +19,7 @@ where
     S: Stream<Item = K> + Send + 'a,
     K: AsRef<[u8]> + Send + Sync + 'a,
 {
-    fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
+    fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a;
 }

 impl<'a, K, S> Get<'a, K, S> for S

@@ -29,7 +29,7 @@ where
     K: AsRef<[u8]> + Send + Sync + 'a,
 {
     #[inline]
-    fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
+    fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a {
         map.get_batch(self)
     }
 }

@@ -39,7 +39,7 @@ where
 pub(crate) fn get_batch<'a, S, K>(
     self: &'a Arc<Self>,
     keys: S,
-) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
+) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a
 where
     S: Stream<Item = K> + Send + 'a,
     K: AsRef<[u8]> + Send + Sync + 'a,
@@ -10,7 +10,7 @@ use super::stream::is_cached;
 use crate::{keyval, keyval::Key, stream};

 #[implement(super::Map)]
-pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
+pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send
 where
     K: Deserialize<'a> + Send,
 {
@@ -15,7 +15,7 @@ use crate::{
 pub fn keys_from<'a, K, P>(
     self: &'a Arc<Self>,
     from: &P,
-) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
 where
     P: Serialize + ?Sized + Debug,
     K: Deserialize<'a> + Send,

@@ -40,7 +40,7 @@ where
 pub fn keys_raw_from<'a, K, P>(
     self: &'a Arc<Self>,
     from: &P,
-) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync,
     K: Deserialize<'a> + Send,
@@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
 pub fn keys_prefix<'a, K, P>(
     self: &'a Arc<Self>,
     prefix: &P,
-) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
 where
     P: Serialize + ?Sized + Debug,
     K: Deserialize<'a> + Send,

@@ -37,7 +37,7 @@ where
 pub fn keys_raw_prefix<'a, K, P>(
     self: &'a Arc<Self>,
     prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
     K: Deserialize<'a> + Send + 'a,

@@ -50,7 +50,7 @@ where
 pub fn raw_keys_prefix<'a, P>(
     self: &'a Arc<Self>,
     prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 {
@@ -17,7 +17,7 @@ where
     S: Stream<Item = K> + Send + 'a,
     K: Serialize + Debug,
 {
-    fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
+    fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a;
 }

 impl<'a, K, S> Qry<'a, K, S> for S

@@ -27,7 +27,7 @@ where
     K: Serialize + Debug + 'a,
 {
     #[inline]
-    fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
+    fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a {
         map.qry_batch(self)
     }
 }

@@ -37,7 +37,7 @@ where
 pub(crate) fn qry_batch<'a, S, K>(
     self: &'a Arc<Self>,
     keys: S,
-) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
+) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a
 where
     S: Stream<Item = K> + Send + 'a,
     K: Serialize + Debug + 'a,
@@ -10,7 +10,7 @@ use super::rev_stream::is_cached;
 use crate::{keyval, keyval::Key, stream};

 #[implement(super::Map)]
-pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
+pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send
 where
     K: Deserialize<'a> + Send,
 {
@@ -15,7 +15,7 @@ use crate::{
 pub fn rev_keys_from<'a, K, P>(
     self: &'a Arc<Self>,
     from: &P,
-) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
 where
     P: Serialize + ?Sized + Debug,
     K: Deserialize<'a> + Send,

@@ -41,7 +41,7 @@ where
 pub fn rev_keys_raw_from<'a, K, P>(
     self: &'a Arc<Self>,
     from: &P,
-) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync,
     K: Deserialize<'a> + Send,
@@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
 pub fn rev_keys_prefix<'a, K, P>(
     self: &'a Arc<Self>,
     prefix: &P,
-) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
 where
     P: Serialize + ?Sized + Debug,
     K: Deserialize<'a> + Send,

@@ -37,7 +37,7 @@ where
 pub fn rev_keys_raw_prefix<'a, K, P>(
     self: &'a Arc<Self>,
     prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
     K: Deserialize<'a> + Send + 'a,

@@ -50,7 +50,7 @@ where
 pub fn rev_raw_keys_prefix<'a, P>(
     self: &'a Arc<Self>,
     prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 {
@@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
 #[implement(super::Map)]
 pub fn rev_stream<'a, K, V>(
     self: &'a Arc<Self>,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send
 where
     K: Deserialize<'a> + Send,
     V: Deserialize<'a> + Send,
@@ -20,7 +20,7 @@ use crate::{
 pub fn rev_stream_from<'a, K, V, P>(
     self: &'a Arc<Self>,
     from: &P,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
 where
     P: Serialize + ?Sized + Debug,
     K: Deserialize<'a> + Send,

@@ -55,7 +55,7 @@ where
 pub fn rev_stream_raw_from<'a, K, V, P>(
     self: &'a Arc<Self>,
     from: &P,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync,
     K: Deserialize<'a> + Send,
@@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
 pub fn rev_stream_prefix<'a, K, V, P>(
     self: &'a Arc<Self>,
     prefix: &P,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
 where
     P: Serialize + ?Sized + Debug,
     K: Deserialize<'a> + Send,

@@ -50,7 +50,7 @@ where
 pub fn rev_stream_raw_prefix<'a, K, V, P>(
     self: &'a Arc<Self>,
     prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
     K: Deserialize<'a> + Send + 'a,

@@ -68,7 +68,7 @@ where
 pub fn rev_raw_stream_prefix<'a, P>(
     self: &'a Arc<Self>,
     prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 {
@@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
 #[implement(super::Map)]
 pub fn stream<'a, K, V>(
     self: &'a Arc<Self>,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send
 where
     K: Deserialize<'a> + Send,
     V: Deserialize<'a> + Send,
@@ -19,7 +19,7 @@ use crate::{
 pub fn stream_from<'a, K, V, P>(
     self: &'a Arc<Self>,
     from: &P,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
 where
     P: Serialize + ?Sized + Debug,
     K: Deserialize<'a> + Send,

@@ -53,7 +53,7 @@ where
 pub fn stream_raw_from<'a, K, V, P>(
     self: &'a Arc<Self>,
     from: &P,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync,
     K: Deserialize<'a> + Send,
@@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
 pub fn stream_prefix<'a, K, V, P>(
     self: &'a Arc<Self>,
     prefix: &P,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
 where
     P: Serialize + ?Sized + Debug,
     K: Deserialize<'a> + Send,

@@ -50,7 +50,7 @@ where
 pub fn stream_raw_prefix<'a, K, V, P>(
     self: &'a Arc<Self>,
     prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
     K: Deserialize<'a> + Send + 'a,

@@ -68,7 +68,7 @@ where
 pub fn raw_stream_prefix<'a, P>(
     self: &'a Arc<Self>,
     prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a
 where
     P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 {
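All of the `'a` to `'_` swaps in the map/stream hunks above rely on the same rule: in return position, `'_` resolves to the function's sole input lifetime, so the elided and named signatures are equivalent. A minimal illustration:

```rust
// Minimal demonstration of return-position lifetime elision.
struct Map(Vec<u8>);

// Named lifetime, as on one side of the diffs:
fn first_named<'a>(m: &'a Map) -> Option<&'a u8> {
    m.0.first()
}

// Elided, as on the other side: `'_` is tied to the only input lifetime.
fn first_elided(m: &Map) -> Option<&'_ u8> {
    m.0.first()
}

fn main() {
    let m = Map(vec![1, 2, 3]);
    assert_eq!(first_named(&m), first_elided(&m));
}
```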
@@ -374,10 +374,6 @@ pub(super) static MAPS: &[Descriptor] = &[
         name: "userid_masterkeyid",
         ..descriptor::RANDOM_SMALL
     },
-    Descriptor {
-        name: "userid_origin",
-        ..descriptor::RANDOM
-    },
     Descriptor {
         name: "userid_password",
         ..descriptor::RANDOM
@@ -3,8 +3,6 @@
 extern crate conduwuit_core as conduwuit;
 extern crate rust_rocksdb as rocksdb;

-use ctor::{ctor, dtor};
-
 conduwuit::mod_ctor! {}
 conduwuit::mod_dtor! {}
 conduwuit::rustc_flags_capture! {}
@@ -443,7 +443,7 @@ pub(crate) fn into_send_seek(result: stream::State<'_>) -> stream::State<'static
     unsafe { std::mem::transmute(result) }
 }

-fn into_recv_seek(result: stream::State<'static>) -> stream::State<'static> {
+fn into_recv_seek(result: stream::State<'static>) -> stream::State<'_> {
     // SAFETY: This is to receive the State from the channel; see above.
     unsafe { std::mem::transmute(result) }
 }
@@ -326,7 +326,7 @@ fn ser_array() {
 }

 #[test]
-#[ignore = "arrayvec deserialization is not implemented (separators)"]
+#[ignore]
 fn de_array() {
     let a: u64 = 123_456;
     let b: u64 = 987_654;

@@ -358,7 +358,7 @@ fn de_array() {
 }

 #[test]
-#[ignore = "Nested sequences are not supported"]
+#[ignore]
 fn de_complex() {
     type Key<'a> = (&'a UserId, ArrayVec<u64, 2>, &'a RoomId);

@@ -13,13 +13,13 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream {
     let ret = quote! {
         pub static RUSTC_FLAGS: [&str; #flag_len] = [#( #flag ),*];

-        #[ctor]
+        #[conduwuit_core::ctor]
         fn _set_rustc_flags() {
             conduwuit_core::info::rustc::FLAGS.lock().insert(#crate_name, &RUSTC_FLAGS);
         }

         // static strings have to be yanked on module unload
-        #[dtor]
+        #[conduwuit_core::dtor]
         fn _unset_rustc_flags() {
             conduwuit_core::info::rustc::FLAGS.lock().remove(#crate_name);
         }
@@ -32,12 +32,12 @@ a cool hard fork of Conduit, a Matrix homeserver written in Rust"""
 section = "net"
 priority = "optional"
 conf-files = ["/etc/conduwuit/conduwuit.toml"]
-maintainer-scripts = "../../pkg/debian/"
-systemd-units = { unit-name = "conduwuit", start = false, unit-scripts = "../../pkg/" }
+maintainer-scripts = "../../debian/"
+systemd-units = { unit-name = "conduwuit", start = false }
 assets = [
-  ["../../pkg/debian/README.md", "usr/share/doc/conduwuit/README.Debian", "644"],
+  ["../../debian/README.md", "usr/share/doc/conduwuit/README.Debian", "644"],
   ["../../README.md", "usr/share/doc/conduwuit/", "644"],
-  ["../../target/release/conduwuit", "usr/bin/conduwuit", "755"],
+  ["../../target/release/conduwuit", "usr/sbin/conduwuit", "755"],
   ["../../conduwuit-example.toml", "etc/conduwuit/conduwuit.toml", "640"],
 ]

@@ -56,7 +56,6 @@ standard = [
     "jemalloc",
     "jemalloc_conf",
     "journald",
-    "ldap",
     "media_thumbnail",
     "systemd",
     "url_preview",

@@ -64,7 +63,7 @@ standard = [
 ]
 full = [
     "standard",
-    # "hardened_malloc", # Conflicts with jemalloc
+    "hardened_malloc",
     "jemalloc_prof",
     "perf_measurements",
     "tokio_console"

@@ -115,9 +114,6 @@ jemalloc_stats = [
 jemalloc_conf = [
     "conduwuit-core/jemalloc_conf",
 ]
-ldap = [
-    "conduwuit-api/ldap",
-]
 media_thumbnail = [
     "conduwuit-service/media_thumbnail",
 ]

@@ -126,8 +122,7 @@ perf_measurements = [
     "dep:tracing-flame",
     "dep:tracing-opentelemetry",
     "dep:opentelemetry_sdk",
-    "dep:opentelemetry-otlp",
-    "dep:opentelemetry-jaeger-propagator",
+    "dep:opentelemetry-jaeger",
     "conduwuit-core/perf_measurements",
     "conduwuit-core/sentry_telemetry",
 ]

@@ -203,14 +198,11 @@ clap.workspace = true
 console-subscriber.optional = true
 console-subscriber.workspace = true
 const-str.workspace = true
-ctor.workspace = true
 log.workspace = true
+opentelemetry-jaeger.optional = true
+opentelemetry-jaeger.workspace = true
 opentelemetry.optional = true
 opentelemetry.workspace = true
-opentelemetry-otlp.optional = true
-opentelemetry-otlp.workspace = true
-opentelemetry-jaeger-propagator.optional = true
-opentelemetry-jaeger-propagator.workspace = true
 opentelemetry_sdk.optional = true
 opentelemetry_sdk.workspace = true
 sentry-tower.optional = true

@@ -230,7 +222,6 @@ tracing-subscriber.workspace = true
 tracing.workspace = true
 tracing-journald = { workspace = true, optional = true }

-
 [target.'cfg(all(not(target_env = "msvc"), target_os = "linux"))'.dependencies]
 hardened_malloc-rs.workspace = true
 hardened_malloc-rs.optional = true
@ -7,8 +7,6 @@ use conduwuit_core::{
|
||||||
log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span},
|
log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span},
|
||||||
result::UnwrapOrErr,
|
result::UnwrapOrErr,
|
||||||
};
|
};
|
||||||
#[cfg(feature = "perf_measurements")]
|
|
||||||
use opentelemetry::trace::TracerProvider;
|
|
||||||
use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload};
|
use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload};
|
||||||
|
|
||||||
#[cfg(feature = "perf_measurements")]
|
#[cfg(feature = "perf_measurements")]
|
||||||
|
@@ -89,35 +87,30 @@ pub(crate) fn init(
             (None, None)
         };
 
-        let otlp_filter = EnvFilter::try_new(&config.otlp_filter)
-            .map_err(|e| err!(Config("otlp_filter", "{e}.")))?;
-
-        let otlp_layer = config.allow_otlp.then(|| {
+        let jaeger_filter = EnvFilter::try_new(&config.jaeger_filter)
+            .map_err(|e| err!(Config("jaeger_filter", "{e}.")))?;
+
+        let jaeger_layer = config.allow_jaeger.then(|| {
             opentelemetry::global::set_text_map_propagator(
-                opentelemetry_jaeger_propagator::Propagator::new(),
+                opentelemetry_jaeger::Propagator::new(),
             );
 
-            let exporter = opentelemetry_otlp::SpanExporter::builder()
-                .with_http()
-                .build()
-                .expect("Failed to create OTLP exporter");
-
-            let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
-                .with_batch_exporter(exporter)
-                .build();
-
-            let tracer = provider.tracer(conduwuit_core::name());
+            let tracer = opentelemetry_jaeger::new_agent_pipeline()
+                .with_auto_split_batch(true)
+                .with_service_name(conduwuit_core::name())
+                .install_batch(opentelemetry_sdk::runtime::Tokio)
+                .expect("jaeger agent pipeline");
 
             let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
 
-            let (otlp_reload_filter, otlp_reload_handle) =
-                reload::Layer::new(otlp_filter.clone());
-            reload_handles.add("otlp", Box::new(otlp_reload_handle));
-
-            Some(telemetry.with_filter(otlp_reload_filter))
+            let (jaeger_reload_filter, jaeger_reload_handle) =
+                reload::Layer::new(jaeger_filter.clone());
+            reload_handles.add("jaeger", Box::new(jaeger_reload_handle));
+
+            Some(telemetry.with_filter(jaeger_reload_filter))
         });
 
-        let subscriber = subscriber.with(flame_layer).with(otlp_layer);
+        let subscriber = subscriber.with(flame_layer).with(jaeger_layer);
         (subscriber, flame_guard)
     };
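For orientation, the left-hand side of the hunk above wires spans through an OTLP exporter rather than the deprecated Jaeger agent pipeline. Below is a minimal, self-contained sketch of that wiring, reusing the calls visible in the diff; the crate features (opentelemetry-otlp's HTTP exporter, the 0.28-era SDK API) and the `"conduwuit"` service name are assumptions for illustration, not the project's exact setup:

```rust
use opentelemetry::trace::TracerProvider as _; // for provider.tracer(...)
use tracing_subscriber::layer::SubscriberExt;

fn main() {
    // Build an OTLP/HTTP span exporter; the collector endpoint comes from the
    // standard OTEL_EXPORTER_OTLP_* environment variables by default.
    let exporter = opentelemetry_otlp::SpanExporter::builder()
        .with_http()
        .build()
        .expect("Failed to create OTLP exporter");

    // Batch spans in the background and hand them to the exporter.
    let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
        .with_batch_exporter(exporter)
        .build();

    // Bridge `tracing` spans into OpenTelemetry and install the subscriber.
    let tracer = provider.tracer("conduwuit"); // placeholder service name
    let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
    let subscriber = tracing_subscriber::Registry::default().with(telemetry);
    tracing::subscriber::set_global_default(subscriber).expect("set subscriber");

    tracing::info_span!("startup").in_scope(|| tracing::info!("hello over OTLP"));

    // Flush pending spans before exit.
    let _ = provider.shutdown();
}
```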
@@ -13,7 +13,6 @@ mod sentry;
 mod server;
 mod signal;
 
-use ctor::{ctor, dtor};
 use server::Server;
 
 rustc_flags_capture! {}
@@ -125,7 +125,6 @@ tokio.workspace = true
 tower.workspace = true
 tower-http.workspace = true
 tracing.workspace = true
-ctor.workspace = true
 
 [target.'cfg(all(unix, target_os = "linux"))'.dependencies]
 sd-notify.workspace = true
@@ -12,7 +12,6 @@ use std::{panic::AssertUnwindSafe, pin::Pin, sync::Arc};
 
 use conduwuit::{Error, Result, Server};
 use conduwuit_service::Services;
-use ctor::{ctor, dtor};
 use futures::{Future, FutureExt, TryFutureExt};
 
 conduwuit::mod_ctor! {}
@@ -30,7 +30,7 @@ use tower::{Service, ServiceExt};
 
 type MakeService = IntoMakeServiceWithConnectInfo<Router, net::SocketAddr>;
 
-const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
+const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
 const FINI_POLL_INTERVAL: Duration = Duration::from_millis(750);
 
 #[tracing::instrument(skip_all, level = "debug")]
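The `NULL_ADDR` change in this hunk is purely cosmetic: `Ipv4Addr::UNSPECIFIED` is the named constant for the `0.0.0.0` wildcard. A quick standalone check:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn main() {
    // The named constant and the literal denote the same wildcard address.
    assert_eq!(Ipv4Addr::UNSPECIFIED, Ipv4Addr::new(0, 0, 0, 0));

    let null_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
    assert_eq!(null_addr.to_string(), "0.0.0.0:0");
}
```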
@@ -53,9 +53,6 @@ jemalloc_stats = [
     "conduwuit-core/jemalloc_stats",
     "conduwuit-database/jemalloc_stats",
 ]
-ldap = [
-    "dep:ldap3"
-]
 media_thumbnail = [
     "dep:image",
 ]
@@ -92,8 +89,6 @@ image.workspace = true
 image.optional = true
 ipaddress.workspace = true
 itertools.workspace = true
-ldap3.workspace = true
-ldap3.optional = true
 log.workspace = true
 loole.workspace = true
 lru-cache.workspace = true
@@ -117,7 +112,6 @@ webpage.optional = true
 blurhash.workspace = true
 blurhash.optional = true
 recaptcha-verify = { version = "0.1.5", default-features = false }
-ctor.workspace = true
 
 [lints]
 workspace = true
@@ -38,7 +38,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 
     // Create a user for the server
     let server_user = services.globals.server_user.as_ref();
-    services.users.create(server_user, None, None).await?;
+    services.users.create(server_user, None)?;
 
     let create_content = {
         use RoomVersionId::*;
@@ -4,14 +4,14 @@ mod registration_info;
 use std::{collections::BTreeMap, iter::IntoIterator, sync::Arc};
 
 use async_trait::async_trait;
-use conduwuit::{Err, Result, err, utils::stream::IterStream};
+use conduwuit::{Result, err, utils::stream::IterStream};
 use database::Map;
 use futures::{Future, FutureExt, Stream, TryStreamExt};
 use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration};
 use tokio::sync::{RwLock, RwLockReadGuard};
 
 pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo};
-use crate::{Dep, globals, sending, users};
+use crate::{Dep, sending};
 
 pub struct Service {
     registration_info: RwLock<Registrations>,
@@ -20,9 +20,7 @@ pub struct Service {
 }
 
 struct Services {
-    globals: Dep<globals::Service>,
     sending: Dep<sending::Service>,
-    users: Dep<users::Service>,
 }
 
 struct Data {
@@ -37,9 +35,7 @@ impl crate::Service for Service {
         Ok(Arc::new(Self {
             registration_info: RwLock::new(BTreeMap::new()),
             services: Services {
-                globals: args.depend::<globals::Service>("globals"),
                 sending: args.depend::<sending::Service>("sending"),
-                users: args.depend::<users::Service>("users"),
             },
             db: Data {
                 id_appserviceregistrations: args.db["id_appserviceregistrations"].clone(),
@@ -48,93 +44,23 @@ impl crate::Service for Service {
     }
 
     async fn worker(self: Arc<Self>) -> Result {
-        // First, collect all appservices to check for token conflicts
-        let appservices: Vec<(String, Registration)> = self.iter_db_ids().try_collect().await?;
-
-        // Check for appservice-to-appservice token conflicts
-        for i in 0..appservices.len() {
-            for j in i.saturating_add(1)..appservices.len() {
-                if appservices[i].1.as_token == appservices[j].1.as_token {
-                    return Err!(Database(error!(
-                        "Token collision detected: Appservices '{}' and '{}' have the same token",
-                        appservices[i].0, appservices[j].0
-                    )));
-                }
-            }
-        }
-
-        // Process each appservice
-        for (id, registration) in appservices {
-            // During startup, resolve any token collisions in favour of appservices
-            // by logging out conflicting user devices
-            if let Ok((user_id, device_id)) = self
-                .services
-                .users
-                .find_from_token(&registration.as_token)
-                .await
-            {
-                conduwuit::warn!(
-                    "Token collision detected during startup: Appservice '{}' token was also \
-                     used by user '{}' device '{}'. Logging out the user device to resolve \
-                     conflict.",
-                    id,
-                    user_id.localpart(),
-                    device_id
-                );
-
-                self.services
-                    .users
-                    .remove_device(&user_id, &device_id)
-                    .await;
-            }
-
-            self.start_appservice(id, registration).await?;
-        }
-
-        Ok(())
+        // Inserting registrations into cache
+        self.iter_db_ids()
+            .try_for_each(async |appservice| {
+                self.registration_info
+                    .write()
+                    .await
+                    .insert(appservice.0, appservice.1.try_into()?);
+
+                Ok(())
+            })
+            .await
     }
 
     fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
 }
 
 impl Service {
-    /// Starts an appservice, ensuring its sender_localpart user exists and is
-    /// active. Creates the user if it doesn't exist, or reactivates it if it
-    /// was deactivated. Then registers the appservice in memory for request
-    /// handling.
-    async fn start_appservice(&self, id: String, registration: Registration) -> Result {
-        let appservice_user_id = UserId::parse_with_server_name(
-            registration.sender_localpart.as_str(),
-            self.services.globals.server_name(),
-        )?;
-
-        if !self.services.users.exists(&appservice_user_id).await {
-            self.services
-                .users
-                .create(&appservice_user_id, None, None)
-                .await?;
-        } else if self
-            .services
-            .users
-            .is_deactivated(&appservice_user_id)
-            .await
-            .unwrap_or(false)
-        {
-            // Reactivate the appservice user if it was accidentally deactivated
-            self.services
-                .users
-                .set_password(&appservice_user_id, None)
-                .await?;
-        }
-
-        self.registration_info
-            .write()
-            .await
-            .insert(id, registration.try_into()?);
-
-        Ok(())
-    }
-
     /// Registers an appservice and returns the ID to the caller
     pub async fn register_appservice(
         &self,
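The removed `worker` body above rejects duplicate `as_token` values with a pairwise scan before starting each appservice. A hypothetical standalone sketch of that check, with plain `(id, token)` string pairs standing in for the real `(String, Registration)` entries:

```rust
/// Returns the first pair of appservice ids that share a token, if any.
/// O(n^2) in the number of registrations, which stays tiny in practice.
fn find_token_collision(appservices: &[(String, String)]) -> Option<(&str, &str)> {
    for i in 0..appservices.len() {
        // Start at i + 1 so every unordered pair is compared exactly once.
        for j in i.saturating_add(1)..appservices.len() {
            if appservices[i].1 == appservices[j].1 {
                return Some((appservices[i].0.as_str(), appservices[j].0.as_str()));
            }
        }
    }
    None
}

fn main() {
    let regs = vec![
        ("bridge-a".to_owned(), "secret-1".to_owned()),
        ("bridge-b".to_owned(), "secret-2".to_owned()),
        ("bridge-c".to_owned(), "secret-1".to_owned()),
    ];
    if let Some((a, b)) = find_token_collision(&regs) {
        eprintln!("Token collision detected: appservices '{a}' and '{b}' have the same token");
    }
}
```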
@@ -142,40 +68,15 @@ impl Service {
         appservice_config_body: &str,
     ) -> Result {
         //TODO: Check for collisions between exclusive appservice namespaces
-
-        // Check for token collision with other appservices (allow re-registration of
-        // same appservice)
-        if let Ok(existing) = self.find_from_token(&registration.as_token).await {
-            if existing.registration.id != registration.id {
-                return Err(err!(Request(InvalidParam(
-                    "Cannot register appservice: Token is already used by appservice '{}'. \
-                     Please generate a different token.",
-                    existing.registration.id
-                ))));
-            }
-        }
-
-        // Prevent token collision with existing user tokens
-        if self
-            .services
-            .users
-            .find_from_token(&registration.as_token)
+        self.registration_info
+            .write()
             .await
-            .is_ok()
-        {
-            return Err(err!(Request(InvalidParam(
-                "Cannot register appservice: The provided token is already in use by a user \
-                 device. Please generate a different token for the appservice."
-            ))));
-        }
+            .insert(registration.id.clone(), registration.clone().try_into()?);
 
         self.db
             .id_appserviceregistrations
             .insert(&registration.id, appservice_config_body);
 
-        self.start_appservice(registration.id.clone(), registration.clone())
-            .await?;
-
         Ok(())
     }
@@ -212,14 +113,12 @@ impl Service {
             .map(|info| info.registration)
     }
 
-    /// Returns Result to match users::find_from_token for select_ok usage
-    pub async fn find_from_token(&self, token: &str) -> Result<RegistrationInfo> {
+    pub async fn find_from_token(&self, token: &str) -> Option<RegistrationInfo> {
         self.read()
             .await
             .values()
             .find(|info| info.registration.as_token == token)
             .cloned()
-            .ok_or_else(|| err!(Request(NotFound("Appservice token not found"))))
     }
 
     /// Checks if a given user id matches any exclusive appservice regex
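The removed doc comment explains why the left-hand `find_from_token` returns `Result` instead of `Option`: a `Result` composes with `futures::future::select_ok`, which races several lookups, yields the first `Ok`, and errs only when every branch errs. A hypothetical sketch of that pattern; the lookup bodies and the tokio runtime are stand-ins, not the project's code:

```rust
use futures::future::{BoxFuture, FutureExt, select_ok};

// Stand-ins for the appservice and user token lookups.
async fn appservice_lookup(token: &str) -> Result<String, &'static str> {
    if token == "as-secret" { Ok("appservice 'bridge-a'".into()) } else { Err("no appservice") }
}

async fn user_lookup(token: &str) -> Result<String, &'static str> {
    if token == "user-secret" { Ok("user device".into()) } else { Err("no user") }
}

#[tokio::main]
async fn main() {
    let token = "user-secret";
    let lookups: Vec<BoxFuture<'_, Result<String, &'static str>>> =
        vec![appservice_lookup(token).boxed(), user_lookup(token).boxed()];

    // Resolves to the first Ok; fails only when both lookups fail, which is
    // why a Result (rather than Option) return type composes here.
    match select_ok(lookups).await {
        | Ok((owner, _remaining)) => println!("token belongs to {owner}"),
        | Err(e) => eprintln!("unknown token: {e}"),
    }
}
```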
@@ -41,11 +41,6 @@ impl crate::Service for Service {
             return Ok(());
         }
 
-        if self.services.config.ldap.enable {
-            warn!("emergency password feature not available with LDAP enabled.");
-            return Ok(());
-        }
-
        self.set_emergency_access().await.inspect_err(|e| {
             error!("Could not set the configured emergency password for the server user: {e}");
         })
@@ -62,8 +57,7 @@ impl Service {
 
         self.services
             .users
-            .set_password(server_user, self.services.config.emergency_password.as_deref())
-            .await?;
+            .set_password(server_user, self.services.config.emergency_password.as_deref())?;
 
         let (ruleset, pwd_set) = match self.services.config.emergency_password {
             | Some(_) => (Ruleset::server_default(server_user), true),
@@ -215,8 +215,8 @@ async fn db_lt_12(services: &Services) -> Result<()> {
     for username in &services
         .users
         .list_local_users()
-        .map(ToOwned::to_owned)
-        .collect::<Vec<OwnedUserId>>()
+        .map(UserId::to_owned)
+        .collect::<Vec<_>>()
         .await
     {
         let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name)
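Both spellings in the map/collect change above do the same work: each borrowed `&UserId` yielded by the stream is cloned into an owned value so the collection can outlive the stream's borrow. A small sketch of the pattern over plain strings, using futures' `StreamExt` (tokio assumed for the runtime):

```rust
use futures::{StreamExt, stream};

#[tokio::main]
async fn main() {
    // Borrowed items come out of the stream...
    let borrowed: [&str; 2] = ["@alice:example.org", "@bob:example.org"];

    // ...and .map(ToOwned::to_owned) clones each into an owned String, just
    // as .map(UserId::to_owned) produces OwnedUserId from &UserId in the diff.
    let owned: Vec<String> = stream::iter(borrowed)
        .map(ToOwned::to_owned)
        .collect()
        .await;

    assert_eq!(owned, ["@alice:example.org", "@bob:example.org"]);
}
```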
@@ -295,8 +295,8 @@ async fn db_lt_13(services: &Services) -> Result<()> {
     for username in &services
         .users
         .list_local_users()
-        .map(ToOwned::to_owned)
-        .collect::<Vec<OwnedUserId>>()
+        .map(UserId::to_owned)
+        .collect::<Vec<_>>()
         .await
     {
         let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name)
@@ -33,7 +33,6 @@ pub mod users;
 extern crate conduwuit_core as conduwuit;
 extern crate conduwuit_database as database;
 
-use ctor::{ctor, dtor};
 pub(crate) use service::{Args, Dep, Service};
 
 pub use crate::services::Services;
@@ -183,8 +183,8 @@ impl Service {
         .services
         .users
         .list_local_users()
-        .map(ToOwned::to_owned)
-        .collect::<Vec<OwnedUserId>>()
+        .map(UserId::to_owned)
+        .collect::<Vec<_>>()
         .await
     {
         let presence = self.db.get_presence(user_id).await;
@@ -178,7 +178,7 @@ impl Service {
     pub fn get_pushkeys<'a>(
         &'a self,
         sender: &'a UserId,
-    ) -> impl Stream<Item = &'a str> + Send + 'a {
+    ) -> impl Stream<Item = &str> + Send + 'a {
         let prefix = (sender, Interfix);
         self.db
             .senderkey_pusher
@@ -178,7 +178,7 @@ impl Service {
     pub fn local_aliases_for_room<'a>(
         &'a self,
         room_id: &'a RoomId,
-    ) -> impl Stream<Item = &'a RoomAliasId> + Send + 'a {
+    ) -> impl Stream<Item = &RoomAliasId> + Send + 'a {
         let prefix = (room_id, Interfix);
         self.db
             .aliasid_alias
@@ -188,9 +188,7 @@ impl Service {
     }
 
     #[tracing::instrument(skip(self), level = "debug")]
-    pub fn all_local_aliases<'a>(
-        &'a self,
-    ) -> impl Stream<Item = (&'a RoomId, &'a str)> + Send + 'a {
+    pub fn all_local_aliases<'a>(&'a self) -> impl Stream<Item = (&RoomId, &str)> + Send + 'a {
         self.db
             .alias_roomid
             .stream()
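The signature changes in the last three hunks are equivalent spellings: with every input reference bound to `'a`, the elided lifetime in the returned `Item` resolves to `'a` under the ordinary elision rules. A minimal sketch with a hypothetical `Db` type showing that both forms compile to the same signature:

```rust
use futures::{Stream, stream};

struct Db {
    names: Vec<String>,
}

impl Db {
    // Explicit item lifetime, as on one side of the hunks.
    fn names_explicit<'a>(&'a self) -> impl Stream<Item = &'a str> + Send + 'a {
        stream::iter(self.names.iter().map(String::as_str))
    }

    // Elided item lifetime, as on the other side; it resolves to 'a because
    // &'a self supplies the output lifetime, so the signatures are identical.
    fn names_elided<'a>(&'a self) -> impl Stream<Item = &str> + Send + 'a {
        stream::iter(self.names.iter().map(String::as_str))
    }
}

fn main() {
    let db = Db { names: vec!["one".into(), "two".into()] };
    // Both methods return the same stream type over borrowed &str items.
    let _ = db.names_explicit();
    let _ = db.names_elided();
}
```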
Some files were not shown because too many files have changed in this diff.