Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git (synced 2025-09-10 00:23:03 +02:00)

Compare commits: v0.5.0-rc. ... main (96 commits)
Commits in this range (short SHAs; the author, date, and message columns were empty in this view):

8f186cd770  5d3e10a048  1e541875ad  90fd92977e  e27ef7f5ec  16f4efa708  e38dec5864  f3824ffc3d
e3fbf7a143  09de586dc7  d1fff1d09f  f47474d12a  53da294e53  2cdccbf2fe  6cf3c839e4  4a1091dd06
1e9701f379  2cedf0d2e1  84fdcd326a  d640853f9d  fff9629b0f  1a3107c20a  969d7cbb66  cd238b05de
c0e3829fed  1d7dda6cf5  6f19931c5b  2516e783ba  fdf5771387  58bbc0e676  0d58e660a2  e7124edb73
d19e0f0d97  467aed3028  99b44bbf09  95aeff8cdc  9e62e66ae4  76b93e252d  66d479e2eb  241371463e
d970df5fd2  4e644961f3  35cf9af5c8  04e796176a  9783940105  1e430f9470  5cce024841  e87c461b8d
b934898f51  83e3de55a4  609e239436  34417c96ae  f33f281edb  ddbca59193  b5a2e49ae4  37248a4f68
dd22325ea2  30a56d5cb9  3183210459  57d7743037  cb09bfa4e7  0ed691edef  c58b9f05ed  fb7e739b72
c7adbae03f  8b35de6a43  d191494f18  6d1f12b22d  ca3ee9224b  427b973b67  aacaf5a2a0  256bed992e
ecb87ccd1c  14a4b24fc5  731761f0fc  4524a00fc6  9db750e97c  b14a4d470b  5d1f141882  b447cfff56
283888e788  f54e59a068  2a183cc5a4  54acd07555  583cb924f1  9286838d23  d1ebcfaf0b  e820551f62
bd3db65cb2  e4a43b1a5b  5775e0ad9d  238cc627e3  b1516209c4  0589884109  4a83df5b57  aa08edc55f
123 changed files with 3275 additions and 1296 deletions

@@ -26,3 +26,7 @@ max_line_length = 98
 [*.yml]
 indent_size = 2
 indent_style = space
+
+[*.json]
+indent_size = 4
+indent_style = space
.envrc | 4

@@ -2,6 +2,8 @@
 
 dotenv_if_exists
 
-# use flake ".#${DIRENV_DEVSHELL:-default}"
+if [ -f /etc/os-release ] && grep -q '^ID=nixos' /etc/os-release; then
+  use flake ".#${DIRENV_DEVSHELL:-default}"
+fi
 
 PATH_add bin
.forgejo/actions/detect-runner-os/action.yml | 58 (new file)

@@ -0,0 +1,58 @@
+name: detect-runner-os
+description: |
+  Detect the actual OS name and version of the runner.
+  Provides separate outputs for name, version, and a combined slug.
+
+outputs:
+  name:
+    description: 'OS name (e.g. Ubuntu, Debian)'
+    value: ${{ steps.detect.outputs.name }}
+  version:
+    description: 'OS version (e.g. 22.04, 11)'
+    value: ${{ steps.detect.outputs.version }}
+  slug:
+    description: 'Combined OS slug (e.g. Ubuntu-22.04)'
+    value: ${{ steps.detect.outputs.slug }}
+  node_major:
+    description: 'Major version of Node.js if available (e.g. 22)'
+    value: ${{ steps.detect.outputs.node_major }}
+  node_version:
+    description: 'Full Node.js version if available (e.g. 22.19.0)'
+    value: ${{ steps.detect.outputs.node_version }}
+
+runs:
+  using: composite
+  steps:
+    - name: Detect runner OS
+      id: detect
+      shell: bash
+      run: |
+        # Detect OS version (try lsb_release first, fall back to /etc/os-release)
+        OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)
+
+        # Detect OS name and capitalise (try lsb_release first, fall back to /etc/os-release)
+        OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"' | sed 's/\b\(.\)/\u\1/g')
+
+        # Create combined slug
+        OS_SLUG="${OS_NAME}-${OS_VERSION}"
+
+        # Detect Node.js version if available
+        if command -v node >/dev/null 2>&1; then
+          NODE_VERSION=$(node --version | sed 's/v//')
+          NODE_MAJOR=$(echo $NODE_VERSION | cut -d. -f1)
+          echo "node_version=${NODE_VERSION}" >> $GITHUB_OUTPUT
+          echo "node_major=${NODE_MAJOR}" >> $GITHUB_OUTPUT
+          echo "🔍 Detected Node.js: v${NODE_VERSION}"
+        else
+          echo "node_version=" >> $GITHUB_OUTPUT
+          echo "node_major=" >> $GITHUB_OUTPUT
+          echo "🔍 Node.js not found"
+        fi
+
+        # Set OS outputs
+        echo "name=${OS_NAME}" >> $GITHUB_OUTPUT
+        echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT
+        echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT
+
+        # Log detection results
+        echo "🔍 Detected Runner OS: ${OS_NAME} ${OS_VERSION}"
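A note on the capitalisation step above: both \b (word boundary) and \u (uppercase the next character in the replacement) are GNU sed extensions, so for example:

    echo "ubuntu" | sed 's/\b\(.\)/\u\1/g'   # prints "Ubuntu"

On a non-GNU sed the fallback branch of the OS_NAME pipeline would need a different spelling; the runners this targets are GNU/Linux, so that is a safe assumption here.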

@@ -1,27 +0,0 @@
-name: prefligit
-description: |
-  Runs prefligit, pre-commit reimplemented in Rust.
-inputs:
-  extra_args:
-    description: options to pass to pre-commit run
-    required: false
-    default: '--all-files'
-
-runs:
-  using: composite
-  steps:
-    - name: Install uv
-      uses: https://github.com/astral-sh/setup-uv@v6
-      with:
-        enable-cache: true
-        ignore-nothing-to-cache: true
-    - name: Install Prefligit
-      shell: bash
-      run: |
-        curl --proto '=https' --tlsv1.2 -LsSf https://github.com/j178/prefligit/releases/download/v0.0.10/prefligit-installer.sh | sh
-    - uses: actions/cache@v3
-      with:
-        path: ~/.cache/prefligit
-        key: prefligit-0|${{ hashFiles('.pre-commit-config.yaml') }}
-    - run: prefligit run --show-diff-on-failure --color=always -v ${{ inputs.extra_args }}
-      shell: bash
@@ -2,18 +2,12 @@ name: sccache
 description: |
   Install sccache for caching builds in GitHub Actions.
 
-inputs:
-  token:
-    description: 'A Github PAT'
-    required: false
-
 runs:
   using: composite
   steps:
     - name: Install sccache
-      uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
-      with:
-        token: ${{ inputs.token }}
+      uses: https://git.tomfos.tr/tom/sccache-action@v1
     - name: Configure sccache
       uses: https://github.com/actions/github-script@v7
       with:
.forgejo/actions/setup-llvm-with-apt/action.yml | 167 (new file)

@@ -0,0 +1,167 @@
+name: setup-llvm-with-apt
+description: |
+  Set up LLVM toolchain with APT package management and smart caching.
+  Supports cross-compilation architectures and additional package installation.
+
+  Creates symlinks in /usr/bin: clang, clang++, lld, llvm-ar, llvm-ranlib
+
+inputs:
+  dpkg-arch:
+    description: 'Debian architecture for cross-compilation (e.g. arm64)'
+    required: false
+    default: ''
+  extra-packages:
+    description: 'Additional APT packages to install (space-separated)'
+    required: false
+    default: ''
+  llvm-version:
+    description: 'LLVM version to install'
+    required: false
+    default: '20'
+
+outputs:
+  llvm-version:
+    description: 'Installed LLVM version'
+    value: ${{ steps.configure.outputs.version }}
+
+runs:
+  using: composite
+  steps:
+    - name: Detect runner OS
+      id: runner-os
+      uses: ./.forgejo/actions/detect-runner-os
+
+    - name: Configure cross-compilation architecture
+      if: inputs.dpkg-arch != ''
+      shell: bash
+      run: |
+        echo "🏗️ Adding ${{ inputs.dpkg-arch }} architecture"
+        sudo dpkg --add-architecture ${{ inputs.dpkg-arch }}
+
+        # Restrict default sources to amd64
+        sudo sed -i 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list
+        sudo sed -i 's/^deb https/deb [arch=amd64] https/g' /etc/apt/sources.list
+
+        # Add ports sources for foreign architecture
+        sudo tee /etc/apt/sources.list.d/${{ inputs.dpkg-arch }}.list > /dev/null <<EOF
+        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
+        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
+        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
+        EOF
+
+        echo "✅ Architecture ${{ inputs.dpkg-arch }} configured"
+
+    - name: Start LLVM cache group
+      shell: bash
+      run: echo "::group::📦 Restoring LLVM cache"
+
+    - name: Check for LLVM cache
+      id: cache
+      uses: https://github.com/actions/cache@v4
+      with:
+        path: |
+          /usr/bin/clang-*
+          /usr/bin/clang++-*
+          /usr/bin/lld-*
+          /usr/bin/llvm-*
+          /usr/lib/llvm-*/
+          /usr/lib/x86_64-linux-gnu/libLLVM*.so*
+          /usr/lib/x86_64-linux-gnu/libclang*.so*
+          /etc/apt/sources.list.d/archive_uri-*
+          /etc/apt/trusted.gpg.d/apt.llvm.org.asc
+        key: llvm-${{ steps.runner-os.outputs.slug }}-v${{ inputs.llvm-version }}-v3-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}
+
+    - name: End LLVM cache group
+      shell: bash
+      run: echo "::endgroup::"
+
+    - name: Check and install LLVM if needed
+      id: llvm-setup
+      shell: bash
+      run: |
+        echo "🔍 Checking for LLVM ${{ inputs.llvm-version }}..."
+
+        # Check both binaries and libraries exist
+        if [ -f "/usr/bin/clang-${{ inputs.llvm-version }}" ] && \
+           [ -f "/usr/bin/clang++-${{ inputs.llvm-version }}" ] && \
+           [ -f "/usr/bin/lld-${{ inputs.llvm-version }}" ] && \
+           ([ -f "/usr/lib/x86_64-linux-gnu/libLLVM.so.${{ inputs.llvm-version }}.1" ] || \
+            [ -f "/usr/lib/x86_64-linux-gnu/libLLVM-${{ inputs.llvm-version }}.so.1" ] || \
+            [ -f "/usr/lib/llvm-${{ inputs.llvm-version }}/lib/libLLVM.so" ]); then
+          echo "✅ LLVM ${{ inputs.llvm-version }} found and verified"
+          echo "needs-install=false" >> $GITHUB_OUTPUT
+        else
+          echo "📦 LLVM ${{ inputs.llvm-version }} not found or incomplete - installing..."
+
+          echo "::group::🔧 Installing LLVM ${{ inputs.llvm-version }}"
+          wget -O - https://apt.llvm.org/llvm.sh | bash -s -- ${{ inputs.llvm-version }}
+          echo "::endgroup::"
+
+          if [ ! -f "/usr/bin/clang-${{ inputs.llvm-version }}" ]; then
+            echo "❌ Failed to install LLVM ${{ inputs.llvm-version }}"
+            exit 1
+          fi
+
+          echo "✅ Installed LLVM ${{ inputs.llvm-version }}"
+          echo "needs-install=true" >> $GITHUB_OUTPUT
+        fi
+
+    - name: Prepare for additional packages
+      if: inputs.extra-packages != ''
+      shell: bash
+      run: |
+        # Update APT if LLVM was cached (installer script already does apt-get update)
+        if [[ "${{ steps.llvm-setup.outputs.needs-install }}" != "true" ]]; then
+          echo "::group::📦 Running apt-get update (LLVM cached, extra packages needed)"
+          sudo apt-get update
+          echo "::endgroup::"
+        fi
+        echo "::group::📦 Installing additional packages"
+
+    - name: Install additional packages
+      if: inputs.extra-packages != ''
+      uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
+      with:
+        packages: ${{ inputs.extra-packages }}
+        version: 1.0
+
+    - name: End package installation group
+      if: inputs.extra-packages != ''
+      shell: bash
+      run: echo "::endgroup::"
+
+    - name: Configure LLVM environment
+      id: configure
+      shell: bash
+      run: |
+        echo "::group::🔧 Configuring LLVM ${{ inputs.llvm-version }} environment"
+
+        # Create symlinks
+        sudo ln -sf "/usr/bin/clang-${{ inputs.llvm-version }}" /usr/bin/clang
+        sudo ln -sf "/usr/bin/clang++-${{ inputs.llvm-version }}" /usr/bin/clang++
+        sudo ln -sf "/usr/bin/lld-${{ inputs.llvm-version }}" /usr/bin/lld
+        sudo ln -sf "/usr/bin/llvm-ar-${{ inputs.llvm-version }}" /usr/bin/llvm-ar
+        sudo ln -sf "/usr/bin/llvm-ranlib-${{ inputs.llvm-version }}" /usr/bin/llvm-ranlib
+        echo " ✓ Created symlinks"
+
+        # Setup library paths
+        LLVM_LIB_PATH="/usr/lib/llvm-${{ inputs.llvm-version }}/lib"
+        if [ -d "$LLVM_LIB_PATH" ]; then
+          echo "LD_LIBRARY_PATH=${LLVM_LIB_PATH}:${LD_LIBRARY_PATH:-}" >> $GITHUB_ENV
+          echo "LIBCLANG_PATH=${LLVM_LIB_PATH}" >> $GITHUB_ENV
+
+          echo "$LLVM_LIB_PATH" | sudo tee "/etc/ld.so.conf.d/llvm-${{ inputs.llvm-version }}.conf" > /dev/null
+          sudo ldconfig
+          echo " ✓ Configured library paths"
+        else
+          # Fallback to standard library location
+          if [ -d "/usr/lib/x86_64-linux-gnu" ]; then
+            echo "LIBCLANG_PATH=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV
+            echo " ✓ Using fallback library path"
+          fi
+        fi
+
+        # Set output
+        echo "version=${{ inputs.llvm-version }}" >> $GITHUB_OUTPUT
+        echo "::endgroup::"
+        echo "✅ LLVM ready: $(clang --version | head -1)"
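For orientation, a minimal caller mirrors the prek-checks workflow later in this compare: pass only the extra packages needed, and optionally a dpkg-arch for cross builds (the arm64 value below is a hypothetical illustration, not used in this changeset):

    - name: Setup LLVM
      uses: ./.forgejo/actions/setup-llvm-with-apt
      with:
        extra-packages: liburing-dev liburing2
        # hypothetical cross-compilation variant:
        # dpkg-arch: arm64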
.forgejo/actions/setup-rust/action.yml | 226 (new file)

@@ -0,0 +1,226 @@
+name: setup-rust
+description: |
+  Set up Rust toolchain with sccache for compilation caching.
+  Respects rust-toolchain.toml by default or accepts explicit version override.
+
+inputs:
+  cache-key-suffix:
+    description: 'Optional suffix for cache keys (e.g. platform identifier)'
+    required: false
+    default: ''
+  rust-components:
+    description: 'Additional Rust components to install (space-separated)'
+    required: false
+    default: ''
+  rust-target:
+    description: 'Rust target triple (e.g. x86_64-unknown-linux-gnu)'
+    required: false
+    default: ''
+  rust-version:
+    description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
+    required: false
+    default: '1.87.0'
+  sccache-cache-limit:
+    description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
+    required: false
+    default: '2G'
+  github-token:
+    description: 'GitHub token for downloading sccache from GitHub releases'
+    required: false
+    default: ''
+
+outputs:
+  rust-version:
+    description: 'Installed Rust version'
+    value: ${{ steps.rust-setup.outputs.version }}
+
+runs:
+  using: composite
+  steps:
+    - name: Detect runner OS
+      id: runner-os
+      uses: ./.forgejo/actions/detect-runner-os
+
+    - name: Configure Cargo environment
+      shell: bash
+      run: |
+        # Use workspace-relative paths for better control and consistency
+        echo "CARGO_HOME=${{ github.workspace }}/.cargo" >> $GITHUB_ENV
+        echo "CARGO_TARGET_DIR=${{ github.workspace }}/target" >> $GITHUB_ENV
+        echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> $GITHUB_ENV
+        echo "RUSTUP_HOME=${{ github.workspace }}/.rustup" >> $GITHUB_ENV
+
+        # Limit binstall resolution timeout to avoid GitHub rate limit delays
+        echo "BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10" >> $GITHUB_ENV
+
+        # Ensure directories exist for first run
+        mkdir -p "${{ github.workspace }}/.cargo"
+        mkdir -p "${{ github.workspace }}/.sccache"
+        mkdir -p "${{ github.workspace }}/target"
+        mkdir -p "${{ github.workspace }}/.rustup"
+
+    - name: Start cache restore group
+      shell: bash
+      run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"
+
+    - name: Cache Cargo registry and git
+      id: registry-cache
+      uses: https://github.com/actions/cache@v4
+      with:
+        path: |
+          .cargo/registry/index
+          .cargo/registry/cache
+          .cargo/git/db
+        # Registry cache saved per workflow, restored from any workflow's cache
+        # Each workflow maintains its own registry that accumulates its needed crates
+        key: cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ github.workflow }}
+        restore-keys: |
+          cargo-registry-${{ steps.runner-os.outputs.slug }}-
+
+    - name: Cache toolchain binaries
+      id: toolchain-cache
+      uses: https://github.com/actions/cache@v4
+      with:
+        path: |
+          .cargo/bin
+          .rustup/toolchains
+          .rustup/update-hashes
+        # Shared toolchain cache across all Rust versions
+        key: toolchain-${{ steps.runner-os.outputs.slug }}
+
+    - name: Setup sccache
+      uses: https://git.tomfos.tr/tom/sccache-action@v1
+
+    - name: Cache build artifacts
+      id: build-cache
+      uses: https://github.com/actions/cache@v4
+      with:
+        path: |
+          target/**/deps
+          !target/**/deps/*.rlib
+          target/**/build
+          target/**/.fingerprint
+          target/**/incremental
+          target/**/*.d
+          /timelord/
+        # Build artifacts - cache per code change, restore from deps when code changes
+        key: >-
+          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
+        restore-keys: |
+          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
+
+    - name: End cache restore group
+      shell: bash
+      run: echo "::endgroup::"
+
+    - name: Setup Rust toolchain
+      shell: bash
+      run: |
+        # Install rustup if not already cached
+        if ! command -v rustup &> /dev/null; then
+          echo "::group::📦 Installing rustup"
+          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
+          source "$CARGO_HOME/env"
+          echo "::endgroup::"
+        else
+          echo "✅ rustup already available"
+        fi
+
+        # Setup the appropriate Rust version
+        if [[ -n "${{ inputs.rust-version }}" ]]; then
+          echo "::group::📦 Setting up Rust ${{ inputs.rust-version }}"
+          # Set override first to prevent rust-toolchain.toml from auto-installing
+          rustup override set ${{ inputs.rust-version }} 2>/dev/null || true
+
+          # Check if we need to install/update the toolchain
+          if rustup toolchain list | grep -q "^${{ inputs.rust-version }}-"; then
+            rustup update ${{ inputs.rust-version }}
+          else
+            rustup toolchain install ${{ inputs.rust-version }} --profile minimal -c cargo,clippy,rustfmt
+          fi
+        else
+          echo "::group::📦 Setting up Rust from rust-toolchain.toml"
+          rustup show
+        fi
+        echo "::endgroup::"
+
+    - name: Configure PATH and install tools
+      shell: bash
+      env:
+        GITHUB_TOKEN: ${{ inputs.github-token }}
+      run: |
+        # Add .cargo/bin to PATH permanently for all subsequent steps
+        echo "${{ github.workspace }}/.cargo/bin" >> $GITHUB_PATH
+
+        # For this step only, we need to add it to PATH since GITHUB_PATH takes effect in the next step
+        export PATH="${{ github.workspace }}/.cargo/bin:$PATH"
+
+        # Install cargo-binstall for fast binary installations
+        if command -v cargo-binstall &> /dev/null; then
+          echo "✅ cargo-binstall already available"
+        else
+          echo "::group::📦 Installing cargo-binstall"
+          curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
+          echo "::endgroup::"
+        fi
+
+        if command -v prek &> /dev/null; then
+          echo "✅ prek already available"
+        else
+          echo "::group::📦 Installing prek"
+          # prek isn't regularly published to crates.io, so we use git source
+          cargo-binstall -y --no-symlinks --git https://github.com/j178/prek prek
+          echo "::endgroup::"
+        fi
+
+        if command -v timelord &> /dev/null; then
+          echo "✅ timelord already available"
+        else
+          echo "::group::📦 Installing timelord"
+          cargo-binstall -y --no-symlinks timelord-cli
+          echo "::endgroup::"
+        fi
+
+    - name: Configure sccache environment
+      shell: bash
+      run: |
+        echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
+        echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
+        echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
+        echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
+        echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
+
+        # Configure incremental compilation GC
+        # If we restored from old cache (partial hit), clean up aggressively
+        if [[ "${{ steps.build-cache.outputs.cache-hit }}" != "true" ]]; then
+          echo "♻️ Partial cache hit - enabling cache cleanup"
+          echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
+        fi
+
+    - name: Install Rust components
+      if: inputs.rust-components != ''
+      shell: bash
+      run: |
+        echo "📦 Installing components: ${{ inputs.rust-components }}"
+        rustup component add ${{ inputs.rust-components }}
+
+    - name: Install Rust target
+      if: inputs.rust-target != ''
+      shell: bash
+      run: |
+        echo "📦 Installing target: ${{ inputs.rust-target }}"
+        rustup target add ${{ inputs.rust-target }}
+
+    - name: Output version and summary
+      id: rust-setup
+      shell: bash
+      run: |
+        RUST_VERSION=$(rustc --version | cut -d' ' -f2)
+        echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT
+
+        echo "📋 Setup complete:"
+        echo "  Rust: $(rustc --version)"
+        echo "  Cargo: $(cargo --version)"
+        echo "  prek: $(prek --version 2>/dev/null || echo 'installed')"
+        echo "  timelord: $(timelord --version 2>/dev/null || echo 'installed')"
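For intuition about the build-cache key above: on an Ubuntu 22.04 runner with the default toolchain it expands to roughly (hash values illustrative)

    build-Ubuntu-22.04-1.87.0-<hash(rust-toolchain.toml, Cargo.lock)>-<hash(*.rs, Cargo.toml)>

so a source-only edit misses the exact key but restores from the matching lockfile prefix via restore-keys, while a dependency bump falls back further, to the shared cargo-registry cache.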

@@ -49,10 +49,23 @@ jobs:
           cp ./docs/static/_headers ./public/_headers
           echo "Copied .well-known files and _headers to ./public"
 
+      - name: Detect runner environment
+        id: runner-env
+        uses: ./.forgejo/actions/detect-runner-os
+
       - name: Setup Node.js
+        if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
         uses: https://github.com/actions/setup-node@v4
         with:
-          node-version: 20
+          node-version: 22
+
+      - name: Cache npm dependencies
+        uses: actions/cache@v3
+        with:
+          path: ~/.npm
+          key: ${{ steps.runner-env.outputs.slug }}-node-${{ hashFiles('**/package-lock.json') }}
+          restore-keys: |
+            ${{ steps.runner-env.outputs.slug }}-node-
+
       - name: Install dependencies
         run: npm install --save-dev wrangler@latest
@@ -1,22 +0,0 @@
-name: Checks / Prefligit
-
-on:
-  push:
-  pull_request:
-permissions:
-  contents: read
-
-jobs:
-  prefligit:
-    runs-on: ubuntu-latest
-    env:
-      FROM_REF: ${{ github.event.pull_request.base.sha || (!github.event.forced && ( github.event.before != '0000000000000000000000000000000000000000' && github.event.before || github.sha )) || format('{0}~', github.sha) }}
-      TO_REF: ${{ github.sha }}
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: ./.forgejo/actions/prefligit
-        with:
-          extra_args: --all-files --hook-stage manual
.forgejo/workflows/prek-checks.yml | 83 (new file)

@@ -0,0 +1,83 @@
+name: Checks / Prek
+
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+  workflow_dispatch:
+
+permissions:
+  contents: read
+
+jobs:
+  fast-checks:
+    name: Pre-commit & Formatting
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Setup Rust nightly
+        uses: ./.forgejo/actions/setup-rust
+        with:
+          rust-version: nightly
+          github-token: ${{ secrets.GH_PUBLIC_RO }}
+
+      - name: Run prek
+        run: |
+          prek run \
+            --all-files \
+            --hook-stage manual \
+            --show-diff-on-failure \
+            --color=always \
+            -v
+
+      - name: Check Rust formatting
+        run: |
+          cargo +nightly fmt --all -- --check && \
+            echo "✅ Formatting check passed" || \
+            exit 1
+
+  clippy-and-tests:
+    name: Clippy and Cargo Tests
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Setup LLVM
+        uses: ./.forgejo/actions/setup-llvm-with-apt
+        with:
+          extra-packages: liburing-dev liburing2
+
+      - name: Setup Rust with caching
+        uses: ./.forgejo/actions/setup-rust
+        with:
+          github-token: ${{ secrets.GH_PUBLIC_RO }}
+
+      - name: Run Clippy lints
+        run: |
+          cargo clippy \
+            --workspace \
+            --features full \
+            --locked \
+            --no-deps \
+            --profile test \
+            -- \
+            -D warnings
+
+      - name: Run Cargo tests
+        run: |
+          cargo test \
+            --workspace \
+            --features full \
+            --locked \
+            --profile test \
+            --all-targets \
+            --no-fail-fast
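Assuming prek and a nightly toolchain are installed locally (the setup-rust action above installs prek via cargo-binstall), the same gate can be reproduced before pushing:

    prek run --all-files --hook-stage manual --show-diff-on-failure --color=always -v
    cargo +nightly fmt --all -- --check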
@@ -3,15 +3,25 @@ concurrency:
   group: "release-image-${{ github.ref }}"
 
 on:
-  push:
+  pull_request:
     paths-ignore:
       - "*.md"
       - "**/*.md"
       - ".gitlab-ci.yml"
      - ".gitignore"
       - "renovate.json"
-      - "debian/**"
-      - "docker/**"
+      - "pkg/**"
+      - "docs/**"
+  push:
+    branches:
+      - main
+    paths-ignore:
+      - "*.md"
+      - "**/*.md"
+      - ".gitlab-ci.yml"
+      - ".gitignore"
+      - "renovate.json"
+      - "pkg/**"
       - "docs/**"
   # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:

@@ -43,6 +53,9 @@ jobs:
           let images = []
           if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
             images.push(builtinImage)
+          } else {
+            // Fallback to official registry for forks/PRs without credentials
+            images.push('forgejo.ellis.link/continuwuation/continuwuity')
           }
           core.setOutput('images', images.join("\n"))
           core.setOutput('images_list', images.join(","))

@@ -88,15 +101,22 @@ jobs:
         with:
           persist-credentials: false
       - name: Install rust
+        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
         id: rust-toolchain
         uses: ./.forgejo/actions/rust-toolchain
 
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
+        with:
+          # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
+          driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
+          endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
       - name: Set up QEMU
+        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
         uses: docker/setup-qemu-action@v3
       # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
       - name: Login to builtin registry
+        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
         uses: docker/login-action@v3
         with:
           registry: ${{ env.BUILTIN_REGISTRY }}

@@ -122,15 +142,21 @@ jobs:
         run: |
           calculatedSha=$(git rev-parse --short ${{ github.sha }})
           echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
+          echo "Short SHA: $calculatedSha"
       - name: Get Git commit timestamps
-        run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
+        run: |
+          timestamp=$(git log -1 --pretty=%ct)
+          echo "TIMESTAMP=$timestamp" >> $GITHUB_ENV
+          echo "Commit timestamp: $timestamp"
+
       - uses: ./.forgejo/actions/timelord
+        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
         with:
           key: timelord-v0
           path: .
 
       - name: Cache Rust registry
+        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
         uses: actions/cache@v3
         with:
           path: |

@@ -140,6 +166,7 @@ jobs:
             .cargo/registry/src
           key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
       - name: Cache cargo target
+        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
         id: cache-cargo-target
         uses: actions/cache@v3
         with:

@@ -147,6 +174,7 @@ jobs:
             cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
           key: cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
       - name: Cache apt cache
+        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
         id: cache-apt
         uses: actions/cache@v3
         with:

@@ -154,6 +182,7 @@ jobs:
             var-cache-apt-${{ matrix.slug }}
           key: var-cache-apt-${{ matrix.slug }}
       - name: Cache apt lib
+        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
         id: cache-apt-lib
         uses: actions/cache@v3
         with:

@@ -161,7 +190,8 @@ jobs:
             var-lib-apt-${{ matrix.slug }}
           key: var-lib-apt-${{ matrix.slug }}
       - name: inject cache into docker
-        uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0
+        if: ${{ env.BUILDKIT_ENDPOINT == '' }}
+        uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
         with:
           cache-map: |
             {

@@ -183,7 +213,7 @@ jobs:
           context: .
           file: "docker/Dockerfile"
           build-args: |
-            GIT_COMMIT_HASH=${{ github.sha }})
+            GIT_COMMIT_HASH=${{ github.sha }}
             GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
             GIT_REMOTE_URL=${{github.event.repository.html_url }}
             GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}

@@ -193,27 +223,23 @@ jobs:
           cache-from: type=gha
           # cache-to: type=gha,mode=max
           sbom: true
-          outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
+          outputs: |
+            ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', needs.define-variables.outputs.images_list) || format('type=image,"name={0}",push=false', needs.define-variables.outputs.images_list) }}
+            type=local,dest=/tmp/binaries
         env:
           SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}
 
       # For publishing multi-platform manifests
       - name: Export digest
+        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
         run: |
           mkdir -p /tmp/digests
           digest="${{ steps.build.outputs.digest }}"
           touch "/tmp/digests/${digest#sha256:}"
 
-      - name: Extract binary from container (image)
-        id: extract-binary-image
-        run: |
-          mkdir -p /tmp/binaries
-          digest="${{ steps.build.outputs.digest }}"
-          echo "container_id=$(docker create --platform ${{ matrix.platform }} ${{ needs.define-variables.outputs.images_list }}@$digest)" >> $GITHUB_OUTPUT
-      - name: Extract binary from container (copy)
-        run: docker cp ${{ steps.extract-binary-image.outputs.container_id }}:/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
-      - name: Extract binary from container (cleanup)
-        run: docker rm ${{ steps.extract-binary-image.outputs.container_id }}
+      # Binary extracted via local output for all builds
+      - name: Rename extracted binary
+        run: mv /tmp/binaries/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
 
       - name: Upload binary artifact
         uses: forgejo/upload-artifact@v4

@@ -223,6 +249,7 @@ jobs:
           if-no-files-found: error
 
       - name: Upload digest
+        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
         uses: forgejo/upload-artifact@v4
         with:
           name: digests-${{ matrix.slug }}

@@ -235,6 +262,7 @@ jobs:
     needs: [define-variables, build-image]
     steps:
       - name: Download digests
+        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
         uses: forgejo/download-artifact@v4
         with:
           path: /tmp/digests

@@ -242,6 +270,7 @@ jobs:
           merge-multiple: true
       # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
       - name: Login to builtin registry
+        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
         uses: docker/login-action@v3
         with:
           registry: ${{ env.BUILTIN_REGISTRY }}

@@ -249,9 +278,15 @@ jobs:
           password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
 
       - name: Set up Docker Buildx
+        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
         uses: docker/setup-buildx-action@v3
+        with:
+          # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
+          driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
+          endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
 
       - name: Extract metadata (tags) for Docker
+        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
         id: meta
         uses: docker/metadata-action@v5
         with:

@@ -269,6 +304,7 @@ jobs:
           DOCKER_METADATA_ANNOTATIONS_LEVELS: index
 
       - name: Create manifest list and push
+        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
         working-directory: /tmp/digests
         env:
           IMAGES: ${{needs.define-variables.outputs.images}}

@@ -286,6 +322,7 @@ jobs:
           done
 
       - name: Inspect image
+        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
         env:
           IMAGES: ${{needs.define-variables.outputs.images}}
         shell: bash
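The recurring if: ${{ env.BUILDKIT_ENDPOINT == '' }} guards skip host-side toolchain, QEMU, and cache setup whenever builds are delegated to a persistent remote BuildKit daemon. Where that variable is set is not shown in this diff; a runner opting in would presumably export it in its environment or a workflow-level env block, e.g. (address taken from the inline comment above):

    env:
      BUILDKIT_ENDPOINT: tcp://buildkit:8125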
.forgejo/workflows/renovate.yml | 111 (new file)

@@ -0,0 +1,111 @@
+name: Maintenance / Renovate
+
+on:
+  schedule:
+    # Run at 5am UTC daily to avoid late-night dev
+    - cron: '0 5 * * *'
+
+  workflow_dispatch:
+    inputs:
+      dryRun:
+        description: 'Dry run mode'
+        required: false
+        default: null
+        type: choice
+        options:
+          - null
+          - 'extract'
+          - 'lookup'
+          - 'full'
+      logLevel:
+        description: 'Log level'
+        required: false
+        default: 'info'
+        type: choice
+        options:
+          - 'info'
+          - 'warning'
+          - 'critical'
+
+  push:
+    branches:
+      - main
+    paths:
+      # Re-run when config changes
+      - '.forgejo/workflows/renovate.yml'
+      - 'renovate.json'
+
+jobs:
+  renovate:
+    name: Renovate
+    runs-on: ubuntu-latest
+    container:
+      image: ghcr.io/renovatebot/renovate:41
+      options: --tmpfs /tmp:exec
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          show-progress: false
+
+      - name: print node heap
+        run: /usr/local/renovate/node -e 'console.log(`node heap limit = ${require("v8").getHeapStatistics().heap_size_limit / (1024 * 1024)} Mb`)'
+
+      - name: Restore renovate repo cache
+        uses: https://github.com/actions/cache@v4
+        with:
+          path: |
+            /tmp/renovate/cache/renovate/repository
+          key: repo-cache-${{ github.run_id }}
+          restore-keys: |
+            repo-cache-
+
+      - name: Restore renovate package cache
+        uses: https://github.com/actions/cache@v4
+        with:
+          path: |
+            /tmp/renovate/cache/renovate/renovate-cache-sqlite
+          key: package-cache-${{ github.run_id }}
+          restore-keys: |
+            package-cache-
+
+      - name: Self-hosted Renovate
+        uses: https://github.com/renovatebot/github-action@v43.0.11
+        env:
+          LOG_LEVEL: ${{ inputs.logLevel || 'info' }}
+          RENOVATE_DRY_RUN: ${{ inputs.dryRun || 'false' }}
+
+          RENOVATE_PLATFORM: forgejo
+          RENOVATE_ENDPOINT: ${{ github.server_url }}
+          RENOVATE_AUTODISCOVER: 'false'
+          RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'
+
+          RENOVATE_GIT_TIMEOUT: 60000
+
+          RENOVATE_REQUIRE_CONFIG: 'required'
+          RENOVATE_ONBOARDING: 'false'
+
+          RENOVATE_PR_COMMITS_PER_RUN_LIMIT: 3
+
+          RENOVATE_GITHUB_TOKEN_WARN: 'false'
+          RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}
+          GITHUB_COM_TOKEN: ${{ secrets.GH_PUBLIC_RO }}
+
+          RENOVATE_REPOSITORY_CACHE: 'enabled'
+          RENOVATE_X_SQLITE_PACKAGE_CACHE: true
+
+      - name: Save renovate repo cache
+        if: always() && env.RENOVATE_DRY_RUN != 'full'
+        uses: https://github.com/actions/cache@v4
+        with:
+          path: |
+            /tmp/renovate/cache/renovate/repository
+          key: repo-cache-${{ github.run_id }}
+
+      - name: Save renovate package cache
+        if: always() && env.RENOVATE_DRY_RUN != 'full'
+        uses: https://github.com/actions/cache@v4
+        with:
+          path: |
+            /tmp/renovate/cache/renovate/renovate-cache-sqlite
+          key: package-cache-${{ github.run_id }}
@@ -1,144 +0,0 @@
-name: Checks / Rust
-
-on:
-  push:
-
-jobs:
-  format:
-    name: Format
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Install rust
-        uses: ./.forgejo/actions/rust-toolchain
-        with:
-          toolchain: "nightly"
-          components: "rustfmt"
-
-      - name: Check formatting
-        run: |
-          cargo +nightly fmt --all -- --check
-
-  clippy:
-    name: Clippy
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Install rust
-        uses: ./.forgejo/actions/rust-toolchain
-
-      - uses: https://github.com/actions/create-github-app-token@v2
-        id: app-token
-        with:
-          app-id: ${{ vars.GH_APP_ID }}
-          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
-          github-api-url: https://api.github.com
-          owner: ${{ vars.GH_APP_OWNER }}
-          repositories: ""
-      - name: Install sccache
-        uses: ./.forgejo/actions/sccache
-        with:
-          token: ${{ steps.app-token.outputs.token }}
-      - run: sudo apt-get update
-      - name: Install system dependencies
-        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
-        with:
-          packages: clang liburing-dev
-          version: 1
-      - name: Cache Rust registry
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cargo/git
-            !~/.cargo/git/checkouts
-            ~/.cargo/registry
-            !~/.cargo/registry/src
-          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
-      - name: Timelord
-        uses: ./.forgejo/actions/timelord
-        with:
-          key: sccache-v0
-          path: .
-      - name: Clippy
-        run: |
-          cargo clippy \
-            --workspace \
-            --features full \
-            --locked \
-            --no-deps \
-            --profile test \
-            -- \
-            -D warnings
-
-      - name: Show sccache stats
-        if: always()
-        run: sccache --show-stats
-
-  cargo-test:
-    name: Cargo Test
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      - name: Install rust
-        uses: ./.forgejo/actions/rust-toolchain
-
-      - uses: https://github.com/actions/create-github-app-token@v2
-        id: app-token
-        with:
-          app-id: ${{ vars.GH_APP_ID }}
-          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
-          github-api-url: https://api.github.com
-          owner: ${{ vars.GH_APP_OWNER }}
-          repositories: ""
-      - name: Install sccache
-        uses: ./.forgejo/actions/sccache
-        with:
-          token: ${{ steps.app-token.outputs.token }}
-      - run: sudo apt-get update
-      - name: Install system dependencies
-        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
-        with:
-          packages: clang liburing-dev
-          version: 1
-      - name: Cache Rust registry
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cargo/git
-            !~/.cargo/git/checkouts
-            ~/.cargo/registry
-            !~/.cargo/registry/src
-          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
-      - name: Timelord
-        uses: ./.forgejo/actions/timelord
-        with:
-          key: sccache-v0
-          path: .
-      - name: Cargo Test
-        run: |
-          cargo test \
-            --workspace \
-            --features full \
-            --locked \
-            --profile test \
-            --all-targets \
-            --no-fail-fast
-
-      - name: Show sccache stats
-        if: always()
-        run: sccache --show-stats
.github/FUNDING.yml | 5 (vendored)

@@ -1,5 +1,4 @@
-github: [JadedBlueEyes]
-# Doesn't support an array, so we can only list nex
-ko_fi: nexy7574
+github: [JadedBlueEyes, nexy7574]
 custom:
+  - https://ko-fi.com/nexy7574
   - https://ko-fi.com/JadedBlueEyes
.mailmap | 1

@@ -13,3 +13,4 @@ Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
 Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
 Timo Kösters <timo@koesters.xyz>
 x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
+Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>
@@ -9,7 +9,7 @@ repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v5.0.0
     hooks:
-      - id: check-byte-order-marker
+      - id: fix-byte-order-marker
       - id: check-case-conflict
       - id: check-symlinks
       - id: destroyed-symlinks
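The swap from check-byte-order-marker to fix-byte-order-marker follows upstream pre-commit-hooks, where the check-only hook is deprecated; the fix variant strips a UTF-8 byte-order mark from offending files instead of merely failing on them.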
.vscode/settings.json | 3 (vendored)

@@ -7,5 +7,6 @@
     "continuwuity",
     "homeserver",
     "homeservers"
-  ]
+  ],
+  "rust-analyzer.cargo.features": ["full"]
 }
Cargo.lock (generated) | 1267 changed lines: file diff suppressed because it is too large.
66
Cargo.toml
66
Cargo.toml
|
@ -48,15 +48,15 @@ features = ["ffi", "std", "union"]
|
||||||
version = "0.6.2"
|
version = "0.6.2"
|
||||||
|
|
||||||
[workspace.dependencies.ctor]
|
[workspace.dependencies.ctor]
|
||||||
version = "0.2.9"
|
version = "0.5.0"
|
||||||
|
|
||||||
[workspace.dependencies.cargo_toml]
|
[workspace.dependencies.cargo_toml]
|
||||||
version = "0.21"
|
version = "0.22"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = ["features"]
|
features = ["features"]
|
||||||
|
|
||||||
[workspace.dependencies.toml]
|
[workspace.dependencies.toml]
|
||||||
version = "0.8.14"
|
version = "0.9.5"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = ["parse"]
|
features = ["parse"]
|
||||||
|
|
||||||
|
@ -352,7 +352,7 @@ version = "0.1.2"
|
||||||
[workspace.dependencies.ruma]
|
[workspace.dependencies.ruma]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
||||||
#branch = "conduwuit-changes"
|
#branch = "conduwuit-changes"
|
||||||
rev = "b753738047d1f443aca870896ef27ecaacf027da"
|
rev = "8fb268fa2771dfc3a1c8075ef1246e7c9a0a53fd"
|
||||||
features = [
|
features = [
|
||||||
"compat",
|
"compat",
|
||||||
"rand",
|
"rand",
|
||||||
|
@ -411,25 +411,28 @@ default-features = false
|
||||||
|
|
||||||
# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
|
# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
|
||||||
[workspace.dependencies.opentelemetry]
|
[workspace.dependencies.opentelemetry]
|
||||||
version = "0.21.0"
|
version = "0.30.0"
|
||||||
|
|
||||||
[workspace.dependencies.tracing-flame]
|
[workspace.dependencies.tracing-flame]
|
||||||
version = "0.2.0"
|
version = "0.2.0"
|
||||||
|
|
||||||
[workspace.dependencies.tracing-opentelemetry]
|
[workspace.dependencies.tracing-opentelemetry]
|
||||||
version = "0.22.0"
|
version = "0.31.0"
|
||||||
|
|
||||||
[workspace.dependencies.opentelemetry_sdk]
|
[workspace.dependencies.opentelemetry_sdk]
|
||||||
version = "0.21.2"
|
version = "0.30.0"
|
||||||
features = ["rt-tokio"]
|
features = ["rt-tokio"]
|
||||||
|
|
||||||
[workspace.dependencies.opentelemetry-jaeger]
|
[workspace.dependencies.opentelemetry-otlp]
|
||||||
version = "0.20.0"
|
version = "0.30.0"
|
||||||
features = ["rt-tokio"]
|
features = ["http", "trace", "logs", "metrics"]
|
||||||
|
|
||||||
|
[workspace.dependencies.opentelemetry-jaeger-propagator]
|
||||||
|
version = "0.30.0"
|
||||||
|
|
||||||
# optional sentry metrics for crash/panic reporting
|
# optional sentry metrics for crash/panic reporting
|
||||||
[workspace.dependencies.sentry]
|
[workspace.dependencies.sentry]
|
||||||
version = "0.37.0"
|
version = "0.42.0"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = [
|
features = [
|
||||||
"backtrace",
|
"backtrace",
|
||||||
|
@ -445,9 +448,9 @@ features = [
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.dependencies.sentry-tracing]
|
[workspace.dependencies.sentry-tracing]
|
||||||
version = "0.37.0"
|
version = "0.42.0"
|
||||||
[workspace.dependencies.sentry-tower]
|
[workspace.dependencies.sentry-tower]
|
||||||
version = "0.37.0"
|
version = "0.42.0"
|
||||||
|
|
||||||
# jemalloc usage
|
# jemalloc usage
|
||||||
[workspace.dependencies.tikv-jemalloc-sys]
|
[workspace.dependencies.tikv-jemalloc-sys]
|
||||||
|
@ -476,7 +479,7 @@ features = ["use_std"]
|
||||||
version = "0.4"
|
version = "0.4"
|
||||||
|
|
||||||
[workspace.dependencies.nix]
|
[workspace.dependencies.nix]
|
||||||
version = "0.29.0"
|
version = "0.30.1"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = ["resource"]
|
features = ["resource"]
|
||||||
|
|
||||||
|
@ -498,7 +501,7 @@ version = "0.4.3"
|
||||||
default-features = false
|
default-features = false
|
||||||
|
|
||||||
[workspace.dependencies.termimad]
|
[workspace.dependencies.termimad]
|
||||||
version = "0.31.2"
|
version = "0.34.0"
|
||||||
default-features = false
|
default-features = false
|
||||||
|
|
||||||
[workspace.dependencies.checked_ops]
|
[workspace.dependencies.checked_ops]
|
||||||
|
@@ -536,16 +539,21 @@ version = "0.2"
 version = "0.2"

 [workspace.dependencies.minicbor]
-version = "0.26.3"
+version = "2.1.1"
 features = ["std"]

 [workspace.dependencies.minicbor-serde]
-version = "0.4.1"
+version = "0.6.0"
 features = ["std"]

 [workspace.dependencies.maplit]
 version = "1.0.2"

+[workspace.dependencies.ldap3]
+version = "0.11.5"
+default-features = false
+features = ["sync", "tls-rustls"]
+
 #
 # Patches
 #
@@ -759,25 +767,6 @@ incremental = true

 [profile.dev.package.conduwuit_core]
 inherits = "dev"
-#rustflags = [
-#	'--cfg', 'conduwuit_mods',
-#	'-Ztime-passes',
-#	'-Zmir-opt-level=0',
-#	'-Ztls-model=initial-exec',
-#	'-Cprefer-dynamic=true',
-#	'-Zstaticlib-prefer-dynamic=true',
-#	'-Zstaticlib-allow-rdylib-deps=true',
-#	'-Zpacked-bundled-libs=false',
-#	'-Zplt=true',
-#	'-Clink-arg=-Wl,--as-needed',
-#	'-Clink-arg=-Wl,--allow-shlib-undefined',
-#	'-Clink-arg=-Wl,-z,lazy',
-#	'-Clink-arg=-Wl,-z,unique',
-#	'-Clink-arg=-Wl,-z,nodlopen',
-#	'-Clink-arg=-Wl,-z,nodelete',
-#]
-[profile.dev.package.xtask-generate-commands]
-inherits = "dev"
 [profile.dev.package.conduwuit]
 inherits = "dev"
 #rustflags = [
@@ -867,7 +856,7 @@ unused-qualifications = "warn"
 #unused-results = "warn" # TODO

 ## some sadness
-elided_named_lifetimes = "allow" # TODO!
+mismatched_lifetime_syntaxes = "allow" # TODO!
 let_underscore_drop = "allow"
 missing_docs = "allow"
 # cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g.
@@ -1006,3 +995,6 @@ literal_string_with_formatting_args = { level = "allow", priority = 1 }

 needless_raw_string_hashes = "allow"
+
+# TODO: Enable this lint & fix all instances
+collapsible_if = "allow"
@@ -57,7 +57,7 @@ Continuwuity aims to:

 ### Can I try it out?

-Check out the [documentation](introduction) for installation instructions.
+Check out the [documentation](https://continuwuity.org) for installation instructions.

 There are currently no open registration Continuwuity instances available.

@@ -1,83 +0,0 @@
-[Unit]
-
-Description=Continuwuity - Matrix homeserver
-Wants=network-online.target
-After=network-online.target
-Documentation=https://continuwuity.org/
-RequiresMountsFor=/var/lib/private/conduwuit
-Alias=matrix-conduwuit.service
-
-[Service]
-DynamicUser=yes
-Type=notify-reload
-ReloadSignal=SIGUSR1
-
-TTYPath=/dev/tty25
-DeviceAllow=char-tty
-StandardInput=tty-force
-StandardOutput=tty
-StandardError=journal+console
-
-Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
-Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
-
-TTYReset=yes
-# uncomment to allow buffer to be cleared every restart
-TTYVTDisallocate=no
-
-TTYColumns=120
-TTYRows=40
-
-AmbientCapabilities=
-CapabilityBoundingSet=
-
-DevicePolicy=closed
-LockPersonality=yes
-MemoryDenyWriteExecute=yes
-NoNewPrivileges=yes
-#ProcSubset=pid
-ProtectClock=yes
-ProtectControlGroups=yes
-ProtectHome=yes
-ProtectHostname=yes
-ProtectKernelLogs=yes
-ProtectKernelModules=yes
-ProtectKernelTunables=yes
-ProtectProc=invisible
-ProtectSystem=strict
-PrivateDevices=yes
-PrivateMounts=yes
-PrivateTmp=yes
-PrivateUsers=yes
-PrivateIPC=yes
-RemoveIPC=yes
-RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
-RestrictNamespaces=yes
-RestrictRealtime=yes
-RestrictSUIDSGID=yes
-SystemCallArchitectures=native
-SystemCallFilter=@system-service @resources
-SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
-SystemCallErrorNumber=EPERM
-StateDirectory=conduwuit
-
-RuntimeDirectory=conduwuit
-RuntimeDirectoryMode=0750
-
-Environment=CONTINUWUITY_CONFIG=${CREDENTIALS_DIRECTORY}/config.toml
-LoadCredential=config.toml:/etc/conduwuit/conduwuit.toml
-BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
-BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit
-
-ExecStart=/usr/bin/conduwuit
-Restart=on-failure
-RestartSec=5
-
-TimeoutStopSec=4m
-TimeoutStartSec=4m
-
-StartLimitInterval=1m
-StartLimitBurst=5
-
-[Install]
-WantedBy=multi-user.target
@@ -79,9 +79,11 @@
 # This is the only directory where continuwuity will save its data,
 # including media. Note: this was previously "/var/lib/matrix-conduit".
 #
-# YOU NEED TO EDIT THIS.
+# YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
+# `systemd` service. The service file sets it to `/var/lib/conduwuit`
+# using an environment variable and also grants write access.
 #
-# example: "/var/lib/continuwuity"
+# example: "/var/lib/conduwuit"
 #
 #database_path =

@@ -589,13 +591,19 @@
 #
 #default_room_version = 11

-# This item is undocumented. Please contribute documentation for it.
+# Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
+# Jaeger exporter. Traces will be sent via OTLP to a collector (such as
+# Jaeger) that supports the OpenTelemetry Protocol.
 #
-#allow_jaeger = false
+# Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
+# environment variable (defaults to http://localhost:4318).
+#
+#allow_otlp = false

-# This item is undocumented. Please contribute documentation for it.
+# Filter for OTLP tracing spans. This controls which spans are exported
+# to the OTLP collector.
 #
-#jaeger_filter = "info"
+#otlp_filter = "info"

 # If the 'perf_measurements' compile-time feature is enabled, enables
 # collecting folded stack trace profile of tracing spans using
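The new `allow_otlp` option reads its collector endpoint from the standard OpenTelemetry environment variable. A minimal wiring sketch; the `CONTINUWUITY_*` names assume the usual environment-variable mapping for config keys and are not verified here:

```sh
# Point the exporter at an OTLP-capable collector (4318 is the OTLP/HTTP default)
export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4318"
# Enable export and set the span filter (env names assumed from the config keys above)
export CONTINUWUITY_ALLOW_OTLP=true
export CONTINUWUITY_OTLP_FILTER="info"
conduwuit
```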
@@ -1696,6 +1704,10 @@
 #
 #config_reload_signal = true

+# This item is undocumented. Please contribute documentation for it.
+#
+#ldap = false
+
 [global.tls]

 # Path to a valid TLS certificate file.
@@ -1774,3 +1786,91 @@
 # is 33.55MB. Setting it to 0 disables blurhashing.
 #
 #blurhash_max_raw_size = 33554432
+
+[global.ldap]
+
+# Whether to enable LDAP login.
+#
+# example: "true"
+#
+#enable = false
+
+# Whether to force LDAP authentication or authorize classical password
+# login.
+#
+# example: "true"
+#
+#ldap_only = false
+
+# URI of the LDAP server.
+#
+# example: "ldap://ldap.example.com:389"
+#
+#uri = ""
+
+# Root of the searches.
+#
+# example: "ou=users,dc=example,dc=org"
+#
+#base_dn = ""
+
+# Bind DN if anonymous search is not enabled.
+#
+# You can use the variable `{username}` that will be replaced by the
+# entered username. In such case, the password used to bind will be the
+# one provided for the login and not the one given by
+# `bind_password_file`. Beware: automatically granting admin rights will
+# not work if you use this direct bind instead of a LDAP search.
+#
+# example: "cn=ldap-reader,dc=example,dc=org" or
+# "cn={username},ou=users,dc=example,dc=org"
+#
+#bind_dn = ""
+
+# Path to a file on the system that contains the password for the
+# `bind_dn`.
+#
+# The server must be able to access the file, and it must not be empty.
+#
+#bind_password_file = ""
+
+# Search filter to limit user searches.
+#
+# You can use the variable `{username}` that will be replaced by the
+# entered username for more complex filters.
+#
+# example: "(&(objectClass=person)(memberOf=matrix))"
+#
+#filter = "(objectClass=*)"
+
+# Attribute to use to uniquely identify the user.
+#
+# example: "uid" or "cn"
+#
+#uid_attribute = "uid"
+
+# Attribute containing the display name of the user.
+#
+# example: "givenName" or "sn"
+#
+#name_attribute = "givenName"
+
+# Root of the searches for admin users.
+#
+# Defaults to `base_dn` if empty.
+#
+# example: "ou=admins,dc=example,dc=org"
+#
+#admin_base_dn = ""
+
+# The LDAP search filter to find administrative users for continuwuity.
+#
+# If left blank, administrative state must be configured manually for each
+# user.
+#
+# You can use the variable `{username}` that will be replaced by the
+# entered username for more complex filters.
+#
+# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
+#
+#admin_filter = ""
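Since `bind_password_file` must be readable by the server and non-empty, a hedged setup sketch (the paths and the `conduwuit` group name are assumptions for illustration):

```sh
# Store the bind password outside the main config, readable only by the service
printf '%s' 'ldap-reader-password' > /etc/conduwuit/ldap_bind_password
chown root:conduwuit /etc/conduwuit/ldap_bind_password  # group name assumed
chmod 0640 /etc/conduwuit/ldap_bind_password
# then reference it from conduwuit.toml:
#   bind_password_file = "/etc/conduwuit/ldap_bind_password"
```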
debian/postinst (vendored)
@@ -1,44 +0,0 @@
-#!/bin/sh
-set -e
-
-# TODO: implement debconf support that is maintainable without duplicating the config
-#. /usr/share/debconf/confmodule
-
-CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
-CONDUWUIT_CONFIG_PATH=/etc/conduwuit
-
-case "$1" in
-	configure)
-		# Create the `conduwuit` user if it does not exist yet.
-		if ! getent passwd conduwuit > /dev/null ; then
-			echo 'Adding system user for the conduwuit Matrix homeserver' 1>&2
-			adduser --system --group --quiet \
-				--home "$CONDUWUIT_DATABASE_PATH" \
-				--disabled-login \
-				--shell "/usr/sbin/nologin" \
-				conduwuit
-		fi
-
-		# Create the database path if it does not exist yet and fix up ownership
-		# and permissions for the config.
-		mkdir -v -p "$CONDUWUIT_DATABASE_PATH"
-
-		# symlink the previous location for compatibility if it does not exist yet.
-		if ! test -L "/var/lib/matrix-conduit" ; then
-			ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
-		fi
-
-		chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH"
-		chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH"
-
-		chmod -v 740 "$CONDUWUIT_DATABASE_PATH"
-
-		echo ''
-		echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
-		echo 'To start the server, run: systemctl start conduwuit.service'
-		echo ''
-
-		;;
-esac
-
-#DEBHELPER#
@@ -199,32 +199,57 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
 EOF

 # Extract dynamically linked dependencies
-RUN <<EOF
+RUN <<'DEPS_EOF'
 set -o xtrace
-mkdir /out/libs
-mkdir /out/libs-root
+mkdir /out/libs /out/libs-root
+
+# Process each binary
 for BINARY in /out/sbin/*; do
-	lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
+	if lddtree_output=$(lddtree "$BINARY" 2>/dev/null) && [ -n "$lddtree_output" ]; then
+		echo "$lddtree_output" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | \
+			awk '{dest = ($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2; print "install -D " $1 " " dest}' | \
+			while read cmd; do eval "$cmd"; done
+	fi
 done
-EOF
+
+# Show what will be copied to runtime
+echo "=== Libraries being copied to runtime image:"
+find /out/libs* -type f 2>/dev/null | sort || echo "No libraries found"
+DEPS_EOF
+
+FROM ubuntu:latest AS prepper
+
+# Create layer structure
+RUN mkdir -p /layer1/etc/ssl/certs \
+	/layer2/usr/lib \
+	/layer3/sbin /layer3/sbom
+
+# Copy SSL certs and root-path libraries to layer1 (ultra-stable)
+COPY --from=base /etc/ssl/certs /layer1/etc/ssl/certs
+COPY --from=builder /out/libs-root/ /layer1/
+
+# Copy application libraries to layer2 (semi-stable)
+COPY --from=builder /out/libs/ /layer2/usr/lib/
+
+# Copy binaries and SBOM to layer3 (volatile)
+COPY --from=builder /out/sbin/ /layer3/sbin/
+COPY --from=builder /out/sbom/ /layer3/sbom/
+
+# Fix permissions after copying
+RUN chmod -R 755 /layer1 /layer2 /layer3

 FROM scratch

 WORKDIR /

-# Copy root certs for tls into image
-# You can also mount the certs from the host
-# --volume /etc/ssl/certs:/etc/ssl/certs:ro
-COPY --from=base /etc/ssl/certs /etc/ssl/certs
+# Copy ultra-stable layer (SSL certs, system libraries)
+COPY --from=prepper /layer1/ /

-# Copy our build
-COPY --from=builder /out/sbin/ /sbin/
-# Copy SBOM
-COPY --from=builder /out/sbom/ /sbom/
+# Copy semi-stable layer (application libraries)
+COPY --from=prepper /layer2/ /

-# Copy dynamic libraries to root
-COPY --from=builder /out/libs-root/ /
-COPY --from=builder /out/libs/ /usr/lib/
+# Copy volatile layer (binaries, SBOM)
+COPY --from=prepper /layer3/ /

 # Inform linker where to find libraries
 ENV LD_LIBRARY_PATH=/usr/lib
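To see what the dependency-extraction loop above produces, the pipeline can be run by hand. A sketch assuming `lddtree`'s usual `name => /path` output format; the printed paths are illustrative, not captured from a real build:

```sh
# The first awk swaps the columns (resolved path first, library name second),
# sort dedupes by path, and the second awk emits one install command per library.
lddtree /out/sbin/conduwuit \
	| awk '{print $(NF-0) " " $1}' \
	| sort -u -k 1,1 \
	| awk '{dest = ($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2; print "install -D " $1 " " dest}'
# prints lines such as:
#   install -D /lib/x86_64-linux-gnu/libssl.so.3 /out/libs/libssl.so.3
```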
@@ -21,6 +21,7 @@ This document contains the help content for the `admin` command-line program.
 * [`admin users list-joined-rooms`↴](#admin-users-list-joined-rooms)
 * [`admin users force-join-room`↴](#admin-users-force-join-room)
 * [`admin users force-leave-room`↴](#admin-users-force-leave-room)
+* [`admin users force-leave-remote-room`↴](#admin-users-force-leave-remote-room)
 * [`admin users force-demote`↴](#admin-users-force-demote)
 * [`admin users make-user-admin`↴](#admin-users-make-user-admin)
 * [`admin users put-room-tag`↴](#admin-users-put-room-tag)

@@ -295,6 +296,7 @@ You can find the ID using the `list-appservices` command.
 * `list-joined-rooms` — - Lists all the rooms (local and remote) that the specified user is joined in
 * `force-join-room` — - Manually join a local user to a room
 * `force-leave-room` — - Manually leave a local user from a room
+* `force-leave-remote-room` — - Manually leave a remote room for a local user
 * `force-demote` — - Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits
 * `make-user-admin` — - Grant server-admin privileges to a user
 * `put-room-tag` — - Puts a room tag for the specified user and room ID

@@ -449,6 +451,19 @@ Reverses the effects of the `suspend` command, allowing the user to send message

+## `admin users force-leave-remote-room`
+
+- Manually leave a remote room for a local user
+
+**Usage:** `admin users force-leave-remote-room <USER_ID> <ROOM_ID>`
+
+###### **Arguments:**
+
+* `<USER_ID>`
+* `<ROOM_ID>`
+
+
 ## `admin users force-demote`

 - Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits
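Admin commands are issued in the server's admin room. A hypothetical invocation of the new subcommand, with made-up IDs and the `!admin` prefix assumed from the usual convention:

```
!admin users force-leave-remote-room @alice:example.com !abcdefghijkl:remote.example.org
```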
@@ -9,24 +9,11 @@

 </details>

-## Debian systemd unit file
+## systemd unit file

 <details>
-<summary>Debian systemd unit file</summary>
+<summary>systemd unit file</summary>

 ```
-{{#include ../../debian/conduwuit.service}}
+{{#include ../../pkg/conduwuit.service}}
 ```

 </details>
-
-## Arch Linux systemd unit file
-
-<details>
-<summary>Arch Linux systemd unit file</summary>
-
-```
-{{#include ../../arch/conduwuit.service}}
-```
-
-</details>
@@ -1 +1 @@
-{{#include ../../debian/README.md}}
+{{#include ../../pkg/debian/README.md}}
@@ -12,6 +12,15 @@ services:
       #- ./continuwuity.toml:/etc/continuwuity.toml
     networks:
       - proxy
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
+      - "traefik.http.routers.continuwuity.entrypoints=websecure" # your HTTPS entry point
+      - "traefik.http.routers.continuwuity.tls=true"
+      - "traefik.http.routers.continuwuity.service=continuwuity"
+      - "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
+      # possibly, depending on your config:
+      # - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
     environment:
       CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
       CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
@@ -12,6 +12,14 @@ services:
       #- ./continuwuity.toml:/etc/continuwuity.toml
     networks:
       - proxy
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
+      - "traefik.http.routers.continuwuity.entrypoints=websecure"
+      - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
+      - "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
+      # Uncomment and adjust the following if you want to use middleware
+      # - "traefik.http.routers.continuwuity.middlewares=secureHeaders@file"
     environment:
       CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
       CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
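With these labels, both hostnames route to the container. A quick smoke test of the router rule, using the placeholder hostnames from the compose files:

```sh
# Served via the PathPrefix(`/.well-known/matrix`) clause on the apex domain
curl -s https://example.com/.well-known/matrix/client
# Served via the Host(`matrix.example.com`) clause
curl -s https://matrix.example.com/_matrix/client/versions
```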
docs/static/announcements.json (vendored)
@@ -6,8 +6,8 @@
     "message": "Welcome to Continuwuity! Important announcements about the project will appear here."
   },
   {
-    "id": 2,
-    "message": "🎉 Continuwuity v0.5.0-rc.6 is now available! This release includes improved knock-restricted room handling, automatic support contact configuration, and a new HTML landing page. Check [the release notes for full details](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.6) and upgrade instructions."
+    "id": 3,
+    "message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
   }
 ]
 }
flake.lock (generated)
@@ -153,11 +153,11 @@
       "rust-analyzer-src": "rust-analyzer-src"
     },
     "locked": {
-      "lastModified": 1751525020,
-      "narHash": "sha256-oDO6lCYS5Bf4jUITChj9XV7k3TP38DE0Ckz5n5ORCME=",
+      "lastModified": 1755585599,
+      "narHash": "sha256-tl/0cnsqB/Yt7DbaGMel2RLa7QG5elA8lkaOXli6VdY=",
       "owner": "nix-community",
       "repo": "fenix",
-      "rev": "a1a5f92f47787e7df9f30e5e5ac13e679215aa1e",
+      "rev": "6ed03ef4c8ec36d193c18e06b9ecddde78fb7e42",
       "type": "github"
     },
     "original": {

@@ -513,23 +513,6 @@
       "type": "github"
     }
   },
-  "rocksdb": {
-    "flake": false,
-    "locked": {
-      "lastModified": 1741308171,
-      "narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
-      "ref": "v9.11.1",
-      "rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
-      "revCount": 13177,
-      "type": "git",
-      "url": "https://forgejo.ellis.link/continuwuation/rocksdb"
-    },
-    "original": {
-      "ref": "v9.11.1",
-      "type": "git",
-      "url": "https://forgejo.ellis.link/continuwuation/rocksdb"
-    }
-  },
   "root": {
     "inputs": {
       "attic": "attic",

@@ -539,18 +522,17 @@
       "flake-compat": "flake-compat_3",
       "flake-utils": "flake-utils",
       "nix-filter": "nix-filter",
-      "nixpkgs": "nixpkgs_5",
-      "rocksdb": "rocksdb"
+      "nixpkgs": "nixpkgs_5"
     }
   },
   "rust-analyzer-src": {
     "flake": false,
     "locked": {
-      "lastModified": 1751433876,
-      "narHash": "sha256-IsdwOcvLLDDlkFNwhdD5BZy20okIQL01+UQ7Kxbqh8s=",
+      "lastModified": 1755504847,
+      "narHash": "sha256-VX0B9hwhJypCGqncVVLC+SmeMVd/GAYbJZ0MiiUn2Pk=",
       "owner": "rust-lang",
       "repo": "rust-analyzer",
-      "rev": "11d45c881389dae90b0da5a94cde52c79d0fc7ef",
+      "rev": "a905e3b21b144d77e1b304e49f3264f6f8d4db75",
       "type": "github"
     },
     "original": {
flake.nix
@@ -16,10 +16,6 @@
     flake-utils.url = "github:numtide/flake-utils?ref=main";
     nix-filter.url = "github:numtide/nix-filter?ref=main";
     nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
-    rocksdb = {
-      url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=v9.11.1";
-      flake = false;
-    };
   };

   outputs =

@@ -31,20 +27,24 @@
         inherit system;
       };

+      fnx = inputs.fenix.packages.${system};
       # The Rust toolchain to use
-      toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
-        file = ./rust-toolchain.toml;
+      toolchain = fnx.combine [
+        (fnx.fromToolchainFile {
+          file = ./rust-toolchain.toml;

-        # See also `rust-toolchain.toml`
-        sha256 = "sha256-KUm16pHj+cRedf8vxs/Hd2YWxpOrWZ7UOrwhILdSJBU=";
-      };
+          # See also `rust-toolchain.toml`
+          sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
+        })
+        fnx.complete.rustfmt
+      ];

       mkScope =
         pkgs:
         pkgs.lib.makeScope pkgs.newScope (self: {
           inherit pkgs inputs;
           craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
-          main = self.callPackage ./nix/pkgs/main { };
+          main = self.callPackage ./pkg/nix/pkgs/main { };
           liburing = pkgs.liburing.overrideAttrs {
             # Tests weren't building
             outputs = [

@@ -61,8 +61,14 @@
             inherit (self) liburing;
           }).overrideAttrs
             (old: {
-              src = inputs.rocksdb;
-              version = "v9.11.1";
+              src = pkgsHost.fetchFromGitea {
+                domain = "forgejo.ellis.link";
+                owner = "continuwuation";
+                repo = "rocksdb";
+                rev = "10.4.fb";
+                sha256 = "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=";
+              };
+              version = "v10.4.fb";
               cmakeFlags =
                 pkgs.lib.subtractLists [
                   # No real reason to have snappy or zlib, no one uses this
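With the rocksdb flake input removed, bumping RocksDB is now an edit to the `fetchFromGitea` pin in flake.nix rather than a lock-file entry. A rebuild sketch:

```sh
nix flake update   # refreshes the remaining inputs in flake.lock
nix build .        # the rocksdb rev/sha256 now come from flake.nix itself
```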
@@ -1,25 +1,24 @@
 [Unit]

 Description=Continuwuity - Matrix homeserver
+Documentation=https://continuwuity.org/
 Wants=network-online.target
 After=network-online.target
-Documentation=https://continuwuity.org/
 Alias=matrix-conduwuit.service

 [Service]
 DynamicUser=yes
 User=conduwuit
 Group=conduwuit
-Type=notify
+Type=notify-reload
+ReloadSignal=SIGUSR1

 Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"

 Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
 Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
+Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"

-ExecStart=/usr/sbin/conduwuit
+ExecStart=/usr/bin/conduwuit

-ReadWritePaths=/var/lib/conduwuit /etc/conduwuit
-
 AmbientCapabilities=
 CapabilityBoundingSet=

@@ -52,16 +51,17 @@ SystemCallArchitectures=native
 SystemCallFilter=@system-service @resources
 SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
 SystemCallErrorNumber=EPERM
-#StateDirectory=conduwuit

+StateDirectory=conduwuit
+ConfigurationDirectory=conduwuit
 RuntimeDirectory=conduwuit
 RuntimeDirectoryMode=0750

 Restart=on-failure
 RestartSec=5

-TimeoutStopSec=2m
-TimeoutStartSec=2m
+TimeoutStopSec=4m
+TimeoutStartSec=4m

 StartLimitInterval=1m
 StartLimitBurst=5
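With `Type=notify-reload` and `ReloadSignal=SIGUSR1` as above, configuration reloads go through systemd instead of a manual signal:

```sh
# systemd delivers SIGUSR1 and waits for the service to re-notify readiness
systemctl reload conduwuit.service
```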
pkg/debian/postinst (new file)
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -e
+
+# TODO: implement debconf support that is maintainable without duplicating the config
+#. /usr/share/debconf/confmodule
+
+CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
+CONDUWUIT_CONFIG_PATH=/etc/conduwuit
+
+case "$1" in
+	configure)
+		echo ''
+		echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
+		echo 'To start the server, run: systemctl start conduwuit.service'
+		echo ''
+
+		;;
+esac
+
+#DEBHELPER#
@@ -20,24 +20,18 @@ case $1 in

 	if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then
 		if test -L "$CONDUWUIT_CONFIG_PATH"; then
-			echo "Deleting conduwuit configuration files"
+			echo "Deleting continuwuity configuration files"
 			rm -v -r "$CONDUWUIT_CONFIG_PATH"
 		fi
 	fi

 	if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then
 		if test -L "$CONDUWUIT_DATABASE_PATH"; then
-			echo "Deleting conduwuit database directory"
+			echo "Deleting continuwuity database directory"
 			rm -r "$CONDUWUIT_DATABASE_PATH"
 		fi
 	fi

-	if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then
-		if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then
-			echo "Removing matrix-conduit symlink"
-			rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
-		fi
-	fi
 	;;
 esac
pkg/fedora/continuwuity.spec.rpkg (new file)
@@ -0,0 +1,80 @@
+# This should be run using rpkg-util: https://docs.pagure.org/rpkg-util
+# it requires Internet access and is not suitable for Fedora main repos
+# TODO: rpkg-util is no longer maintained, find a replacement
+
+Name: continuwuity
+Version: {{{ git_repo_version }}}
+Release: 1%{?dist}
+Summary: Very cool Matrix chat homeserver written in Rust
+
+License: Apache-2.0 AND MIT
+
+URL: https://continuwuity.org
+VCS: {{{ git_repo_vcs }}}
+Source: {{{ git_repo_pack }}}
+
+BuildRequires: cargo-rpm-macros >= 25
+BuildRequires: systemd-rpm-macros
+# Needed to build rust-librocksdb-sys
+BuildRequires: clang
+BuildRequires: liburing-devel
+
+Requires: liburing
+Requires: glibc
+Requires: libstdc++
+
+%global _description %{expand:
+A cool hard fork of Conduit, a Matrix homeserver written in Rust}
+
+%description %{_description}
+
+%prep
+{{{ git_repo_setup_macro }}}
+%cargo_prep -N
+# Perform an online build so Git dependencies can be retrieved
+sed -i 's/^offline = true$//' .cargo/config.toml
+
+%build
+%cargo_build
+
+# Here's the one legally required mystery incantation in this file.
+# Some of our dependencies have source files which are (for some reason) marked as executable.
+# Files in .cargo/registry/ are copied into /usr/src/ by the debuginfo machinery
+# at the end of the build step, and then the BRP shebang mangling script checks
+# the entire buildroot to find executable files, and fails the build because
+# it thinks Rust's file attributes are shebangs because they start with `#!`.
+# So we have to clear the executable bit on all of them before that happens.
+find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
+
+# TODO: this fails currently because it's forced to run in offline mode
+# {cargo_license -- --no-dev} > LICENSE.dependencies
+
+%install
+install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
+install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
+install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
+
+%files
+%license LICENSE
+%license src/core/matrix/state_res/LICENSE
+%doc CODE_OF_CONDUCT.md
+%doc CONTRIBUTING.md
+%doc README.md
+%doc SECURITY.md
+%config %{_sysconfdir}/conduwuit/conduwuit.toml
+
+%{_bindir}/conduwuit
+%{_unitdir}/conduwuit.service
+# Do not create /var/lib/conduwuit, systemd will create it if necessary
+
+%post
+%systemd_post conduwuit.service
+
+%preun
+%systemd_preun conduwuit.service
+
+%postun
+%systemd_postun_with_restart conduwuit.service
+
+%changelog
+{{{ git_repo_changelog }}}
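As the header notes, this spec is meant for rpkg-util rather than plain rpmbuild. A build sketch using rpkg's documented commands, untested here:

```sh
dnf install rpkg
# from the repository root: render the spec templates and build locally
rpkg local
```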
@@ -1,26 +1,59 @@
 {
   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
-  "extends": [
-    "config:recommended"
-  ],
+  "extends": ["config:recommended"],
   "lockFileMaintenance": {
     "enabled": true,
-    "schedule": [
-      "at any time"
-    ]
+    "schedule": ["at any time"]
   },
   "nix": {
     "enabled": true
   },
-  "labels": [
-    "dependencies",
-    "github_actions"
-  ],
+  "labels": ["Dependencies", "Dependencies/Renovate"],
   "ignoreDeps": [
-    "tikv-jemllocator",
+    "tikv-jemallocator",
     "tikv-jemalloc-sys",
     "tikv-jemalloc-ctl",
-    "opentelemetry-rust",
+    "opentelemetry",
+    "opentelemetry_sdk",
+    "opentelemetry-jaeger",
     "tracing-opentelemetry"
-  ]
+  ],
+  "github-actions": {
+    "enabled": true,
+    "managerFilePatterns": [
+      "/(^|/)\\.forgejo/workflows/[^/]+\\.ya?ml$/",
+      "/(^|/)\\.forgejo/actions/[^/]+/action\\.ya?ml$/",
+      "/(^|/)\\.github/workflows/[^/]+\\.ya?ml$/",
+      "/(^|/)\\.github/actions/[^/]+/action\\.ya?ml$/"
+    ]
+  },
+  "packageRules": [
+    {
+      "description": "Batch minor and patch GitHub Actions updates",
+      "matchManagers": ["github-actions"],
+      "matchUpdateTypes": ["minor", "patch"],
+      "groupName": "github-actions-non-major"
+    },
+    {
+      "description": "Group Rust toolchain updates into a single PR",
+      "matchManagers": ["custom.regex"],
+      "matchPackageNames": ["rust", "rustc", "cargo"],
+      "groupName": "rust-toolchain"
+    },
+    {
+      "description": "Group lockfile updates into a single PR",
+      "matchUpdateTypes": ["lockFileMaintenance"],
+      "groupName": "lockfile-maintenance"
+    },
+    {
+      "description": "Batch patch-level Rust dependency updates",
+      "matchManagers": ["cargo"],
+      "matchUpdateTypes": ["patch"],
+      "groupName": "rust-patch-updates"
+    },
+    {
+      "matchManagers": ["cargo"],
+      "prConcurrentLimit": 5
+    }
+  ]
 }
@@ -9,13 +9,16 @@
 # If you're having trouble making the relevant changes, bug a maintainer.

 [toolchain]
-channel = "1.87.0"
 profile = "minimal"
+channel = "1.89.0"
 components = [
 	# For rust-analyzer
 	"rust-src",
 	"rust-analyzer",
 	# For CI and editors
-	"rustfmt",
 	"clippy",
+	# you have to install rustfmt nightly yourself (if you're not on NixOS)
+	#
+	# The rust-toolchain.toml file doesn't provide any syntax for specifying components from different toolchains
+	# "rustfmt"
 ]
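Since `rustfmt` was dropped from the pinned components, non-NixOS contributors install it from nightly themselves, for example:

```sh
rustup toolchain install nightly --component rustfmt
cargo +nightly fmt --all
```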
@@ -89,6 +89,7 @@ serde_yaml.workspace = true
 tokio.workspace = true
 tracing-subscriber.workspace = true
 tracing.workspace = true
+ctor.workspace = true

 [lints]
 workspace = true
@@ -281,15 +281,8 @@ pub(super) async fn get_remote_pdu(
			vec![(event_id, value, room_id)]
		};

-		info!("Attempting to handle event ID {event_id} as backfilled PDU");
-		self.services
-			.rooms
-			.timeline
-			.backfill_pdu(&server, response.pdu)
-			.await?;
-
		let text = serde_json::to_string_pretty(&json)?;
-		let msg = "Got PDU from specified server and handled as backfilled";
+		let msg = "Got PDU from specified server:";
		write!(self, "{msg}. Event body:\n```json\n{text}\n```")
	},
 }
@@ -29,6 +29,8 @@ pub(crate) use crate::{context::Context, utils::get_room_info};

 pub(crate) const PAGE_SIZE: usize = 100;

+use ctor::{ctor, dtor};
+
 conduwuit::mod_ctor! {}
 conduwuit::mod_dtor! {}
 conduwuit::rustc_flags_capture! {}
@@ -57,5 +57,5 @@ pub(super) async fn pdus(
		.try_collect()
		.await?;

-	self.write_str(&format!("{result:#?}")).await
+	self.write_str(&format!("```\n{result:#?}\n```")).await
 }
@@ -1,8 +1,8 @@
 use std::{collections::BTreeMap, fmt::Write as _};

 use api::client::{
-	full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, update_avatar_url,
-	update_displayname,
+	full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room,
+	update_avatar_url, update_displayname,
 };
 use conduwuit::{
	Err, Result, debug, debug_warn, error, info, is_equal_to,

@@ -68,7 +68,8 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
	// Create user
	self.services
		.users
-		.create(&user_id, Some(password.as_str()))?;
+		.create(&user_id, Some(password.as_str()), None)
+		.await?;

	// Default to pretty displayname
	let mut displayname = user_id.localpart().to_owned();

@@ -284,6 +285,7 @@ pub(super) async fn reset_password(&self, username: String, password: Option<Str
		.services
		.users
		.set_password(&user_id, Some(new_password.as_str()))
+		.await
	{
		| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
		| Ok(()) => {

@@ -924,3 +926,29 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
	))
	.await
 }
+
+#[admin_command]
+pub(super) async fn force_leave_remote_room(
+	&self,
+	user_id: String,
+	room_id: OwnedRoomOrAliasId,
+) -> Result {
+	let user_id = parse_local_user_id(self.services, &user_id)?;
+	let (room_id, _) = self
+		.services
+		.rooms
+		.alias
+		.resolve_with_servers(&room_id, None)
+		.await?;
+
+	assert!(
+		self.services.globals.user_is_local(&user_id),
+		"Parsed user_id must be a local user"
+	);
+	remote_leave_room(self.services, &user_id, &room_id, None)
+		.boxed()
+		.await?;
+
+	self.write_str(&format!("{user_id} has been joined to {room_id}.",))
+		.await
+}
@@ -103,6 +103,12 @@ pub enum UserCommand {
		room_id: OwnedRoomOrAliasId,
	},

+	/// - Manually leave a remote room for a local user.
+	ForceLeaveRemoteRoom {
+		user_id: String,
+		room_id: OwnedRoomOrAliasId,
+	},
+
	/// - Forces the specified user to drop their power levels to the room
	/// default, if their permissions allow and the auth check permits
	ForceDemote {
@@ -49,6 +49,9 @@ jemalloc_stats = [
 	"conduwuit-core/jemalloc_stats",
 	"conduwuit-service/jemalloc_stats",
 ]
+ldap = [
+	"conduwuit-service/ldap"
+]
 release_max_log_level = [
 	"conduwuit-core/release_max_log_level",
 	"conduwuit-service/release_max_log_level",

@@ -90,6 +93,7 @@ serde.workspace = true
 sha1.workspace = true
 tokio.workspace = true
 tracing.workspace = true
+ctor.workspace = true

 [lints]
 workspace = true
@@ -373,7 +373,7 @@ pub(crate) async fn register_route(
	let password = if is_guest { None } else { body.password.as_deref() };

	// Create user
-	services.users.create(&user_id, password)?;
+	services.users.create(&user_id, password, None).await?;

	// Default to pretty displayname
	let mut displayname = user_id.localpart().to_owned();

@@ -659,7 +659,8 @@ pub(crate) async fn change_password_route(

	services
		.users
-		.set_password(sender_user, Some(&body.new_password))?;
+		.set_password(sender_user, Some(&body.new_password))
+		.await?;

	if body.logout_devices {
		// Logout all devices except the current one
src/api/client/admin/mod.rs (new file)
@@ -0,0 +1,3 @@
+mod suspend;
+
+pub(crate) use self::suspend::*;
src/api/client/admin/suspend.rs (new file)
@@ -0,0 +1,89 @@
+use axum::extract::State;
+use conduwuit::{Err, Result};
+use futures::future::{join, join3};
+use ruma::api::client::admin::{get_suspended, set_suspended};
+
+use crate::Ruma;
+
+/// # `GET /_matrix/client/v1/admin/suspend/{userId}`
+///
+/// Check the suspension status of a target user
+pub(crate) async fn get_suspended_status(
+	State(services): State<crate::State>,
+	body: Ruma<get_suspended::v1::Request>,
+) -> Result<get_suspended::v1::Response> {
+	let sender_user = body.sender_user();
+
+	let (admin, active) =
+		join(services.users.is_admin(sender_user), services.users.is_active(&body.user_id)).await;
+	if !admin {
+		return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
+	}
+	if !services.globals.user_is_local(&body.user_id) {
+		return Err!(Request(InvalidParam("Can only check the suspended status of local users")));
+	}
+	if !active {
+		return Err!(Request(NotFound("Unknown user")));
+	}
+	Ok(get_suspended::v1::Response::new(
+		services.users.is_suspended(&body.user_id).await?,
+	))
+}
+
+/// # `PUT /_matrix/client/v1/admin/suspend/{userId}`
+///
+/// Set the suspension status of a target user
+pub(crate) async fn put_suspended_status(
+	State(services): State<crate::State>,
+	body: Ruma<set_suspended::v1::Request>,
+) -> Result<set_suspended::v1::Response> {
+	let sender_user = body.sender_user();
+
+	let (sender_admin, active, target_admin) = join3(
+		services.users.is_admin(sender_user),
+		services.users.is_active(&body.user_id),
+		services.users.is_admin(&body.user_id),
+	)
+	.await;
+
+	if !sender_admin {
+		return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
+	}
+	if !services.globals.user_is_local(&body.user_id) {
+		return Err!(Request(InvalidParam("Can only set the suspended status of local users")));
+	}
+	if !active {
+		return Err!(Request(NotFound("Unknown user")));
+	}
+	if body.user_id == *sender_user {
+		return Err!(Request(Forbidden("You cannot suspend yourself")));
+	}
+	if target_admin {
+		return Err!(Request(Forbidden("You cannot suspend another server administrator")));
+	}
+	if services.users.is_suspended(&body.user_id).await? == body.suspended {
+		// No change
+		return Ok(set_suspended::v1::Response::new(body.suspended));
+	}
+
+	let action = if body.suspended {
+		services
+			.users
+			.suspend_account(&body.user_id, sender_user)
+			.await;
+		"suspended"
+	} else {
+		services.users.unsuspend_account(&body.user_id).await;
+		"unsuspended"
+	};
+
+	if services.config.admin_room_notices {
+		// Notify the admin room that an account has been un/suspended
+		services
+			.admin
+			.send_text(&format!("{} has been {} by {}.", body.user_id, action, sender_user))
+			.await;
+	}
+
+	Ok(set_suspended::v1::Response::new(body.suspended))
+}
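A hedged sketch of exercising these handlers over HTTP (MSC4323). The JSON body field matches `body.suspended` in the code, but the exact wire format is assumed, and the token and user ID are placeholders:

```sh
# Check suspension status (admin access token required)
curl -s -H "Authorization: Bearer $ADMIN_TOKEN" \
	"https://matrix.example.com/_matrix/client/v1/admin/suspend/@spammer:example.com"

# Suspend the account
curl -s -X PUT \
	-H "Authorization: Bearer $ADMIN_TOKEN" \
	-H "Content-Type: application/json" \
	-d '{"suspended": true}' \
	"https://matrix.example.com/_matrix/client/v1/admin/suspend/@spammer:example.com"
```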
@@ -19,7 +19,7 @@ use crate::Ruma;
 /// of this server.
 pub(crate) async fn get_capabilities_route(
	State(services): State<crate::State>,
-	_body: Ruma<get_capabilities::v3::Request>,
+	body: Ruma<get_capabilities::v3::Request>,
 ) -> Result<get_capabilities::v3::Response> {
	let available: BTreeMap<RoomVersionId, RoomVersionStability> =
		Server::available_room_versions().collect();

@@ -45,5 +45,14 @@ pub(crate) async fn get_capabilities_route(
		json!({"enabled": services.config.forget_forced_upon_leave}),
	)?;

+	if services
+		.users
+		.is_admin(body.sender_user.as_ref().unwrap())
+		.await
+	{
+		// Advertise suspension API
+		capabilities.set("uk.timedout.msc4323", json!({"suspend":true, "lock": false}))?;
+	}
+
	Ok(get_capabilities::v3::Response { capabilities })
 }
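For an admin user, the advertised capability should then show up in the standard capabilities response; the shape is inferred from the `json!` call above, and the hostname and token are placeholders:

```sh
curl -s -H "Authorization: Bearer $ADMIN_TOKEN" \
	"https://matrix.example.com/_matrix/client/v3/capabilities"
# → ... "uk.timedout.msc4323": {"suspend": true, "lock": false} ...
```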
@@ -156,31 +156,34 @@ pub(crate) async fn join_room_by_id_or_alias_route(
		.await?;

	let mut servers = body.via.clone();
-	servers.extend(
-		services
-			.rooms
-			.state_cache
-			.servers_invite_via(&room_id)
-			.map(ToOwned::to_owned)
-			.collect::<Vec<_>>()
-			.await,
-	);
+	if servers.is_empty() {
+		debug!("No via servers provided for join, injecting some.");
+		servers.extend(
+			services
+				.rooms
+				.state_cache
+				.servers_invite_via(&room_id)
+				.map(ToOwned::to_owned)
+				.collect::<Vec<_>>()
+				.await,
+		);

-	servers.extend(
-		services
-			.rooms
-			.state_cache
-			.invite_state(sender_user, &room_id)
-			.await
-			.unwrap_or_default()
-			.iter()
-			.filter_map(|event| event.get_field("sender").ok().flatten())
-			.filter_map(|sender: &str| UserId::parse(sender).ok())
-			.map(|user| user.server_name().to_owned()),
-	);
+		servers.extend(
+			services
+				.rooms
+				.state_cache
+				.invite_state(sender_user, &room_id)
+				.await
+				.unwrap_or_default()
+				.iter()
+				.filter_map(|event| event.get_field("sender").ok().flatten())
+				.filter_map(|sender: &str| UserId::parse(sender).ok())
+				.map(|user| user.server_name().to_owned()),
+		);

-	if let Some(server) = room_id.server_name() {
-		servers.push(server.to_owned());
+		if let Some(server) = room_id.server_name() {
+			servers.push(server.to_owned());
+		}
	}

	servers.sort_unstable();
@@ -215,7 +215,7 @@ pub async fn leave_room(
	Ok(())
 }

-async fn remote_leave_room(
+pub async fn remote_leave_room(
	services: &Services,
	user_id: &UserId,
	room_id: &RoomId,

@@ -29,7 +29,7 @@ pub(crate) use self::{
 };
 pub use self::{
	join::join_room_by_id_helper,
-	leave::{leave_all_rooms, leave_room},
+	leave::{leave_all_rooms, leave_room, remote_leave_room},
 };
 use crate::{Ruma, client::full_user_deactivate};
@@ -8,7 +8,7 @@ use conduwuit::{
	ref_at,
	utils::{
		IterStream, ReadyExt,
-		result::{FlatOk, LogErr},
+		result::LogErr,
		stream::{BroadbandExt, TryIgnore, WidebandExt},
	},
 };

@@ -91,7 +91,7 @@ pub(crate) async fn get_message_events_route(
		| Direction::Backward => PduCount::max(),
	});

-	let to: Option<PduCount> = body.to.as_deref().map(str::parse).flat_ok();
+	let to: Option<PduCount> = body.to.as_deref().map(str::parse).transpose()?;

	let limit: usize = body
		.limit

@@ -181,7 +181,7 @@ pub(crate) async fn get_message_events_route(

	Ok(get_message_events::v3::Response {
		start: from.to_string(),
-		end: next_token.as_ref().map(ToString::to_string),
+		end: next_token.as_ref().map(PduCount::to_string),
		chunk,
		state,
	})

@@ -320,7 +320,7 @@ pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Opti
	filter.matches(pdu).then_some(item)
 }

-#[cfg_attr(debug_assertions, conduwuit::ctor)]
+#[cfg_attr(debug_assertions, ctor::ctor)]
 fn _is_sorted() {
	debug_assert!(
		IGNORED_MESSAGE_TYPES.is_sorted(),
@@ -1,5 +1,6 @@
 pub(super) mod account;
 pub(super) mod account_data;
+pub(super) mod admin;
 pub(super) mod alias;
 pub(super) mod appservice;
 pub(super) mod backup;

@@ -42,6 +43,7 @@ pub(super) mod well_known;
 pub use account::full_user_deactivate;
 pub(super) use account::*;
 pub(super) use account_data::*;
+pub(super) use admin::*;
 pub(super) use alias::*;
 pub(super) use appservice::*;
 pub(super) use backup::*;

@@ -54,7 +56,7 @@ pub(super) use keys::*;
 pub(super) use media::*;
 pub(super) use media_legacy::*;
 pub(super) use membership::*;
-pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room};
+pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room};
 pub(super) use message::*;
 pub(super) use openid::*;
 pub(super) use presence::*;
@@ -90,7 +90,7 @@ pub(crate) async fn get_displayname_route(
 		.await
 	{
 		if !services.users.exists(&body.user_id).await {
-			services.users.create(&body.user_id, None)?;
+			services.users.create(&body.user_id, None, None).await?;
 		}
 
 		services

@@ -189,7 +189,7 @@ pub(crate) async fn get_avatar_url_route(
 		.await
 	{
 		if !services.users.exists(&body.user_id).await {
-			services.users.create(&body.user_id, None)?;
+			services.users.create(&body.user_id, None, None).await?;
 		}
 
 		services

@@ -248,7 +248,7 @@ pub(crate) async fn get_profile_route(
 		.await
 	{
 		if !services.users.exists(&body.user_id).await {
-			services.users.create(&body.user_id, None)?;
+			services.users.create(&body.user_id, None, None).await?;
 		}
 
 		services
@@ -117,7 +117,7 @@ async fn paginate_relations_with_filter(
 		| Direction::Backward => PduCount::max(),
 	});
 
-	let to: Option<PduCount> = to.map(str::parse).flat_ok();
+	let to: Option<PduCount> = to.map(str::parse).transpose()?;
 
 	// Use limit or else 30, with maximum 100
 	let limit: usize = limit

@@ -129,6 +129,11 @@ async fn paginate_relations_with_filter(
 	// Spec (v1.10) recommends depth of at least 3
 	let depth: u8 = if recurse { 3 } else { 1 };
 
+	// Check if this is a thread request
+	let is_thread = filter_rel_type
+		.as_ref()
+		.is_some_and(|rel| *rel == RelationType::Thread);
+
 	let events: Vec<_> = services
 		.rooms
 		.pdu_metadata
@@ -152,23 +157,58 @@ async fn paginate_relations_with_filter(
 		.collect()
 		.await;
 
-	let next_batch = match dir {
-		| Direction::Forward => events.last(),
-		| Direction::Backward => events.first(),
-	}
-	.map(at!(0))
-	.as_ref()
-	.map(ToString::to_string);
+	// For threads, check if we should include the root event
+	let mut root_event = None;
+	if is_thread && dir == Direction::Backward {
+		// Check if we've reached the beginning of the thread
+		// (fewer events than requested means we've exhausted the thread)
+		if events.len() < limit {
+			// Try to get the thread root event
+			if let Ok(root_pdu) = services.rooms.timeline.get_pdu(target).await {
+				// Check visibility
+				if services
+					.rooms
+					.state_accessor
+					.user_can_see_event(sender_user, room_id, target)
+					.await
+				{
+					// Store the root event to add to the response
+					root_event = Some(root_pdu);
+				}
+			}
+		}
+	}
+
+	// Determine if there are more events to fetch
+	let has_more = if root_event.is_some() {
+		false // We've included the root, no more events
+	} else {
+		// Check if we got a full page of results (might be more)
+		events.len() >= limit
+	};
+
+	let next_batch = if has_more {
+		match dir {
+			| Direction::Forward => events.last(),
+			| Direction::Backward => events.first(),
+		}
+		.map(|(count, _)| count.to_string())
+	} else {
+		None
+	};
+
+	// Build the response chunk with thread root if needed
+	let chunk: Vec<_> = root_event
+		.into_iter()
+		.map(Event::into_format)
+		.chain(events.into_iter().map(at!(1)).map(Event::into_format))
+		.collect();
 
 	Ok(get_relating_events::v1::Response {
 		next_batch,
 		prev_batch: from.map(Into::into),
 		recursion_depth: recurse.then_some(depth.into()),
-		chunk: events
-			.into_iter()
-			.map(at!(1))
-			.map(Event::into_format)
-			.collect(),
+		chunk,
 	})
 }
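A minimal sketch of the next_batch decision introduced above (the function and names here are illustrative, not the crate's API): once the thread root has been included the pagination is exhausted; otherwise a full page suggests more events may follow.

    fn next_batch(root_included: bool, fetched: usize, limit: usize, boundary: Option<u64>) -> Option<String> {
        // A full page without the root means the caller should page again.
        let has_more = !root_included && fetched >= limit;
        has_more.then(|| boundary.map(|count| count.to_string())).flatten()
    }

    fn main() {
        assert_eq!(next_batch(true, 3, 10, Some(7)), None);
        assert_eq!(next_batch(false, 10, 10, Some(7)), Some("7".to_owned()));
    }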
@@ -18,7 +18,7 @@ pub(crate) async fn get_room_event_route(
 	let event = services
 		.rooms
 		.timeline
-		.get_pdu(event_id)
+		.get_remote_pdu(room_id, event_id)
 		.map_err(|_| err!(Request(NotFound("Event {} not found.", event_id))));
 
 	let visible = services

@@ -33,11 +33,6 @@ pub(crate) async fn get_room_event_route(
 		return Err!(Request(Forbidden("You don't have permission to view this event.")));
 	}
 
-	debug_assert!(
-		event.event_id() == event_id && event.room_id() == room_id,
-		"Fetched PDU must match requested"
-	);
-
 	event.add_age().ok();
 
 	Ok(get_room_event::v3::Response { event: event.into_format() })
@@ -3,13 +3,14 @@ use std::time::Duration;
 use axum::extract::State;
 use axum_client_ip::InsecureClientIp;
 use conduwuit::{
-	Err, Error, Result, debug, err, info, utils,
-	utils::{ReadyExt, hash},
+	Err, Error, Result, debug, err, info,
+	utils::{self, ReadyExt, hash},
 };
-use conduwuit_service::uiaa::SESSION_ID_LENGTH;
+use conduwuit_core::{debug_error, debug_warn};
+use conduwuit_service::{Services, uiaa::SESSION_ID_LENGTH};
 use futures::StreamExt;
 use ruma::{
-	UserId,
+	OwnedUserId, UserId,
 	api::client::{
 		session::{
 			get_login_token,
@@ -49,6 +50,154 @@ pub(crate) async fn get_login_types_route(
 	]))
 }
 
+/// Authenticates the given user by its ID and its password.
+///
+/// Returns the user ID if successful, and an error otherwise.
+#[tracing::instrument(skip_all, fields(%user_id), name = "password")]
+pub(crate) async fn password_login(
+	services: &Services,
+	user_id: &UserId,
+	lowercased_user_id: &UserId,
+	password: &str,
+) -> Result<OwnedUserId> {
+	// Restrict login to accounts only of type 'password', including untyped
+	// legacy accounts which are equivalent to 'password'.
+	if services
+		.users
+		.origin(user_id)
+		.await
+		.is_ok_and(|origin| origin != "password")
+	{
+		return Err!(Request(Forbidden("Account does not permit password login.")));
+	}
+
+	let (hash, user_id) = match services.users.password_hash(user_id).await {
+		| Ok(hash) => (hash, user_id),
+		| Err(_) => services
+			.users
+			.password_hash(lowercased_user_id)
+			.await
+			.map(|hash| (hash, lowercased_user_id))
+			.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?,
+	};
+
+	if hash.is_empty() {
+		return Err!(Request(UserDeactivated("The user has been deactivated")));
+	}
+
+	hash::verify_password(password, &hash)
+		.inspect_err(|e| debug_error!("{e}"))
+		.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
+
+	Ok(user_id.to_owned())
+}
+
+/// Authenticates the given user through the configured LDAP server.
+///
+/// Creates the user if the user is found in the LDAP and does not already
+/// have an account.
+#[tracing::instrument(skip_all, fields(%user_id), name = "ldap")]
+pub(super) async fn ldap_login(
+	services: &Services,
+	user_id: &UserId,
+	lowercased_user_id: &UserId,
+	password: &str,
+) -> Result<OwnedUserId> {
+	let (user_dn, is_ldap_admin) = match services.config.ldap.bind_dn.as_ref() {
+		| Some(bind_dn) if bind_dn.contains("{username}") =>
+			(bind_dn.replace("{username}", lowercased_user_id.localpart()), false),
+		| _ => {
+			debug!("Searching user in LDAP");
+
+			let dns = services.users.search_ldap(user_id).await?;
+			if dns.len() >= 2 {
+				return Err!(Ldap("LDAP search returned two or more results"));
+			}
+
+			let Some((user_dn, is_admin)) = dns.first() else {
+				return password_login(services, user_id, lowercased_user_id, password).await;
+			};
+
+			(user_dn.clone(), *is_admin)
+		},
+	};
+
+	let user_id = services
+		.users
+		.auth_ldap(&user_dn, password)
+		.await
+		.map(|()| lowercased_user_id.to_owned())?;
+
+	// LDAP users are automatically created on first login attempt. This is a
+	// very common feature that can be seen on many services using an LDAP
+	// provider for their users (synapse, Nextcloud, Jellyfin, ...).
+	//
+	// LDAP users are created with a dummy but non-empty password, because an
+	// empty password is reserved for deactivated accounts. The conduwuit
+	// password field will never be read to login an LDAP user, so it's not an
+	// issue.
+	if !services.users.exists(lowercased_user_id).await {
+		services
+			.users
+			.create(lowercased_user_id, Some("*"), Some("ldap"))
+			.await?;
+	}
+
+	let is_conduwuit_admin = services.admin.user_is_admin(lowercased_user_id).await;
+
+	if is_ldap_admin && !is_conduwuit_admin {
+		services.admin.make_user_admin(lowercased_user_id).await?;
+	} else if !is_ldap_admin && is_conduwuit_admin {
+		services.admin.revoke_admin(lowercased_user_id).await?;
+	}
+
+	Ok(user_id)
+}
+
+pub(crate) async fn handle_login(
+	services: &Services,
+	body: &Ruma<login::v3::Request>,
+	identifier: Option<&uiaa::UserIdentifier>,
+	password: &str,
+	user: Option<&String>,
+) -> Result<OwnedUserId> {
+	debug!("Got password login type");
+	let user_id =
+		if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
+			UserId::parse_with_server_name(user_id, &services.config.server_name)
+		} else if let Some(user) = user {
+			UserId::parse_with_server_name(user, &services.config.server_name)
+		} else {
+			return Err!(Request(Unknown(
+				debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
+			)));
+		}
+		.map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;
+
+	let lowercased_user_id = UserId::parse_with_server_name(
+		user_id.localpart().to_lowercase(),
+		&services.config.server_name,
+	)?;
+
+	if !services.globals.user_is_local(&user_id)
+		|| !services.globals.user_is_local(&lowercased_user_id)
+	{
+		return Err!(Request(Unknown("User ID does not belong to this homeserver")));
+	}
+
+	if cfg!(feature = "ldap") && services.config.ldap.enable {
+		match Box::pin(ldap_login(services, &user_id, &lowercased_user_id, password)).await {
+			| Ok(user_id) => Ok(user_id),
+			| Err(err) if services.config.ldap.ldap_only => Err(err),
+			| Err(err) => {
+				debug_warn!("{err}");
+				password_login(services, &user_id, &lowercased_user_id, password).await
+			},
+		}
+	} else {
+		password_login(services, &user_id, &lowercased_user_id, password).await
+	}
+}
+
 /// # `POST /_matrix/client/v3/login`
 ///
 /// Authenticates the user and returns an access token it can use in subsequent
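A minimal sketch of the case-insensitive fallback implemented by password_login above (the lookup table is hypothetical): the ID is tried as typed, then with a lowercased localpart, before the login is rejected.

    fn lookup(user_id: &str) -> Option<&'static str> {
        // Hypothetical password-hash store keyed by user ID.
        match user_id {
            "@alice:example.org" => Some("hash-a"),
            _ => None,
        }
    }

    fn main() {
        let typed = "@Alice:example.org";
        let hash = lookup(typed).or_else(|| lookup(&typed.to_lowercase()));
        assert_eq!(hash, Some("hash-a"));
    }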
@@ -80,70 +229,7 @@ pub(crate) async fn login_route(
 			password,
 			user,
 			..
-		}) => {
-			debug!("Got password login type");
-			let user_id =
-				if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
-					UserId::parse_with_server_name(user_id, &services.config.server_name)
-				} else if let Some(user) = user {
-					UserId::parse_with_server_name(user, &services.config.server_name)
-				} else {
-					return Err!(Request(Unknown(
-						debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
-					)));
-				}
-				.map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;
-
-			let lowercased_user_id = UserId::parse_with_server_name(
-				user_id.localpart().to_lowercase(),
-				&services.config.server_name,
-			)?;
-
-			if !services.globals.user_is_local(&user_id)
-				|| !services.globals.user_is_local(&lowercased_user_id)
-			{
-				return Err!(Request(Unknown("User ID does not belong to this homeserver")));
-			}
-
-			// first try the username as-is
-			let hash = services
-				.users
-				.password_hash(&user_id)
-				.await
-				.inspect_err(|e| debug!("{e}"));
-
-			match hash {
-				| Ok(hash) => {
-					if hash.is_empty() {
-						return Err!(Request(UserDeactivated("The user has been deactivated")));
-					}
-
-					hash::verify_password(password, &hash)
-						.inspect_err(|e| debug!("{e}"))
-						.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
-
-					user_id
-				},
-				| Err(_e) => {
-					let hash_lowercased_user_id = services
-						.users
-						.password_hash(&lowercased_user_id)
-						.await
-						.inspect_err(|e| debug!("{e}"))
-						.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
-
-					if hash_lowercased_user_id.is_empty() {
-						return Err!(Request(UserDeactivated("The user has been deactivated")));
-					}
-
-					hash::verify_password(password, &hash_lowercased_user_id)
-						.inspect_err(|e| debug!("{e}"))
-						.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
-
-					lowercased_user_id
-				},
-			}
-		},
+		}) => handle_login(&services, &body, identifier.as_ref(), password, user.as_ref()).await?,
 		| login::v3::LoginInfo::Token(login::v3::Token { token }) => {
 			debug!("Got token login type");
 			if !services.server.config.login_via_existing_session {
@@ -198,8 +284,8 @@ pub(crate) async fn login_route(
 		.clone()
 		.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
 
-	// Generate a new token for the device
-	let token = utils::random_string(TOKEN_LENGTH);
+	// Generate a new token for the device (ensuring no collisions)
+	let token = services.users.generate_unique_token().await;
 
 	// Determine if device_id was provided and exists in the db for this user
 	let device_exists = if body.device_id.is_some() {
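A minimal sketch of what a collision-checked generator like generate_unique_token can look like; the predicate and token length here are assumptions, not the users service's actual interface.

    use rand::{Rng, distributions::Alphanumeric};

    fn random_string(length: usize) -> String {
        rand::thread_rng()
            .sample_iter(&Alphanumeric)
            .take(length)
            .map(char::from)
            .collect()
    }

    fn generate_unique_token(token_exists: impl Fn(&str) -> bool) -> String {
        loop {
            let token = random_string(32);
            // Retry until the token is not already assigned elsewhere.
            if !token_exists(&token) {
                return token;
            }
        }
    }

    fn main() {
        let token = generate_unique_token(|_| false);
        assert_eq!(token.len(), 32);
    }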
@@ -430,7 +430,7 @@ async fn handle_left_room(
 		.ok();
 
 	// Left before last sync
-	if Some(since) >= left_count {
+	if (Some(since) >= left_count && !include_leave) || Some(next_batch) < left_count {
 		return Ok(None);
 	}
 

@@ -45,6 +45,7 @@ use crate::{
 type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
 const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
 
+#[allow(clippy::cognitive_complexity)]
 /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
 ///
 /// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)
@@ -292,7 +292,7 @@ pub(crate) async fn get_timezone_key_route(
 		.await
 	{
 		if !services.users.exists(&body.user_id).await {
-			services.users.create(&body.user_id, None)?;
+			services.users.create(&body.user_id, None, None).await?;
 		}
 
 		services

@@ -352,7 +352,7 @@ pub(crate) async fn get_profile_key_route(
 		.await
 	{
 		if !services.users.exists(&body.user_id).await {
-			services.users.create(&body.user_id, None)?;
+			services.users.create(&body.user_id, None, None).await?;
 		}
 
 		services
@@ -58,6 +58,7 @@ pub(crate) async fn get_supported_versions_route(
 			("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */
 			("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */
 			("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */
+			("uk.timedout.msc4323".to_owned(), true), /* agnostic suspend (https://github.com/matrix-org/matrix-spec-proposals/pull/4323) */
 		]),
 	};
 

@@ -184,6 +184,8 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
 			"/_matrix/client/unstable/im.nheko.summary/rooms/:room_id_or_alias/summary",
 			get(client::get_room_summary_legacy)
 		)
+		.ruma_route(&client::get_suspended_status)
+		.ruma_route(&client::put_suspended_status)
 		.ruma_route(&client::well_known_support)
 		.ruma_route(&client::well_known_client)
 		.route("/_conduwuit/server_version", get(client::conduwuit_server_version))
@@ -5,6 +5,14 @@ use axum_extra::{
 	typed_header::TypedHeaderRejectionReason,
 };
 use conduwuit::{Err, Error, Result, debug_error, err, warn};
+use futures::{
+	TryFutureExt,
+	future::{
+		Either::{Left, Right},
+		select_ok,
+	},
+	pin_mut,
+};
 use ruma::{
 	CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
 	api::{

@@ -54,17 +62,7 @@ pub(super) async fn auth(
 		| None => request.query.access_token.as_deref(),
 	};
 
-	let token = if let Some(token) = token {
-		match services.appservice.find_from_token(token).await {
-			| Some(reg_info) => Token::Appservice(Box::new(reg_info)),
-			| _ => match services.users.find_from_token(token).await {
-				| Ok((user_id, device_id)) => Token::User((user_id, device_id)),
-				| _ => Token::Invalid,
-			},
-		}
-	} else {
-		Token::None
-	};
+	let token = find_token(services, token).await?;
 
 	if metadata.authentication == AuthScheme::None {
 		match metadata {
@@ -342,3 +340,25 @@ async fn parse_x_matrix(request: &mut Request) -> Result<XMatrix> {
 
 	Ok(x_matrix)
 }
+
+async fn find_token(services: &Services, token: Option<&str>) -> Result<Token> {
+	let Some(token) = token else {
+		return Ok(Token::None);
+	};
+
+	let user_token = services.users.find_from_token(token).map_ok(Token::User);
+
+	let appservice_token = services
+		.appservice
+		.find_from_token(token)
+		.map_ok(Box::new)
+		.map_ok(Token::Appservice);
+
+	pin_mut!(user_token, appservice_token);
+	// Returns Ok if either token type succeeds, Err only if both fail
+	match select_ok([Left(user_token), Right(appservice_token)]).await {
+		| Err(e) if !e.is_not_found() => Err(e),
+		| Ok((token, _)) => Ok(token),
+		| _ => Ok(Token::Invalid),
+	}
+}
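find_token above races the user-token and appservice-token lookups; futures::future::select_ok resolves with the first future that succeeds and only fails once every input has failed. A self-contained illustration of that behavior:

    use futures::{executor::block_on, future::{ready, select_ok}};

    fn main() {
        let failing = Box::pin(ready(Err::<u8, &str>("not found")));
        let succeeding = Box::pin(ready(Ok::<u8, &str>(42)));
        // Resolves with the first Ok; the unfinished futures are returned too.
        let (value, _remaining) = block_on(select_ok([failing, succeeding])).unwrap();
        assert_eq!(value, 42);
    }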
@@ -1,12 +1,13 @@
 #![allow(deprecated)]
 
-use std::borrow::Borrow;
+use std::{borrow::Borrow, time::Instant, vec};
 
 use axum::extract::State;
 use conduwuit::{
-	Err, Result, at, err,
+	Err, Event, Result, at, debug, err, info,
 	matrix::event::gen_event_id_canonical_json,
-	utils::stream::{IterStream, TryBroadbandExt},
+	trace,
+	utils::stream::{BroadbandExt, IterStream, TryBroadbandExt},
 	warn,
 };
 use conduwuit_service::Services;

@@ -25,12 +26,14 @@ use serde_json::value::{RawValue as RawJsonValue, to_raw_value};
 use crate::Ruma;
 
 /// helper method for /send_join v1 and v2
+#[tracing::instrument(skip(services, pdu, omit_members), fields(room_id = room_id.as_str(), origin = origin.as_str()))]
 async fn create_join_event(
 	services: &Services,
 	origin: &ServerName,
 	room_id: &RoomId,
 	pdu: &RawJsonValue,
-) -> Result<create_join_event::v1::RoomState> {
+	omit_members: bool,
+) -> Result<create_join_event::v2::RoomState> {
 	if !services.rooms.metadata.exists(room_id).await {
 		return Err!(Request(NotFound("Room is unknown to this server.")));
 	}
@@ -53,8 +56,10 @@ async fn create_join_event(
 
 	// We do not add the event_id field to the pdu here because of signature and
 	// hashes checks
+	trace!("Getting room version");
 	let room_version_id = services.rooms.state.get_room_version(room_id).await?;
 
+	trace!("Generating event ID and converting to canonical json");
 	let Ok((event_id, mut value)) = gen_event_id_canonical_json(pdu, &room_version_id) else {
 		// Event could not be converted to canonical json
 		return Err!(Request(BadJson("Could not convert event to canonical json.")));

@@ -103,7 +108,6 @@ async fn create_join_event(
 		)));
 	}
 
-	// ACL check sender user server name
 	let sender: OwnedUserId = serde_json::from_value(
 		value
 			.get("sender")

@@ -113,12 +117,6 @@ async fn create_join_event(
 	)
 	.map_err(|e| err!(Request(BadJson(warn!("sender property is not a valid user ID: {e}")))))?;
 
-	services
-		.rooms
-		.event_handler
-		.acl_check(sender.server_name(), room_id)
-		.await?;
-
 	// check if origin server is trying to send for another server
 	if sender.server_name() != origin {
 		return Err!(Request(Forbidden("Not allowed to join on behalf of another server.")));

@@ -180,11 +178,6 @@ async fn create_join_event(
 		}
 	}
 
-	services
-		.server_keys
-		.hash_and_sign_event(&mut value, &room_version_id)
-		.map_err(|e| err!(Request(InvalidParam(warn!("Failed to sign send_join event: {e}")))))?;
-
 	let origin: OwnedServerName = serde_json::from_value(
 		value
 			.get("origin")

@@ -194,6 +187,12 @@ async fn create_join_event(
 	)
 	.map_err(|e| err!(Request(BadJson("Event has an invalid origin server name: {e}"))))?;
 
+	trace!("Signing send_join event");
+	services
+		.server_keys
+		.hash_and_sign_event(&mut value, &room_version_id)
+		.map_err(|e| err!(Request(InvalidParam(warn!("Failed to sign send_join event: {e}")))))?;
+
 	let mutex_lock = services
 		.rooms
 		.event_handler

@@ -201,6 +200,7 @@ async fn create_join_event(
 		.lock(room_id)
 		.await;
 
+	trace!("Acquired send_join mutex, persisting join event");
 	let pdu_id = services
 		.rooms
 		.event_handler
@@ -210,7 +210,7 @@ async fn create_join_event(
 		.ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?;
 
 	drop(mutex_lock);
-
+	trace!("Fetching current state IDs");
 	let state_ids: Vec<OwnedEventId> = services
 		.rooms
 		.state_accessor

@@ -219,9 +219,23 @@ async fn create_join_event(
 		.collect()
 		.await;
 
+	trace!(%omit_members, "Constructing current state");
 	let state = state_ids
 		.iter()
 		.try_stream()
+		.broad_filter_map(|event_id| async move {
+			if omit_members {
+				if let Ok(e) = event_id.as_ref() {
+					let pdu = services.rooms.timeline.get_pdu(e).await;
+					if pdu.is_ok_and(|p| p.kind().to_cow_str() == "m.room.member") {
+						trace!("omitting member event {e:?} from returned state");
+						// skip members
+						return None;
+					}
+				}
+			}
+			Some(event_id)
+		})
 		.broad_and_then(|event_id| services.rooms.timeline.get_pdu_json(event_id))
 		.broad_and_then(|pdu| {
 			services

@@ -234,6 +248,7 @@ async fn create_join_event(
 		.await?;
 
 	let starting_events = state_ids.iter().map(Borrow::borrow);
+	trace!("Constructing auth chain");
 	let auth_chain = services
 		.rooms
 		.auth_chain
@@ -250,13 +265,37 @@ async fn create_join_event(
 		.try_collect()
 		.boxed()
 		.await?;
+	info!(fast_join = %omit_members, "Sending join event to other servers");
 	services.sending.send_pdu_room(room_id, &pdu_id).await?;
-	Ok(create_join_event::v1::RoomState {
+	debug!("Finished sending join event");
+	let servers_in_room: Option<Vec<_>> = if !omit_members {
+		None
+	} else {
+		trace!("Fetching list of servers in room");
+		let servers: Vec<String> = services
+			.rooms
+			.state_cache
+			.room_servers(room_id)
+			.map(|sn| sn.as_str().to_owned())
+			.collect()
+			.await;
+		// If there's no servers, just add us
+		let servers = if servers.is_empty() {
+			warn!("Failed to find any servers, adding our own server name as a last resort");
+			vec![services.globals.server_name().to_string()]
+		} else {
+			trace!("Found {} servers in room", servers.len());
+			servers
+		};
+		Some(servers)
+	};
+	debug!("Returning send_join data");
+	Ok(create_join_event::v2::RoomState {
 		auth_chain,
 		state,
 		event: to_raw_value(&CanonicalJsonValue::Object(value)).ok(),
+		members_omitted: omit_members,
+		servers_in_room,
 	})
 }
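For reference, a sketch of the abbreviated v2 response produced when omit_members is set, rendered as JSON (the field values here are placeholders):

    use serde_json::json;

    fn main() {
        // In this mode `state` carries no m.room.member events; the joining
        // server is expected to fetch members from `servers_in_room` later.
        let room_state = json!({
            "members_omitted": true,
            "servers_in_room": ["example.org"],
            "auth_chain": [],
            "state": [],
            "event": null,
        });
        println!("{room_state}");
    }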
@@ -294,11 +333,23 @@ pub(crate) async fn create_join_event_v1_route(
 		}
 	}
 
-	let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu)
+	let now = Instant::now();
+	let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu, false)
 		.boxed()
 		.await?;
+	let transformed = create_join_event::v1::RoomState {
+		auth_chain: room_state.auth_chain,
+		state: room_state.state,
+		event: room_state.event,
+	};
+	info!(
+		"Finished sending a join for {} in {} in {:?}",
+		body.origin(),
+		&body.room_id,
+		now.elapsed()
+	);
 
-	Ok(create_join_event::v1::Response { room_state })
+	Ok(create_join_event::v1::Response { room_state: transformed })
 }
 
 /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}`

@@ -329,17 +380,17 @@ pub(crate) async fn create_join_event_v2_route(
 		}
 	}
 
-	let create_join_event::v1::RoomState { auth_chain, state, event } =
-		create_join_event(&services, body.origin(), &body.room_id, &body.pdu)
+	let now = Instant::now();
+	let room_state =
+		create_join_event(&services, body.origin(), &body.room_id, &body.pdu, body.omit_members)
 			.boxed()
 			.await?;
-	let room_state = create_join_event::v2::RoomState {
-		members_omitted: false,
-		auth_chain,
-		state,
-		event,
-		servers_in_room: None,
-	};
+	info!(
+		"Finished sending a join for {} in {} in {:?}",
+		body.origin(),
+		&body.room_id,
+		now.elapsed()
+	);
 
 	Ok(create_join_event::v2::Response { room_state })
 }
@@ -1,3 +1,4 @@
+#![allow(clippy::doc_link_with_quotes)]
 pub mod check;
 pub mod manager;
 pub mod proxy;

@@ -125,9 +126,11 @@ pub struct Config {
 	/// This is the only directory where continuwuity will save its data,
 	/// including media. Note: this was previously "/var/lib/matrix-conduit".
 	///
-	/// YOU NEED TO EDIT THIS.
+	/// YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
+	/// `systemd` service. The service file sets it to `/var/lib/conduwuit`
+	/// using an environment variable and also grants write access.
 	///
-	/// example: "/var/lib/continuwuity"
+	/// example: "/var/lib/conduwuit"
 	pub database_path: PathBuf,
 
 	/// continuwuity supports online database backups using RocksDB's Backup
@@ -711,12 +714,21 @@ pub struct Config {
 	#[serde(default)]
 	pub well_known: WellKnownConfig,
 
-	#[serde(default)]
-	pub allow_jaeger: bool,
+	/// Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
+	/// Jaeger exporter. Traces will be sent via OTLP to a collector (such as
+	/// Jaeger) that supports the OpenTelemetry Protocol.
+	///
+	/// Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
+	/// environment variable (defaults to http://localhost:4318).
+	#[serde(default, alias = "allow_jaeger")]
+	pub allow_otlp: bool,
+
+	/// Filter for OTLP tracing spans. This controls which spans are exported
+	/// to the OTLP collector.
+	///
 	/// default: "info"
-	#[serde(default = "default_jaeger_filter")]
-	pub jaeger_filter: String,
+	#[serde(default = "default_otlp_filter", alias = "jaeger_filter")]
+	pub otlp_filter: String,
 
 	/// If the 'perf_measurements' compile-time feature is enabled, enables
 	/// collecting folded stack trace profile of tracing spans using
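The endpoint mentioned by the new allow_otlp option is supplied through the standard OpenTelemetry environment variable rather than a config key; a small sketch of the lookup with the documented default:

    use std::env;

    fn main() {
        let endpoint = env::var("OTEL_EXPORTER_OTLP_ENDPOINT")
            .unwrap_or_else(|_| "http://localhost:4318".to_owned());
        println!("exporting traces to {endpoint}");
    }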
@@ -1947,6 +1959,10 @@ pub struct Config {
 	pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure:
 		bool,
 
+	// external structure; separate section
+	#[serde(default)]
+	pub ldap: LdapConfig,
+
 	// external structure; separate section
 	#[serde(default)]
 	pub blurhashing: BlurhashConfig,

@@ -2041,6 +2057,114 @@ pub struct BlurhashConfig {
 	pub blurhash_max_raw_size: u64,
 }
 
+#[derive(Clone, Debug, Default, Deserialize)]
+#[config_example_generator(filename = "conduwuit-example.toml", section = "global.ldap")]
+pub struct LdapConfig {
+	/// Whether to enable LDAP login.
+	///
+	/// example: "true"
+	#[serde(default)]
+	pub enable: bool,
+
+	/// Whether to force LDAP authentication or authorize classical password
+	/// login.
+	///
+	/// example: "true"
+	#[serde(default)]
+	pub ldap_only: bool,
+
+	/// URI of the LDAP server.
+	///
+	/// example: "ldap://ldap.example.com:389"
+	///
+	/// default: ""
+	#[serde(default)]
+	pub uri: Option<Url>,
+
+	/// Root of the searches.
+	///
+	/// example: "ou=users,dc=example,dc=org"
+	///
+	/// default: ""
+	#[serde(default)]
+	pub base_dn: String,
+
+	/// Bind DN if anonymous search is not enabled.
+	///
+	/// You can use the variable `{username}` that will be replaced by the
+	/// entered username. In such case, the password used to bind will be the
+	/// one provided for the login and not the one given by
+	/// `bind_password_file`. Beware: automatically granting admin rights will
+	/// not work if you use this direct bind instead of an LDAP search.
+	///
+	/// example: "cn=ldap-reader,dc=example,dc=org" or
+	/// "cn={username},ou=users,dc=example,dc=org"
+	///
+	/// default: ""
+	#[serde(default)]
+	pub bind_dn: Option<String>,
+
+	/// Path to a file on the system that contains the password for the
+	/// `bind_dn`.
+	///
+	/// The server must be able to access the file, and it must not be empty.
+	///
+	/// default: ""
+	#[serde(default)]
+	pub bind_password_file: Option<PathBuf>,
+
+	/// Search filter to limit user searches.
+	///
+	/// You can use the variable `{username}` that will be replaced by the
+	/// entered username for more complex filters.
+	///
+	/// example: "(&(objectClass=person)(memberOf=matrix))"
+	///
+	/// default: "(objectClass=*)"
+	#[serde(default = "default_ldap_search_filter")]
+	pub filter: String,
+
+	/// Attribute to use to uniquely identify the user.
+	///
+	/// example: "uid" or "cn"
+	///
+	/// default: "uid"
+	#[serde(default = "default_ldap_uid_attribute")]
+	pub uid_attribute: String,
+
+	/// Attribute containing the display name of the user.
+	///
+	/// example: "givenName" or "sn"
+	///
+	/// default: "givenName"
+	#[serde(default = "default_ldap_name_attribute")]
+	pub name_attribute: String,
+
+	/// Root of the searches for admin users.
+	///
+	/// Defaults to `base_dn` if empty.
+	///
+	/// example: "ou=admins,dc=example,dc=org"
+	///
+	/// default: ""
+	#[serde(default)]
+	pub admin_base_dn: String,
+
+	/// The LDAP search filter to find administrative users for continuwuity.
+	///
+	/// If left blank, administrative state must be configured manually for
+	/// each user.
+	///
+	/// You can use the variable `{username}` that will be replaced by the
+	/// entered username for more complex filters.
+	///
+	/// example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
+	///
+	/// default: ""
+	#[serde(default)]
+	pub admin_filter: String,
+}
+
 #[derive(Deserialize, Clone, Debug)]
 #[serde(transparent)]
 struct ListeningPort {
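A hedged example of wiring the new [global.ldap] section, shown as a Rust snippet deserializing the equivalent TOML (the struct here is a trimmed stand-in for the full LdapConfig above, not the crate's type):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    struct LdapConfigSketch {
        enable: bool,
        uri: String,
        base_dn: String,
        filter: String,
    }

    fn main() {
        let raw = r#"
            enable = true
            uri = "ldap://ldap.example.com:389"
            base_dn = "ou=users,dc=example,dc=org"
            filter = "(&(objectClass=person)(memberOf=matrix))"
        "#;
        let cfg: LdapConfigSketch = toml::from_str(raw).expect("valid LDAP config");
        println!("{cfg:?}");
    }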
@@ -2252,7 +2376,7 @@ fn default_tracing_flame_filter() -> String {
 		.to_owned()
 }
 
-fn default_jaeger_filter() -> String {
+fn default_otlp_filter() -> String {
 	cfg!(debug_assertions)
 		.then_some("trace,h2=off")
 		.unwrap_or("info")

@@ -2430,3 +2554,9 @@ pub(super) fn default_blurhash_x_component() -> u32 { 4 }
 pub(super) fn default_blurhash_y_component() -> u32 { 3 }
 
 // end recommended & blurhashing defaults
+
+fn default_ldap_search_filter() -> String { "(objectClass=*)".to_owned() }
+
+fn default_ldap_uid_attribute() -> String { String::from("uid") }
+
+fn default_ldap_name_attribute() -> String { String::from("givenName") }
@@ -100,7 +100,7 @@ pub fn trap() {
 
 #[must_use]
 pub fn panic_str(p: &Box<dyn Any + Send>) -> &'static str {
-	p.downcast_ref::<&str>().copied().unwrap_or_default()
+	(**p).downcast_ref::<&str>().copied().unwrap_or_default()
 }
 
 #[inline(always)]

@@ -110,6 +110,8 @@ pub enum Error {
 	InconsistentRoomState(&'static str, ruma::OwnedRoomId),
 	#[error(transparent)]
 	IntoHttp(#[from] ruma::api::error::IntoHttpError),
+	#[error("{0}")]
+	Ldap(Cow<'static, str>),
 	#[error(transparent)]
 	Mxc(#[from] ruma::MxcUriError),
 	#[error(transparent)]

@@ -18,7 +18,7 @@ pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[
 
 /// Experimental, partially supported room versions
 pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] =
-	&[RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
+	&[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
 
 type RoomVersion = (RoomVersionId, RoomVersionStability);
@@ -1,6 +1,10 @@
 #![allow(clippy::cast_possible_wrap, clippy::cast_sign_loss, clippy::as_conversions)]
 
-use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr};
+use std::{
+	cmp::Ordering,
+	fmt::{self, Display},
+	str::FromStr,
+};
 
 use ruma::api::Direction;

@@ -66,6 +66,7 @@ serde.workspace = true
 serde_json.workspace = true
 tokio.workspace = true
 tracing.workspace = true
+ctor.workspace = true
 
 [lints]
 workspace = true
@ -19,7 +19,7 @@ where
|
||||||
S: Stream<Item = K> + Send + 'a,
|
S: Stream<Item = K> + Send + 'a,
|
||||||
K: AsRef<[u8]> + Send + Sync + 'a,
|
K: AsRef<[u8]> + Send + Sync + 'a,
|
||||||
{
|
{
|
||||||
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a;
|
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, K, S> Get<'a, K, S> for S
|
impl<'a, K, S> Get<'a, K, S> for S
|
||||||
|
@ -29,7 +29,7 @@ where
|
||||||
K: AsRef<[u8]> + Send + Sync + 'a,
|
K: AsRef<[u8]> + Send + Sync + 'a,
|
||||||
{
|
{
|
||||||
#[inline]
|
#[inline]
|
||||||
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a {
|
fn get(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
|
||||||
map.get_batch(self)
|
map.get_batch(self)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -39,7 +39,7 @@ where
|
||||||
pub(crate) fn get_batch<'a, S, K>(
|
pub(crate) fn get_batch<'a, S, K>(
|
||||||
self: &'a Arc<Self>,
|
self: &'a Arc<Self>,
|
||||||
keys: S,
|
keys: S,
|
||||||
) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a
|
) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
|
||||||
where
|
where
|
||||||
S: Stream<Item = K> + Send + 'a,
|
S: Stream<Item = K> + Send + 'a,
|
||||||
K: AsRef<[u8]> + Send + Sync + 'a,
|
K: AsRef<[u8]> + Send + Sync + 'a,
|
||||||
|
|
|
@ -10,7 +10,7 @@ use super::stream::is_cached;
|
||||||
use crate::{keyval, keyval::Key, stream};
|
use crate::{keyval, keyval::Key, stream};
|
||||||
|
|
||||||
#[implement(super::Map)]
|
#[implement(super::Map)]
|
||||||
pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send
|
pub fn keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
|
||||||
where
|
where
|
||||||
K: Deserialize<'a> + Send,
|
K: Deserialize<'a> + Send,
|
||||||
{
|
{
|
||||||
|
|
|
@ -15,7 +15,7 @@ use crate::{
|
||||||
pub fn keys_from<'a, K, P>(
|
pub fn keys_from<'a, K, P>(
|
||||||
self: &'a Arc<Self>,
|
self: &'a Arc<Self>,
|
||||||
from: &P,
|
from: &P,
|
||||||
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
|
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
|
||||||
where
|
where
|
||||||
P: Serialize + ?Sized + Debug,
|
P: Serialize + ?Sized + Debug,
|
||||||
K: Deserialize<'a> + Send,
|
K: Deserialize<'a> + Send,
|
||||||
|
@ -40,7 +40,7 @@ where
|
||||||
pub fn keys_raw_from<'a, K, P>(
|
pub fn keys_raw_from<'a, K, P>(
|
||||||
self: &'a Arc<Self>,
|
self: &'a Arc<Self>,
|
||||||
from: &P,
|
from: &P,
|
||||||
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
|
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
|
||||||
where
|
where
|
||||||
P: AsRef<[u8]> + ?Sized + Debug + Sync,
|
P: AsRef<[u8]> + ?Sized + Debug + Sync,
|
||||||
K: Deserialize<'a> + Send,
|
K: Deserialize<'a> + Send,
|
||||||
|
|
|
@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
|
||||||
pub fn keys_prefix<'a, K, P>(
|
pub fn keys_prefix<'a, K, P>(
|
||||||
self: &'a Arc<Self>,
|
self: &'a Arc<Self>,
|
||||||
prefix: &P,
|
prefix: &P,
|
||||||
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
|
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
|
||||||
where
|
where
|
||||||
P: Serialize + ?Sized + Debug,
|
P: Serialize + ?Sized + Debug,
|
||||||
K: Deserialize<'a> + Send,
|
K: Deserialize<'a> + Send,
|
||||||
|
@ -37,7 +37,7 @@ where
|
||||||
pub fn keys_raw_prefix<'a, K, P>(
|
pub fn keys_raw_prefix<'a, K, P>(
|
||||||
self: &'a Arc<Self>,
|
self: &'a Arc<Self>,
|
||||||
prefix: &'a P,
|
prefix: &'a P,
|
||||||
) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a
|
) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
|
||||||
where
|
where
|
||||||
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
|
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
|
||||||
K: Deserialize<'a> + Send + 'a,
|
K: Deserialize<'a> + Send + 'a,
|
||||||
|
@ -50,7 +50,7 @@ where
|
||||||
pub fn raw_keys_prefix<'a, P>(
|
pub fn raw_keys_prefix<'a, P>(
|
||||||
self: &'a Arc<Self>,
|
self: &'a Arc<Self>,
|
||||||
prefix: &'a P,
|
prefix: &'a P,
|
||||||
) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a
|
) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
|
||||||
where
|
where
|
||||||
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
|
P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
|
||||||
{
|
{
|
||||||
|
|
|
@ -17,7 +17,7 @@ where
|
||||||
S: Stream<Item = K> + Send + 'a,
|
S: Stream<Item = K> + Send + 'a,
|
||||||
K: Serialize + Debug,
|
K: Serialize + Debug,
|
||||||
{
|
{
|
||||||
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a;
|
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, K, S> Qry<'a, K, S> for S
|
impl<'a, K, S> Qry<'a, K, S> for S
|
||||||
|
@ -27,7 +27,7 @@ where
|
||||||
K: Serialize + Debug + 'a,
|
K: Serialize + Debug + 'a,
|
||||||
{
|
{
|
||||||
#[inline]
|
#[inline]
|
||||||
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a {
|
fn qry(self, map: &'a Arc<super::Map>) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a {
|
||||||
map.qry_batch(self)
|
map.qry_batch(self)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -37,7 +37,7 @@ where
|
||||||
pub(crate) fn qry_batch<'a, S, K>(
|
pub(crate) fn qry_batch<'a, S, K>(
|
||||||
self: &'a Arc<Self>,
|
self: &'a Arc<Self>,
|
||||||
keys: S,
|
keys: S,
|
||||||
) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a
|
) -> impl Stream<Item = Result<Handle<'a>>> + Send + 'a
|
||||||
where
|
where
|
||||||
S: Stream<Item = K> + Send + 'a,
|
S: Stream<Item = K> + Send + 'a,
|
||||||
K: Serialize + Debug + 'a,
|
K: Serialize + Debug + 'a,
|
||||||
|
|
|
@@ -10,7 +10,7 @@ use super::rev_stream::is_cached;
 use crate::{keyval, keyval::Key, stream};
 
 #[implement(super::Map)]
-pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'_, K>>> + Send
+pub fn rev_keys<'a, K>(self: &'a Arc<Self>) -> impl Stream<Item = Result<Key<'a, K>>> + Send
 where
 	K: Deserialize<'a> + Send,
 {
@@ -15,7 +15,7 @@ use crate::{
 pub fn rev_keys_from<'a, K, P>(
 	self: &'a Arc<Self>,
 	from: &P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
 where
 	P: Serialize + ?Sized + Debug,
 	K: Deserialize<'a> + Send,
@@ -41,7 +41,7 @@ where
 pub fn rev_keys_raw_from<'a, K, P>(
 	self: &'a Arc<Self>,
 	from: &P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
 where
 	P: AsRef<[u8]> + ?Sized + Debug + Sync,
 	K: Deserialize<'a> + Send,
@@ -10,7 +10,7 @@ use crate::keyval::{Key, result_deserialize_key, serialize_key};
 pub fn rev_keys_prefix<'a, K, P>(
 	self: &'a Arc<Self>,
 	prefix: &P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + use<'a, K, P>
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + use<'a, K, P>
 where
 	P: Serialize + ?Sized + Debug,
 	K: Deserialize<'a> + Send,
@@ -37,7 +37,7 @@ where
 pub fn rev_keys_raw_prefix<'a, K, P>(
 	self: &'a Arc<Self>,
 	prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'_, K>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'a, K>>> + Send + 'a
 where
 	P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 	K: Deserialize<'a> + Send + 'a,
@@ -50,7 +50,7 @@ where
 pub fn rev_raw_keys_prefix<'a, P>(
 	self: &'a Arc<Self>,
 	prefix: &'a P,
-) -> impl Stream<Item = Result<Key<'_>>> + Send + 'a
+) -> impl Stream<Item = Result<Key<'a>>> + Send + 'a
 where
 	P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 {
@@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
 #[implement(super::Map)]
 pub fn rev_stream<'a, K, V>(
 	self: &'a Arc<Self>,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
 where
 	K: Deserialize<'a> + Send,
 	V: Deserialize<'a> + Send,
@@ -20,7 +20,7 @@ use crate::{
 pub fn rev_stream_from<'a, K, V, P>(
 	self: &'a Arc<Self>,
 	from: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
 where
 	P: Serialize + ?Sized + Debug,
 	K: Deserialize<'a> + Send,
@@ -55,7 +55,7 @@ where
 pub fn rev_stream_raw_from<'a, K, V, P>(
 	self: &'a Arc<Self>,
 	from: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
 where
 	P: AsRef<[u8]> + ?Sized + Debug + Sync,
 	K: Deserialize<'a> + Send,
@@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
 pub fn rev_stream_prefix<'a, K, V, P>(
 	self: &'a Arc<Self>,
 	prefix: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
 where
 	P: Serialize + ?Sized + Debug,
 	K: Deserialize<'a> + Send,
@@ -50,7 +50,7 @@ where
 pub fn rev_stream_raw_prefix<'a, K, V, P>(
 	self: &'a Arc<Self>,
 	prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
 where
 	P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 	K: Deserialize<'a> + Send + 'a,
@@ -68,7 +68,7 @@ where
 pub fn rev_raw_stream_prefix<'a, P>(
 	self: &'a Arc<Self>,
 	prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
 where
 	P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 {
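As with the key streams above, these `rev_stream*` signatures now name `'a` in the yielded `KeyVal`. Callers drive such streams with `futures::StreamExt`; a toy consumption sketch (assumes the `futures` and `tokio` crates; the tuple items stand in for the crate's real `KeyVal` type):

use futures::{StreamExt, stream};

#[tokio::main]
async fn main() {
	// A stand-in for a Stream<Item = Result<KeyVal, _>> from the database
	let kv = stream::iter(vec![
		Ok::<(&str, &str), &str>(("alice", "a1")),
		Ok(("bob", "b2")),
	]);
	kv.for_each(|item| async move {
		match item {
			Ok((k, v)) => println!("{k} => {v}"),
			Err(e) => eprintln!("error: {e}"),
		}
	})
	.await;
}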
@@ -14,7 +14,7 @@ use crate::{keyval, keyval::KeyVal, stream};
 #[implement(super::Map)]
 pub fn stream<'a, K, V>(
 	self: &'a Arc<Self>,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send
 where
 	K: Deserialize<'a> + Send,
 	V: Deserialize<'a> + Send,
@@ -19,7 +19,7 @@ use crate::{
 pub fn stream_from<'a, K, V, P>(
 	self: &'a Arc<Self>,
 	from: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
 where
 	P: Serialize + ?Sized + Debug,
 	K: Deserialize<'a> + Send,
@@ -53,7 +53,7 @@ where
 pub fn stream_raw_from<'a, K, V, P>(
 	self: &'a Arc<Self>,
 	from: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
 where
 	P: AsRef<[u8]> + ?Sized + Debug + Sync,
 	K: Deserialize<'a> + Send,
@@ -14,7 +14,7 @@ use crate::keyval::{KeyVal, result_deserialize, serialize_key};
 pub fn stream_prefix<'a, K, V, P>(
 	self: &'a Arc<Self>,
 	prefix: &P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + use<'a, K, V, P>
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
 where
 	P: Serialize + ?Sized + Debug,
 	K: Deserialize<'a> + Send,
@@ -50,7 +50,7 @@ where
 pub fn stream_raw_prefix<'a, K, V, P>(
 	self: &'a Arc<Self>,
 	prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'_, K, V>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + 'a
 where
 	P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 	K: Deserialize<'a> + Send + 'a,
@@ -68,7 +68,7 @@ where
 pub fn raw_stream_prefix<'a, P>(
 	self: &'a Arc<Self>,
 	prefix: &'a P,
-) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + 'a
+) -> impl Stream<Item = Result<KeyVal<'a>>> + Send + 'a
 where
 	P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a,
 {
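Several of the `*_from` and `*_prefix` signatures above also carry a `+ use<'a, K, P>` precise-capturing bound, which spells out exactly what the returned `impl Trait` captures; notably the elided lifetime of the `&P` argument is left out, so the stream does not borrow the probe value. A standalone sketch (requires Rust 1.82+ for `use<..>`; toy function, not the crate's API):

// `use<'a, P>` must list every in-scope type parameter, but may omit the
// elided lifetime of `&P`, so the iterator is free to outlive the probe.
fn bytes_after<'a, P>(buf: &'a [u8], _probe: &P) -> impl Iterator<Item = u8> + use<'a, P>
where
	P: ?Sized,
{
	buf.iter().copied()
}

fn main() {
	let data = [1u8, 2, 3];
	let iter = {
		let probe = String::from("transient");
		bytes_after(&data, &probe)
		// `probe` drops at the end of this block; fine, its borrow was
		// never captured by the returned iterator
	};
	for b in iter {
		println!("{b}");
	}
}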
@@ -374,6 +374,10 @@ pub(super) static MAPS: &[Descriptor] = &[
 		name: "userid_masterkeyid",
 		..descriptor::RANDOM_SMALL
 	},
+	Descriptor {
+		name: "userid_origin",
+		..descriptor::RANDOM
+	},
 	Descriptor {
 		name: "userid_password",
 		..descriptor::RANDOM
 	},
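The new `userid_origin` map entry uses the same `Descriptor { name, ..PRESET }` pattern as its neighbours: Rust's struct-update syntax fills every unnamed field from a preset value. A self-contained sketch of that pattern (the `cache_size` field is illustrative, not the crate's real descriptor layout):

#[derive(Clone, Copy)]
struct Descriptor {
	name: &'static str,
	cache_size: usize,
}

// A preset supplying defaults for everything but `name`
const RANDOM: Descriptor = Descriptor { name: "", cache_size: 64 };

fn main() {
	// `..RANDOM` copies all remaining fields from the preset
	let userid_origin = Descriptor { name: "userid_origin", ..RANDOM };
	assert_eq!(userid_origin.cache_size, 64);
	println!("{} uses {} cache units", userid_origin.name, userid_origin.cache_size);
}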
@@ -3,6 +3,8 @@
 extern crate conduwuit_core as conduwuit;
 extern crate rust_rocksdb as rocksdb;
 
+use ctor::{ctor, dtor};
+
 conduwuit::mod_ctor! {}
 conduwuit::mod_dtor! {}
 conduwuit::rustc_flags_capture! {}
@@ -443,7 +443,7 @@ pub(crate) fn into_send_seek(result: stream::State<'_>) -> stream::State<'static
 	unsafe { std::mem::transmute(result) }
 }
 
-fn into_recv_seek(result: stream::State<'static>) -> stream::State<'_> {
+fn into_recv_seek(result: stream::State<'static>) -> stream::State<'static> {
 	// SAFETY: This is to receive the State from the channel; see above.
 	unsafe { std::mem::transmute(result) }
 }
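For reference, with a single `'static` input the elided `-> stream::State<'_>` already resolves to `'static` under the lifetime-elision rules, so this hunk appears to make the signature explicit rather than change behaviour. A tiny illustration of the same elision (toy `State`, not the database type):

struct State<'a>(&'a str);

// `-> State<'_>` here would elide to the one input lifetime, `'static`;
// writing it out just makes the signature self-documenting.
fn pass_through(state: State<'static>) -> State<'static> {
	state
}

fn main() {
	let state = pass_through(State("persistent"));
	println!("{}", state.0);
}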
@@ -326,7 +326,7 @@ fn ser_array() {
 }
 
 #[test]
-#[ignore]
+#[ignore = "arrayvec deserialization is not implemented (separators)"]
 fn de_array() {
 	let a: u64 = 123_456;
 	let b: u64 = 987_654;
@@ -358,7 +358,7 @@ fn de_array() {
 }
 
 #[test]
-#[ignore]
+#[ignore = "Nested sequences are not supported"]
 fn de_complex() {
 	type Key<'a> = (&'a UserId, ArrayVec<u64, 2>, &'a RoomId);
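The only change in these two hunks is attaching a reason string to `#[ignore]`; `cargo test` prints that string next to each skipped test. A minimal sketch of the form:

#[test]
#[ignore = "arrayvec deserialization is not implemented (separators)"]
fn de_array() {
	// `cargo test` lists this as ignored together with the reason above;
	// run ignored tests anyway with: cargo test -- --ignored
}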
@@ -13,13 +13,13 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream {
 	let ret = quote! {
 		pub static RUSTC_FLAGS: [&str; #flag_len] = [#( #flag ),*];
 
-		#[conduwuit_core::ctor]
+		#[ctor]
 		fn _set_rustc_flags() {
 			conduwuit_core::info::rustc::FLAGS.lock().insert(#crate_name, &RUSTC_FLAGS);
 		}
 
 		// static strings have to be yanked on module unload
-		#[conduwuit_core::dtor]
+		#[dtor]
 		fn _unset_rustc_flags() {
 			conduwuit_core::info::rustc::FLAGS.lock().remove(#crate_name);
 		}
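Here the generated code switches from the `conduwuit_core::ctor` re-export path to the plain `#[ctor]`/`#[dtor]` attributes, which the surrounding hunks import directly via `use ctor::{ctor, dtor};`. A standalone sketch of what the ctor crate's attributes do (assumes `ctor` as a dependency):

use std::sync::atomic::{AtomicBool, Ordering};

use ctor::{ctor, dtor};

static LOADED: AtomicBool = AtomicBool::new(false);

#[ctor]
fn on_load() {
	// runs before main(), or when a dynamically loaded module is mapped in
	LOADED.store(true, Ordering::Relaxed);
}

#[dtor]
fn on_unload() {
	// runs at exit or module unload; keep destructors minimal
}

fn main() {
	assert!(LOADED.load(Ordering::Relaxed));
}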
@@ -32,12 +32,12 @@ a cool hard fork of Conduit, a Matrix homeserver written in Rust"""
 section = "net"
 priority = "optional"
 conf-files = ["/etc/conduwuit/conduwuit.toml"]
-maintainer-scripts = "../../debian/"
-systemd-units = { unit-name = "conduwuit", start = false }
+maintainer-scripts = "../../pkg/debian/"
+systemd-units = { unit-name = "conduwuit", start = false, unit-scripts = "../../pkg/" }
 assets = [
-	["../../debian/README.md", "usr/share/doc/conduwuit/README.Debian", "644"],
+	["../../pkg/debian/README.md", "usr/share/doc/conduwuit/README.Debian", "644"],
 	["../../README.md", "usr/share/doc/conduwuit/", "644"],
-	["../../target/release/conduwuit", "usr/sbin/conduwuit", "755"],
+	["../../target/release/conduwuit", "usr/bin/conduwuit", "755"],
 	["../../conduwuit-example.toml", "etc/conduwuit/conduwuit.toml", "640"],
 ]
@@ -56,6 +56,7 @@ standard = [
 	"jemalloc",
 	"jemalloc_conf",
 	"journald",
+	"ldap",
 	"media_thumbnail",
 	"systemd",
 	"url_preview",
@@ -63,7 +64,7 @@ standard = [
 ]
 full = [
 	"standard",
-	"hardened_malloc",
+	# "hardened_malloc", # Conflicts with jemalloc
 	"jemalloc_prof",
 	"perf_measurements",
 	"tokio_console"
@@ -114,6 +115,9 @@ jemalloc_stats = [
 jemalloc_conf = [
 	"conduwuit-core/jemalloc_conf",
 ]
+ldap = [
+	"conduwuit-api/ldap",
+]
 media_thumbnail = [
 	"conduwuit-service/media_thumbnail",
 ]
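The new top-level `ldap` feature simply forwards to `conduwuit-api/ldap` and is now part of the `standard` set. In code, such a flag gates compilation with `cfg(feature = ...)`; a generic sketch (hypothetical function names, compiled with `--features ldap`):

#[cfg(feature = "ldap")]
fn authenticate(user: &str) {
	// the real LDAP bind would happen here
	println!("LDAP auth for {user}");
}

#[cfg(not(feature = "ldap"))]
fn authenticate(user: &str) {
	println!("password auth for {user} (built without LDAP)");
}

fn main() {
	authenticate("alice");
}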
@@ -122,7 +126,8 @@ perf_measurements = [
 	"dep:tracing-flame",
 	"dep:tracing-opentelemetry",
 	"dep:opentelemetry_sdk",
-	"dep:opentelemetry-jaeger",
+	"dep:opentelemetry-otlp",
+	"dep:opentelemetry-jaeger-propagator",
 	"conduwuit-core/perf_measurements",
 	"conduwuit-core/sentry_telemetry",
 ]
@@ -198,11 +203,14 @@ clap.workspace = true
 console-subscriber.optional = true
 console-subscriber.workspace = true
 const-str.workspace = true
+ctor.workspace = true
 log.workspace = true
-opentelemetry-jaeger.optional = true
-opentelemetry-jaeger.workspace = true
 opentelemetry.optional = true
 opentelemetry.workspace = true
+opentelemetry-otlp.optional = true
+opentelemetry-otlp.workspace = true
+opentelemetry-jaeger-propagator.optional = true
+opentelemetry-jaeger-propagator.workspace = true
 opentelemetry_sdk.optional = true
 opentelemetry_sdk.workspace = true
 sentry-tower.optional = true
@@ -222,6 +230,7 @@ tracing-subscriber.workspace = true
 tracing.workspace = true
 tracing-journald = { workspace = true, optional = true }
+
 
 [target.'cfg(all(not(target_env = "msvc"), target_os = "linux"))'.dependencies]
 hardened_malloc-rs.workspace = true
 hardened_malloc-rs.optional = true
@@ -7,6 +7,8 @@ use conduwuit_core::{
 	log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span},
 	result::UnwrapOrErr,
 };
+#[cfg(feature = "perf_measurements")]
+use opentelemetry::trace::TracerProvider;
 use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload};
 
 #[cfg(feature = "perf_measurements")]
@@ -87,30 +89,35 @@ pub(crate) fn init(
 			(None, None)
 		};
 
-		let jaeger_filter = EnvFilter::try_new(&config.jaeger_filter)
-			.map_err(|e| err!(Config("jaeger_filter", "{e}.")))?;
+		let otlp_filter = EnvFilter::try_new(&config.otlp_filter)
+			.map_err(|e| err!(Config("otlp_filter", "{e}.")))?;
 
-		let jaeger_layer = config.allow_jaeger.then(|| {
+		let otlp_layer = config.allow_otlp.then(|| {
 			opentelemetry::global::set_text_map_propagator(
-				opentelemetry_jaeger::Propagator::new(),
+				opentelemetry_jaeger_propagator::Propagator::new(),
 			);
 
-			let tracer = opentelemetry_jaeger::new_agent_pipeline()
-				.with_auto_split_batch(true)
-				.with_service_name(conduwuit_core::name())
-				.install_batch(opentelemetry_sdk::runtime::Tokio)
-				.expect("jaeger agent pipeline");
+			let exporter = opentelemetry_otlp::SpanExporter::builder()
+				.with_http()
+				.build()
+				.expect("Failed to create OTLP exporter");
+
+			let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
+				.with_batch_exporter(exporter)
+				.build();
+
+			let tracer = provider.tracer(conduwuit_core::name());
 
 			let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
 
-			let (jaeger_reload_filter, jaeger_reload_handle) =
-				reload::Layer::new(jaeger_filter.clone());
-			reload_handles.add("jaeger", Box::new(jaeger_reload_handle));
+			let (otlp_reload_filter, otlp_reload_handle) =
+				reload::Layer::new(otlp_filter.clone());
+			reload_handles.add("otlp", Box::new(otlp_reload_handle));
 
-			Some(telemetry.with_filter(jaeger_reload_filter))
+			Some(telemetry.with_filter(otlp_reload_filter))
 		});
 
-		let subscriber = subscriber.with(flame_layer).with(jaeger_layer);
+		let subscriber = subscriber.with(flame_layer).with(otlp_layer);
 		(subscriber, flame_guard)
 	};
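Condensed, the replacement pipeline is: build an OTLP span exporter, wrap it in a batching SDK tracer provider, get a tracer, and hand it to tracing-opentelemetry. A self-contained sketch of that wiring, assuming the same opentelemetry-otlp/opentelemetry_sdk versions this tree pins and the HTTP transport feature enabled (the endpoint comes from the `OTEL_EXPORTER_OTLP_*` environment variables):

use opentelemetry::trace::TracerProvider as _;
use tracing_subscriber::layer::SubscriberExt;

fn main() {
	// OTLP over HTTP; endpoint/headers come from OTEL_EXPORTER_OTLP_* env vars
	let exporter = opentelemetry_otlp::SpanExporter::builder()
		.with_http()
		.build()
		.expect("Failed to create OTLP exporter");

	// batch spans off the hot path instead of exporting synchronously
	let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
		.with_batch_exporter(exporter)
		.build();

	let tracer = provider.tracer("example-service");
	let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
	let subscriber = tracing_subscriber::Registry::default().with(telemetry);
	tracing::subscriber::set_global_default(subscriber).expect("set global subscriber");

	tracing::info_span!("demo").in_scope(|| tracing::info!("hello over OTLP"));
}

Unlike the removed Jaeger agent pipeline, nothing here is Jaeger-specific; only the W3C/Jaeger propagator is kept, via the opentelemetry-jaeger-propagator crate.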
@@ -13,6 +13,7 @@ mod sentry;
 mod server;
 mod signal;
 
+use ctor::{ctor, dtor};
 use server::Server;
 
 rustc_flags_capture! {}
@@ -125,6 +125,7 @@ tokio.workspace = true
 tower.workspace = true
 tower-http.workspace = true
 tracing.workspace = true
+ctor.workspace = true
 
 [target.'cfg(all(unix, target_os = "linux"))'.dependencies]
 sd-notify.workspace = true
@@ -12,6 +12,7 @@ use std::{panic::AssertUnwindSafe, pin::Pin, sync::Arc};
 
 use conduwuit::{Error, Result, Server};
 use conduwuit_service::Services;
+use ctor::{ctor, dtor};
 use futures::{Future, FutureExt, TryFutureExt};
 
 conduwuit::mod_ctor! {}
@@ -30,7 +30,7 @@ use tower::{Service, ServiceExt};
 
 type MakeService = IntoMakeServiceWithConnectInfo<Router, net::SocketAddr>;
 
-const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
+const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
 const FINI_POLL_INTERVAL: Duration = Duration::from_millis(750);
 
 #[tracing::instrument(skip_all, level = "debug")]
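`Ipv4Addr::UNSPECIFIED` is the standard-library constant for 0.0.0.0, so the new form is equivalent but self-describing. A quick check:

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn main() {
	let null_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
	// identical to the literal 0.0.0.0:0 the old code spelled out
	assert_eq!(null_addr, "0.0.0.0:0".parse().unwrap());
	println!("{null_addr}");
}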
Some files were not shown because too many files have changed in this diff.