Compare commits

main..v0.5.0-rc.6

No commits in common. "main" and "v0.5.0-rc.6" have entirely different histories.

335 changed files with 10677 additions and 17776 deletions

View file

@@ -1,2 +0,0 @@
[alias]
xtask = "run --package xtask --"

View file

@@ -23,10 +23,6 @@ indent_size = 2
indent_style = tab
max_line_length = 98
[*.yml]
[{.forgejo/**/*.yml,.github/**/*.yml}]
indent_size = 2
indent_style = space
[*.json]
indent_size = 4
indent_style = space

4
.envrc
View file

@@ -2,8 +2,6 @@
dotenv_if_exists
if [ -f /etc/os-release ] && grep -q '^ID=nixos' /etc/os-release; then
use flake ".#${DIRENV_DEVSHELL:-default}"
fi
use flake ".#${DIRENV_DEVSHELL:-default}"
PATH_add bin

View file

@@ -1,58 +0,0 @@
name: detect-runner-os
description: |
Detect the actual OS name and version of the runner.
Provides separate outputs for name, version, and a combined slug.
outputs:
name:
description: 'OS name (e.g. Ubuntu, Debian)'
value: ${{ steps.detect.outputs.name }}
version:
description: 'OS version (e.g. 22.04, 11)'
value: ${{ steps.detect.outputs.version }}
slug:
description: 'Combined OS slug (e.g. Ubuntu-22.04)'
value: ${{ steps.detect.outputs.slug }}
node_major:
description: 'Major version of Node.js if available (e.g. 22)'
value: ${{ steps.detect.outputs.node_major }}
node_version:
description: 'Full Node.js version if available (e.g. 22.19.0)'
value: ${{ steps.detect.outputs.node_version }}
runs:
using: composite
steps:
- name: Detect runner OS
id: detect
shell: bash
run: |
# Detect OS version (try lsb_release first, fall back to /etc/os-release)
OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)
# Detect OS name and capitalise (try lsb_release first, fall back to /etc/os-release)
OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"' | sed 's/\b\(.\)/\u\1/g')
# Create combined slug
OS_SLUG="${OS_NAME}-${OS_VERSION}"
# Detect Node.js version if available
if command -v node >/dev/null 2>&1; then
NODE_VERSION=$(node --version | sed 's/v//')
NODE_MAJOR=$(echo $NODE_VERSION | cut -d. -f1)
echo "node_version=${NODE_VERSION}" >> $GITHUB_OUTPUT
echo "node_major=${NODE_MAJOR}" >> $GITHUB_OUTPUT
echo "🔍 Detected Node.js: v${NODE_VERSION}"
else
echo "node_version=" >> $GITHUB_OUTPUT
echo "node_major=" >> $GITHUB_OUTPUT
echo "🔍 Node.js not found"
fi
# Set OS outputs
echo "name=${OS_NAME}" >> $GITHUB_OUTPUT
echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT
echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT
# Log detection results
echo "🔍 Detected Runner OS: ${OS_NAME} ${OS_VERSION}"

View file

@@ -2,14 +2,20 @@ name: sccache
description: |
Install sccache for caching builds in GitHub Actions.
inputs:
token:
description: 'A Github PAT'
required: false
runs:
using: composite
steps:
- name: Install sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1
uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
with:
token: ${{ inputs.token }}
- name: Configure sccache
uses: https://github.com/actions/github-script@v8
uses: https://github.com/actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
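Either variant of the install step (the git.tomfos.tr fork or the upstream mozilla-actions release) is consumed the same way downstream: cargo only uses sccache if `RUSTC_WRAPPER` points at it. A hedged sketch of a consuming job, where the job layout and the `GH_PUBLIC_RO` secret wiring are assumptions based on other workflows in this diff:

```yaml
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Install sccache
        uses: ./.forgejo/actions/sccache
        with:
          token: ${{ secrets.GH_PUBLIC_RO }}   # the optional PAT input added above
      - name: Build with sccache wrapping rustc
        env:
          RUSTC_WRAPPER: sccache
        run: cargo build --locked
      - name: Show cache effectiveness
        if: always()
        run: sccache --show-stats
```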

View file

@@ -1,167 +0,0 @@
name: setup-llvm-with-apt
description: |
Set up LLVM toolchain with APT package management and smart caching.
Supports cross-compilation architectures and additional package installation.
Creates symlinks in /usr/bin: clang, clang++, lld, llvm-ar, llvm-ranlib
inputs:
dpkg-arch:
description: 'Debian architecture for cross-compilation (e.g. arm64)'
required: false
default: ''
extra-packages:
description: 'Additional APT packages to install (space-separated)'
required: false
default: ''
llvm-version:
description: 'LLVM version to install'
required: false
default: '20'
outputs:
llvm-version:
description: 'Installed LLVM version'
value: ${{ steps.configure.outputs.version }}
runs:
using: composite
steps:
- name: Detect runner OS
id: runner-os
uses: ./.forgejo/actions/detect-runner-os
- name: Configure cross-compilation architecture
if: inputs.dpkg-arch != ''
shell: bash
run: |
echo "🏗️ Adding ${{ inputs.dpkg-arch }} architecture"
sudo dpkg --add-architecture ${{ inputs.dpkg-arch }}
# Restrict default sources to amd64
sudo sed -i 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list
sudo sed -i 's/^deb https/deb [arch=amd64] https/g' /etc/apt/sources.list
# Add ports sources for foreign architecture
sudo tee /etc/apt/sources.list.d/${{ inputs.dpkg-arch }}.list > /dev/null <<EOF
deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
EOF
echo "✅ Architecture ${{ inputs.dpkg-arch }} configured"
- name: Start LLVM cache group
shell: bash
run: echo "::group::📦 Restoring LLVM cache"
- name: Check for LLVM cache
id: cache
uses: actions/cache@v4
with:
path: |
/usr/bin/clang-*
/usr/bin/clang++-*
/usr/bin/lld-*
/usr/bin/llvm-*
/usr/lib/llvm-*/
/usr/lib/x86_64-linux-gnu/libLLVM*.so*
/usr/lib/x86_64-linux-gnu/libclang*.so*
/etc/apt/sources.list.d/archive_uri-*
/etc/apt/trusted.gpg.d/apt.llvm.org.asc
key: llvm-${{ steps.runner-os.outputs.slug }}-v${{ inputs.llvm-version }}-v3-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}
- name: End LLVM cache group
shell: bash
run: echo "::endgroup::"
- name: Check and install LLVM if needed
id: llvm-setup
shell: bash
run: |
echo "🔍 Checking for LLVM ${{ inputs.llvm-version }}..."
# Check both binaries and libraries exist
if [ -f "/usr/bin/clang-${{ inputs.llvm-version }}" ] && \
[ -f "/usr/bin/clang++-${{ inputs.llvm-version }}" ] && \
[ -f "/usr/bin/lld-${{ inputs.llvm-version }}" ] && \
([ -f "/usr/lib/x86_64-linux-gnu/libLLVM.so.${{ inputs.llvm-version }}.1" ] || \
[ -f "/usr/lib/x86_64-linux-gnu/libLLVM-${{ inputs.llvm-version }}.so.1" ] || \
[ -f "/usr/lib/llvm-${{ inputs.llvm-version }}/lib/libLLVM.so" ]); then
echo "✅ LLVM ${{ inputs.llvm-version }} found and verified"
echo "needs-install=false" >> $GITHUB_OUTPUT
else
echo "📦 LLVM ${{ inputs.llvm-version }} not found or incomplete - installing..."
echo "::group::🔧 Installing LLVM ${{ inputs.llvm-version }}"
wget -O - https://apt.llvm.org/llvm.sh | bash -s -- ${{ inputs.llvm-version }}
echo "::endgroup::"
if [ ! -f "/usr/bin/clang-${{ inputs.llvm-version }}" ]; then
echo "❌ Failed to install LLVM ${{ inputs.llvm-version }}"
exit 1
fi
echo "✅ Installed LLVM ${{ inputs.llvm-version }}"
echo "needs-install=true" >> $GITHUB_OUTPUT
fi
- name: Prepare for additional packages
if: inputs.extra-packages != ''
shell: bash
run: |
# Update APT if LLVM was cached (installer script already does apt-get update)
if [[ "${{ steps.llvm-setup.outputs.needs-install }}" != "true" ]]; then
echo "::group::📦 Running apt-get update (LLVM cached, extra packages needed)"
sudo apt-get update
echo "::endgroup::"
fi
echo "::group::📦 Installing additional packages"
- name: Install additional packages
if: inputs.extra-packages != ''
uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
with:
packages: ${{ inputs.extra-packages }}
version: 1.0
- name: End package installation group
if: inputs.extra-packages != ''
shell: bash
run: echo "::endgroup::"
- name: Configure LLVM environment
id: configure
shell: bash
run: |
echo "::group::🔧 Configuring LLVM ${{ inputs.llvm-version }} environment"
# Create symlinks
sudo ln -sf "/usr/bin/clang-${{ inputs.llvm-version }}" /usr/bin/clang
sudo ln -sf "/usr/bin/clang++-${{ inputs.llvm-version }}" /usr/bin/clang++
sudo ln -sf "/usr/bin/lld-${{ inputs.llvm-version }}" /usr/bin/lld
sudo ln -sf "/usr/bin/llvm-ar-${{ inputs.llvm-version }}" /usr/bin/llvm-ar
sudo ln -sf "/usr/bin/llvm-ranlib-${{ inputs.llvm-version }}" /usr/bin/llvm-ranlib
echo " ✓ Created symlinks"
# Setup library paths
LLVM_LIB_PATH="/usr/lib/llvm-${{ inputs.llvm-version }}/lib"
if [ -d "$LLVM_LIB_PATH" ]; then
echo "LD_LIBRARY_PATH=${LLVM_LIB_PATH}:${LD_LIBRARY_PATH:-}" >> $GITHUB_ENV
echo "LIBCLANG_PATH=${LLVM_LIB_PATH}" >> $GITHUB_ENV
echo "$LLVM_LIB_PATH" | sudo tee "/etc/ld.so.conf.d/llvm-${{ inputs.llvm-version }}.conf" > /dev/null
sudo ldconfig
echo " ✓ Configured library paths"
else
# Fallback to standard library location
if [ -d "/usr/lib/x86_64-linux-gnu" ]; then
echo "LIBCLANG_PATH=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV
echo " ✓ Using fallback library path"
fi
fi
# Set output
echo "version=${{ inputs.llvm-version }}" >> $GITHUB_OUTPUT
echo "::endgroup::"
echo "✅ LLVM ready: $(clang --version | head -1)"

View file

@@ -1,226 +0,0 @@
name: setup-rust
description: |
Set up Rust toolchain with sccache for compilation caching.
Respects rust-toolchain.toml by default or accepts explicit version override.
inputs:
cache-key-suffix:
description: 'Optional suffix for cache keys (e.g. platform identifier)'
required: false
default: ''
rust-components:
description: 'Additional Rust components to install (space-separated)'
required: false
default: ''
rust-target:
description: 'Rust target triple (e.g. x86_64-unknown-linux-gnu)'
required: false
default: ''
rust-version:
description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
required: false
default: '1.87.0'
sccache-cache-limit:
description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
required: false
default: '2G'
github-token:
description: 'GitHub token for downloading sccache from GitHub releases'
required: false
default: ''
outputs:
rust-version:
description: 'Installed Rust version'
value: ${{ steps.rust-setup.outputs.version }}
runs:
using: composite
steps:
- name: Detect runner OS
id: runner-os
uses: ./.forgejo/actions/detect-runner-os
- name: Configure Cargo environment
shell: bash
run: |
# Use workspace-relative paths for better control and consistency
echo "CARGO_HOME=${{ github.workspace }}/.cargo" >> $GITHUB_ENV
echo "CARGO_TARGET_DIR=${{ github.workspace }}/target" >> $GITHUB_ENV
echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> $GITHUB_ENV
echo "RUSTUP_HOME=${{ github.workspace }}/.rustup" >> $GITHUB_ENV
# Limit binstall resolution timeout to avoid GitHub rate limit delays
echo "BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10" >> $GITHUB_ENV
# Ensure directories exist for first run
mkdir -p "${{ github.workspace }}/.cargo"
mkdir -p "${{ github.workspace }}/.sccache"
mkdir -p "${{ github.workspace }}/target"
mkdir -p "${{ github.workspace }}/.rustup"
- name: Start cache restore group
shell: bash
run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"
- name: Cache Cargo registry and git
id: registry-cache
uses: actions/cache@v4
with:
path: |
.cargo/registry/index
.cargo/registry/cache
.cargo/git/db
# Registry cache saved per workflow, restored from any workflow's cache
# Each workflow maintains its own registry that accumulates its needed crates
key: cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ github.workflow }}
restore-keys: |
cargo-registry-${{ steps.runner-os.outputs.slug }}-
- name: Cache toolchain binaries
id: toolchain-cache
uses: actions/cache@v4
with:
path: |
.cargo/bin
.rustup/toolchains
.rustup/update-hashes
# Shared toolchain cache across all Rust versions
key: toolchain-${{ steps.runner-os.outputs.slug }}
- name: Setup sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1
- name: Cache build artifacts
id: build-cache
uses: actions/cache@v4
with:
path: |
target/**/deps
!target/**/deps/*.rlib
target/**/build
target/**/.fingerprint
target/**/incremental
target/**/*.d
/timelord/
# Build artifacts - cache per code change, restore from deps when code changes
key: >-
build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
restore-keys: |
build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
- name: End cache restore group
shell: bash
run: echo "::endgroup::"
- name: Setup Rust toolchain
shell: bash
run: |
# Install rustup if not already cached
if ! command -v rustup &> /dev/null; then
echo "::group::📦 Installing rustup"
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
source "$CARGO_HOME/env"
echo "::endgroup::"
else
echo "✅ rustup already available"
fi
# Setup the appropriate Rust version
if [[ -n "${{ inputs.rust-version }}" ]]; then
echo "::group::📦 Setting up Rust ${{ inputs.rust-version }}"
# Set override first to prevent rust-toolchain.toml from auto-installing
rustup override set ${{ inputs.rust-version }} 2>/dev/null || true
# Check if we need to install/update the toolchain
if rustup toolchain list | grep -q "^${{ inputs.rust-version }}-"; then
rustup update ${{ inputs.rust-version }}
else
rustup toolchain install ${{ inputs.rust-version }} --profile minimal -c cargo,clippy,rustfmt
fi
else
echo "::group::📦 Setting up Rust from rust-toolchain.toml"
rustup show
fi
echo "::endgroup::"
- name: Configure PATH and install tools
shell: bash
env:
GITHUB_TOKEN: ${{ inputs.github-token }}
run: |
# Add .cargo/bin to PATH permanently for all subsequent steps
echo "${{ github.workspace }}/.cargo/bin" >> $GITHUB_PATH
# For this step only, we need to add it to PATH since GITHUB_PATH takes effect in the next step
export PATH="${{ github.workspace }}/.cargo/bin:$PATH"
# Install cargo-binstall for fast binary installations
if command -v cargo-binstall &> /dev/null; then
echo "✅ cargo-binstall already available"
else
echo "::group::📦 Installing cargo-binstall"
curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
echo "::endgroup::"
fi
if command -v prek &> /dev/null; then
echo "✅ prek already available"
else
echo "::group::📦 Installing prek"
# prek isn't regularly published to crates.io, so we use git source
cargo-binstall -y --no-symlinks --git https://github.com/j178/prek prek
echo "::endgroup::"
fi
if command -v timelord &> /dev/null; then
echo "✅ timelord already available"
else
echo "::group::📦 Installing timelord"
cargo-binstall -y --no-symlinks timelord-cli
echo "::endgroup::"
fi
- name: Configure sccache environment
shell: bash
run: |
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
# Configure incremental compilation GC
# If we restored from old cache (partial hit), clean up aggressively
if [[ "${{ steps.build-cache.outputs.cache-hit }}" != "true" ]]; then
echo "♻️ Partial cache hit - enabling cache cleanup"
echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
fi
- name: Install Rust components
if: inputs.rust-components != ''
shell: bash
run: |
echo "📦 Installing components: ${{ inputs.rust-components }}"
rustup component add ${{ inputs.rust-components }}
- name: Install Rust target
if: inputs.rust-target != ''
shell: bash
run: |
echo "📦 Installing target: ${{ inputs.rust-target }}"
rustup target add ${{ inputs.rust-target }}
- name: Output version and summary
id: rust-setup
shell: bash
run: |
RUST_VERSION=$(rustc --version | cut -d' ' -f2)
echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT
echo "📋 Setup complete:"
echo " Rust: $(rustc --version)"
echo " Cargo: $(cargo --version)"
echo " prek: $(prek --version 2>/dev/null || echo 'installed')"
echo " timelord: $(timelord --version 2>/dev/null || echo 'installed')"

View file

@@ -1,99 +1,46 @@
name: timelord
description: |
Use timelord to set file timestamps with git-warp-time fallback for cache misses
Use timelord to set file timestamps
inputs:
key:
description: |
The key to use for caching the timelord data.
required: false
default: ''
This should be unique to the repository and the runner.
required: true
default: timelord-v0
path:
description: |
The path to the directory to be timestamped.
required: false
default: ''
outputs:
database-path:
description: Path to timelord database
value: '${{ env.TIMELORD_CACHE_PATH }}'
This should be the root of the repository.
required: true
default: .
runs:
using: composite
steps:
- name: Set defaults
- name: Cache timelord-cli installation
id: cache-timelord-bin
uses: actions/cache@v3
with:
path: ~/.cargo/bin/timelord
key: timelord-cli-v3.0.1
- name: Install timelord-cli
uses: https://github.com/cargo-bins/cargo-binstall@main
if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
- run: cargo binstall timelord-cli@3.0.1
shell: bash
run: |
echo "TIMELORD_KEY=${{ inputs.key || format('timelord-v1-{0}-{1}', github.repository, hashFiles('**/*.rs', '**/Cargo.toml', '**/Cargo.lock')) }}" >> $GITHUB_ENV
echo "TIMELORD_PATH=${{ inputs.path || '.' }}" >> $GITHUB_ENV
echo "TIMELORD_CACHE_PATH=$HOME/.cache/timelord" >> $GITHUB_ENV
if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
- name: Restore binary cache
id: binary-cache
uses: actions/cache/restore@v4
- name: Load timelord files
uses: actions/cache/restore@v3
with:
path: /usr/share/rust/.cargo/bin
key: timelord-binaries-v1
- name: Check if binaries need installation
path: /timelord/
key: ${{ inputs.key }}
- name: Run timelord to set timestamps
shell: bash
id: check-binaries
run: |
NEED_INSTALL=false
if [ ! -e /usr/share/rust/.cargo/bin/timelord ]; then
echo "timelord-cli not found at /usr/share/rust/.cargo/bin/timelord, needs installation"
NEED_INSTALL=true
fi
if [ ! -e /usr/share/rust/.cargo/bin/git-warp-time ]; then
echo "git-warp-time not found at /usr/share/rust/.cargo/bin/git-warp-time, needs installation"
NEED_INSTALL=true
fi
echo "need-install=$NEED_INSTALL" >> $GITHUB_OUTPUT
- name: Install timelord-cli and git-warp-time
if: steps.check-binaries.outputs.need-install == 'true'
uses: https://github.com/taiki-e/install-action@v2
run: timelord sync --source-dir ${{ inputs.path }} --cache-dir /timelord/
- name: Save timelord
uses: actions/cache/save@v3
with:
tool: git-warp-time,timelord-cli@3.0.1
- name: Save binary cache
if: steps.check-binaries.outputs.need-install == 'true'
uses: actions/cache/save@v4
with:
path: /usr/share/rust/.cargo/bin
key: timelord-binaries-v1
- name: Restore timelord cache with fallbacks
id: timelord-restore
uses: actions/cache/restore@v4
with:
path: ${{ env.TIMELORD_CACHE_PATH }}
key: ${{ env.TIMELORD_KEY }}
restore-keys: |
timelord-v1-${{ github.repository }}-
- name: Initialize timestamps on complete cache miss
if: steps.timelord-restore.outputs.cache-hit != 'true'
shell: bash
run: |
echo "Complete timelord cache miss - running git-warp-time"
git fetch --unshallow
if [ "${{ env.TIMELORD_PATH }}" = "." ]; then
/usr/share/rust/.cargo/bin/git-warp-time --quiet
else
/usr/share/rust/.cargo/bin/git-warp-time --quiet ${{ env.TIMELORD_PATH }}
fi
echo "Git timestamps restored"
- name: Run timelord sync
shell: bash
run: |
mkdir -p ${{ env.TIMELORD_CACHE_PATH }}
/usr/share/rust/.cargo/bin/timelord sync --source-dir ${{ env.TIMELORD_PATH }} --cache-dir ${{ env.TIMELORD_CACHE_PATH }}
- name: Save updated timelord cache immediately
uses: actions/cache/save@v4
with:
path: ${{ env.TIMELORD_CACHE_PATH }}
key: ${{ env.TIMELORD_KEY }}
path: /timelord/
key: ${{ inputs.key }}
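In the rc.6 form shown above both inputs are required, and the call site in this diff's release-image workflow matches that shape:

```yaml
- uses: ./.forgejo/actions/timelord
  with:
    key: timelord-v0   # should be unique to the repository and runner
    path: .            # root of the repository
```

Pinning file timestamps this way keeps cargo from treating restored sources as modified, which avoids spurious rebuilds when the target directory comes from cache.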

View file

@@ -1,55 +0,0 @@
version: 1
x-source: &source forgejo.ellis.link/continuwuation/continuwuity
x-tags:
releases: &tags-releases
tags:
allow:
- "latest"
- "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
- "v[0-9]+\\.[0-9]+"
- "v[0-9]+"
main: &tags-main
tags:
allow:
- "latest"
- "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
- "v[0-9]+\\.[0-9]+"
- "v[0-9]+"
- "main"
commits: &tags-commits
tags:
allow:
- "latest"
- "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
- "v[0-9]+\\.[0-9]+"
- "v[0-9]+"
- "main"
- "sha-[a-f0-9]+"
all: &tags-all
tags:
allow:
- ".*"
# Registry credentials
creds:
- registry: forgejo.ellis.link
user: "{{env \"BUILTIN_REGISTRY_USER\"}}"
pass: "{{env \"BUILTIN_REGISTRY_PASSWORD\"}}"
- registry: registry.gitlab.com
user: "{{env \"GITLAB_USERNAME\"}}"
pass: "{{env \"GITLAB_TOKEN\"}}"
# Global defaults
defaults:
parallel: 3
interval: 2h
digestTags: true
# Sync configuration - each registry gets different image sets
sync:
- source: *source
target: registry.gitlab.com/continuwuity/continuwuity
type: repository
<<: *tags-main

View file

@@ -17,11 +17,10 @@ jobs:
docs:
name: Build and Deploy Documentation
runs-on: ubuntu-latest
if: secrets.CLOUDFLARE_API_TOKEN != ''
steps:
- name: Sync repository
uses: actions/checkout@v5
uses: https://github.com/actions/checkout@v4
with:
persist-credentials: false
fetch-depth: 0
@@ -49,23 +48,10 @@ jobs:
cp ./docs/static/_headers ./public/_headers
echo "Copied .well-known files and _headers to ./public"
- name: Detect runner environment
id: runner-env
uses: ./.forgejo/actions/detect-runner-os
- name: Setup Node.js
if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
uses: https://github.com/actions/setup-node@v5
uses: https://github.com/actions/setup-node@v4
with:
node-version: 22
- name: Cache npm dependencies
uses: actions/cache@v3
with:
path: ~/.npm
key: ${{ steps.runner-env.outputs.slug }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ steps.runner-env.outputs.slug }}-node-
node-version: 20
- name: Install dependencies
run: npm install --save-dev wrangler@latest

View file

@@ -11,16 +11,16 @@ concurrency:
jobs:
build-and-deploy:
name: 🏗️ Build and Deploy
name: Build and Deploy Element Web
runs-on: ubuntu-latest
steps:
- name: 📦 Setup Node.js
uses: https://github.com/actions/setup-node@v5
- name: Setup Node.js
uses: https://code.forgejo.org/actions/setup-node@v4
with:
node-version: "22"
node-version: "20"
- name: 🔨 Clone, setup, and build Element Web
- name: Clone, setup, and build Element Web
run: |
echo "Cloning Element Web..."
git clone https://github.com/maunium/element-web
@@ -64,7 +64,7 @@ jobs:
echo "Checking for build output..."
ls -la webapp/
- name: ⚙️ Create config.json
- name: Create config.json
run: |
cat <<EOF > ./element-web/webapp/config.json
{
@@ -100,25 +100,28 @@ jobs:
echo "Created ./element-web/webapp/config.json"
cat ./element-web/webapp/config.json
- name: 📤 Upload Artifact
uses: https://code.forgejo.org/actions/upload-artifact@v4
- name: Upload Artifact
uses: https://code.forgejo.org/actions/upload-artifact@v3
with:
name: element-web
path: ./element-web/webapp/
retention-days: 14
- name: 🛠️ Install Wrangler
- name: Install Wrangler
run: npm install --save-dev wrangler@latest
- name: 🚀 Deploy to Cloudflare Pages
if: vars.CLOUDFLARE_PROJECT_NAME != ''
id: deploy
- name: Deploy to Cloudflare Pages (Production)
if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
uses: https://github.com/cloudflare/wrangler-action@v3
with:
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
command: >-
pages deploy ./element-web/webapp
--branch="${{ github.ref == 'refs/heads/main' && 'main' || github.head_ref || github.ref_name }}"
--commit-dirty=true
--project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
- name: Deploy to Cloudflare Pages (Preview)
if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
uses: https://github.com/cloudflare/wrangler-action@v3
with:
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"

View file

@@ -1,47 +0,0 @@
name: Mirror Container Images
on:
schedule:
# Run every 2 hours
- cron: "0 */2 * * *"
workflow_dispatch:
inputs:
dry_run:
description: 'Dry run (check only, no actual mirroring)'
required: false
default: false
type: boolean
concurrency:
group: "mirror-images"
cancel-in-progress: true
jobs:
mirror-images:
runs-on: ubuntu-latest
env:
BUILTIN_REGISTRY_USER: ${{ vars.BUILTIN_REGISTRY_USER }}
BUILTIN_REGISTRY_PASSWORD: ${{ secrets.BUILTIN_REGISTRY_PASSWORD }}
GITLAB_USERNAME: ${{ vars.GITLAB_USERNAME }}
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Install regctl
uses: https://forgejo.ellis.link/continuwuation/regclient-actions/regctl-installer@main
with:
binary: regsync
- name: Check what images need mirroring
run: |
echo "Checking images that need mirroring..."
regsync check -c .forgejo/regsync/regsync.yml -v info
- name: Mirror images
if: ${{ !inputs.dry_run }}
run: |
echo "Starting image mirroring..."
regsync once -c .forgejo/regsync/regsync.yml -v info

View file

@@ -1,83 +0,0 @@
name: Checks / Prek
on:
pull_request:
push:
branches:
- main
workflow_dispatch:
permissions:
contents: read
jobs:
fast-checks:
name: Pre-commit & Formatting
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Setup Rust nightly
uses: ./.forgejo/actions/setup-rust
with:
rust-version: nightly
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Run prek
run: |
prek run \
--all-files \
--hook-stage manual \
--show-diff-on-failure \
--color=always \
-v
- name: Check Rust formatting
run: |
cargo +nightly fmt --all -- --check && \
echo "✅ Formatting check passed" || \
exit 1
clippy-and-tests:
name: Clippy and Cargo Tests
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Setup LLVM
uses: ./.forgejo/actions/setup-llvm-with-apt
with:
extra-packages: liburing-dev liburing2
- name: Setup Rust with caching
uses: ./.forgejo/actions/setup-rust
with:
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Run Clippy lints
run: |
cargo clippy \
--workspace \
--features full \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Run Cargo tests
run: |
cargo test \
--workspace \
--features full \
--locked \
--profile test \
--all-targets \
--no-fail-fast

View file

@@ -3,25 +3,15 @@ concurrency:
group: "release-image-${{ github.ref }}"
on:
pull_request:
paths-ignore:
- "*.md"
- "**/*.md"
- ".gitlab-ci.yml"
- ".gitignore"
- "renovate.json"
- "pkg/**"
- "docs/**"
push:
branches:
- main
paths-ignore:
- "*.md"
- "**/*.md"
- ".gitlab-ci.yml"
- ".gitignore"
- "renovate.json"
- "pkg/**"
- "debian/**"
- "docker/**"
- "docs/**"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
@@ -41,7 +31,7 @@ jobs:
steps:
- name: Setting variables
uses: https://github.com/actions/github-script@v8
uses: https://github.com/actions/github-script@v7
id: var
with:
script: |
@@ -53,16 +43,12 @@ jobs:
let images = []
if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
images.push(builtinImage)
} else {
// Fallback to official registry for forks/PRs without credentials
images.push('forgejo.ellis.link/continuwuation/continuwuity')
}
core.setOutput('images', images.join("\n"))
core.setOutput('images_list', images.join(","))
const platforms = ['linux/amd64', 'linux/arm64']
core.setOutput('build_matrix', JSON.stringify({
platform: platforms,
target_cpu: ['base'],
include: platforms.map(platform => { return {
platform,
slug: platform.replace('/', '-')
@@ -80,8 +66,6 @@ jobs:
strategy:
matrix:
{
"target_cpu": ["base"],
"profile": ["release"],
"include":
[
{ "platform": "linux/amd64", "slug": "linux-amd64" },
@ -89,7 +73,6 @@ jobs:
],
"platform": ["linux/amd64", "linux/arm64"],
}
steps:
- name: Echo strategy
run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}'
@@ -97,26 +80,19 @@ jobs:
run: echo '${{ toJSON(matrix) }}'
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: rust-toolchain
uses: ./.forgejo/actions/rust-toolchain
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
- name: Set up QEMU
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: docker/setup-qemu-action@v3
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
- name: Login to builtin registry
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/login-action@v3
with:
registry: ${{ env.BUILTIN_REGISTRY }}
@@ -142,18 +118,15 @@ jobs:
run: |
calculatedSha=$(git rev-parse --short ${{ github.sha }})
echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
echo "Short SHA: $calculatedSha"
- name: Get Git commit timestamps
run: |
timestamp=$(git log -1 --pretty=%ct)
echo "TIMESTAMP=$timestamp" >> $GITHUB_ENV
echo "Commit timestamp: $timestamp"
run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
- uses: ./.forgejo/actions/timelord
id: timelord
with:
key: timelord-v0
path: .
- name: Cache Rust registry
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: actions/cache@v3
with:
path: |
@@ -163,15 +136,13 @@ jobs:
.cargo/registry/src
key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
- name: Cache cargo target
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: cache-cargo-target
uses: actions/cache@v3
with:
path: |
cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
key: cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
cargo-target-${{ matrix.slug }}
key: cargo-target-${{ matrix.slug }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
- name: Cache apt cache
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: cache-apt
uses: actions/cache@v3
with:
@@ -179,7 +150,6 @@ jobs:
var-cache-apt-${{ matrix.slug }}
key: var-cache-apt-${{ matrix.slug }}
- name: Cache apt lib
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: cache-apt-lib
uses: actions/cache@v3
with:
@@ -187,20 +157,18 @@ jobs:
var-lib-apt-${{ matrix.slug }}
key: var-lib-apt-${{ matrix.slug }}
- name: inject cache into docker
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0
with:
cache-map: |
{
".cargo/registry": "/usr/local/cargo/registry",
".cargo/git/db": "/usr/local/cargo/git/db",
"cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}": {
"cargo-target-${{ matrix.slug }}": {
"target": "/app/target",
"id": "cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}"
"id": "cargo-target-${{ matrix.platform }}"
},
"var-cache-apt-${{ matrix.slug }}": "/var/cache/apt",
"var-lib-apt-${{ matrix.slug }}": "/var/lib/apt",
"{{ steps.timelord.outputs.database-path }}":"/timelord"
"var-lib-apt-${{ matrix.slug }}": "/var/lib/apt"
}
skip-extraction: ${{ steps.cache.outputs.cache-hit }}
@@ -211,57 +179,40 @@ jobs:
context: .
file: "docker/Dockerfile"
build-args: |
GIT_COMMIT_HASH=${{ github.sha }}
GIT_COMMIT_HASH=${{ github.sha }})
GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
GIT_REMOTE_URL=${{github.event.repository.html_url }}
GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
CARGO_INCREMENTAL=${{ env.BUILDKIT_ENDPOINT != '' && '1' || '0' }}
platforms: ${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
cache-from: type=gha
# cache-to: type=gha,mode=max
sbom: true
outputs: |
${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', needs.define-variables.outputs.images_list) || format('type=image,"name={0}",push=false', needs.define-variables.outputs.images_list) }}
type=local,dest=/tmp/binaries
outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
env:
SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}
# For publishing multi-platform manifests
- name: Export digest
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
# Binary extracted via local output for all builds
- name: Rename extracted binary
run: mv /tmp/binaries/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
- name: Upload binary artifact
uses: forgejo/upload-artifact@v4
with:
name: conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
path: /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
if-no-files-found: error
- name: Upload digest
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: forgejo/upload-artifact@v4
with:
name: digests-${{ matrix.slug }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 5
retention-days: 1
merge:
runs-on: dind
needs: [define-variables, build-image]
steps:
- name: Download digests
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: forgejo/download-artifact@v4
with:
path: /tmp/digests
@@ -269,7 +220,6 @@ jobs:
merge-multiple: true
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
- name: Login to builtin registry
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/login-action@v3
with:
registry: ${{ env.BUILTIN_REGISTRY }}
@@ -277,33 +227,25 @@ jobs:
password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/setup-buildx-action@v3
with:
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
- name: Extract metadata (tags) for Docker
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
id: meta
uses: docker/metadata-action@v5
with:
tags: |
type=semver,pattern={{version}},prefix=v
type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v
type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v
type=semver,pattern=v{{version}}
type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }}
type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
type=ref,event=pr
type=sha,format=long
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
images: ${{needs.define-variables.outputs.images}}
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
- name: Create manifest list and push
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
working-directory: /tmp/digests
env:
IMAGES: ${{needs.define-variables.outputs.images}}
@@ -321,7 +263,6 @@ jobs:
done
- name: Inspect image
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
env:
IMAGES: ${{needs.define-variables.outputs.images}}
shell: bash
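The body of the "Create manifest list and push" step falls outside the hunk boundaries above; only its closing `done` survives. A hedged sketch of the conventional pattern such a step follows (stitching the per-platform digests into one multi-arch manifest with `docker buildx imagetools`); this is an assumption, not the repository's exact script:

```yaml
- name: Create manifest list and push
  working-directory: /tmp/digests
  env:
    IMAGES: ${{ needs.define-variables.outputs.images }}
  shell: bash
  run: |
    # For each image name, attach every tag computed by the metadata step
    # and reference each per-platform digest file downloaded earlier.
    for IMAGE in $IMAGES; do
      docker buildx imagetools create \
        $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
        $(printf -- "$IMAGE@sha256:%s " *)
    done
```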

View file

@@ -1,132 +0,0 @@
name: Maintenance / Renovate
enable-email-notifications: true
on:
schedule:
# Run at 5am UTC daily to avoid late-night dev
- cron: '0 5 * * *'
workflow_dispatch:
inputs:
dryRun:
description: 'Dry run mode'
required: false
default: ''
type: choice
options:
- ''
- 'extract'
- 'lookup'
- 'full'
logLevel:
description: 'Log level'
required: false
default: 'info'
type: choice
options:
- 'debug'
- 'info'
- 'warning'
- 'critical'
push:
branches:
- main
paths:
# Re-run when config changes
- '.forgejo/workflows/renovate.yml'
- 'renovate.json'
jobs:
renovate:
name: Renovate
runs-on: ubuntu-latest
container:
image: ghcr.io/renovatebot/renovate:41.99.7@sha256:849ab4d4234eaf42a6be0c21b506d33bbe8d108db041cdb2a8489b6c1e214d9a
options: --tmpfs /tmp:exec
steps:
- name: Checkout
uses: actions/checkout@v5
with:
show-progress: false
- name: print node heap
run: /usr/local/renovate/node -e 'console.log(`node heap limit = ${require("v8").getHeapStatistics().heap_size_limit / (1024 * 1024)} Mb`)'
- name: Restore renovate repo cache
uses: actions/cache/restore@v4
with:
path: |
/tmp/renovate/cache/renovate/repository
key: repo-cache-${{ github.run_id }}
restore-keys: |
repo-cache-
- name: Restore renovate package cache
uses: actions/cache/restore@v4
with:
path: |
/tmp/renovate/cache/renovate/renovate-cache-sqlite
key: package-cache-${{ github.run_id }}
restore-keys: |
package-cache-
- name: Restore renovate OSV cache
uses: actions/cache/restore@v4
with:
path: |
/tmp/osv
key: osv-cache-${{ github.run_id }}
restore-keys: |
osv-cache-
- name: Self-hosted Renovate
run: renovate
env:
LOG_LEVEL: ${{ inputs.logLevel || 'info' }}
RENOVATE_DRY_RUN: ${{ inputs.dryRun || 'false' }}
RENOVATE_PLATFORM: forgejo
RENOVATE_ENDPOINT: ${{ github.server_url }}
RENOVATE_AUTODISCOVER: 'false'
RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'
RENOVATE_GIT_TIMEOUT: 60000
RENOVATE_REQUIRE_CONFIG: 'required'
RENOVATE_ONBOARDING: 'false'
RENOVATE_INHERIT_CONFIG: 'true'
RENOVATE_GITHUB_TOKEN_WARN: 'false'
RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}
GITHUB_COM_TOKEN: ${{ secrets.GH_PUBLIC_RO || secrets.GH_TOKEN }}
RENOVATE_REPOSITORY_CACHE: 'enabled'
RENOVATE_X_SQLITE_PACKAGE_CACHE: 'true'
OSV_OFFLINE_ROOT_DIR: /tmp/osv
- name: Save renovate repo cache
if: always()
uses:
actions/cache/save@v4
with:
path: |
/tmp/renovate/cache/renovate/repository
key: repo-cache-${{ github.run_id }}
- name: Save renovate package cache
if: always()
uses: actions/cache/save@v4
with:
path: |
/tmp/renovate/cache/renovate/renovate-cache-sqlite
key: package-cache-${{ github.run_id }}
- name: Save renovate OSV cache
if: always()
uses: actions/cache/save@v4
with:
path: |
/tmp/osv
key: osv-cache-${{ github.run_id }}

View file

@@ -0,0 +1,142 @@
name: Rust Checks
on:
push:
jobs:
format:
name: Format
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
with:
toolchain: "nightly"
components: "rustfmt"
- name: Check formatting
run: |
cargo +nightly fmt --all -- --check
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Clippy
run: |
cargo clippy \
--workspace \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Show sccache stats
if: always()
run: sccache --show-stats
cargo-test:
name: Cargo Test
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Cargo Test
run: |
cargo test \
--workspace \
--locked \
--profile test \
--all-targets \
--no-fail-fast
- name: Show sccache stats
if: always()
run: sccache --show-stats

View file

@@ -5,5 +5,3 @@ f419c64aca300a338096b4e0db4c73ace54f23d0
# use chain_width 60
162948313c212193965dece50b816ef0903172ba
5998a0d883d31b866f7c8c46433a8857eae51a89
# trailing whitespace and newlines
46c193e74b2ce86c48ce802333a0aabce37fd6e9

2
.gitattributes vendored
View file

@@ -84,4 +84,4 @@ Cargo.lock text
*.zst binary
# Text files where line endings should be preserved
*.patch -text
*.patch -text

4
.github/FUNDING.yml vendored
View file

@@ -1,4 +0,0 @@
github: [JadedBlueEyes, nexy7574]
custom:
- https://ko-fi.com/nexy7574
- https://ko-fi.com/JadedBlueEyes

View file

@@ -13,4 +13,3 @@ Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
Timo Kösters <timo@koesters.xyz>
x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>

View file

@@ -1,47 +0,0 @@
default_install_hook_types:
- pre-commit
- commit-msg
default_stages:
- pre-commit
- manual
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: fix-byte-order-marker
- id: check-case-conflict
- id: check-symlinks
- id: destroyed-symlinks
- id: check-yaml
- id: check-json
- id: check-toml
- id: end-of-file-fixer
- id: trailing-whitespace
- id: mixed-line-ending
- id: check-merge-conflict
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.26.0
hooks:
- id: typos
- id: typos
name: commit-msg-typos
stages: [commit-msg]
- repo: https://github.com/crate-ci/committed
rev: v1.1.7
hooks:
- id: committed
- repo: local
hooks:
- id: cargo-fmt
name: cargo fmt
entry: cargo +nightly fmt --
language: system
types: [rust]
pass_filenames: false
stages:
- pre-commit

View file

@@ -1,22 +1,5 @@
[files]
extend-exclude = ["*.csr", "*.lock", "pnpm-lock.yaml"]
[default]
extend-ignore-re = [
"(?Rm)^.*(#|//|<!--)\\s*spellchecker:disable-line(\\s*-->)$", # Ignore a line by making it trail with a `spellchecker:disable-line` comment
"^[0-9a-f]{7,}$", # Commit hashes
# some heuristics for base64 strings
"[A-Za-z0-9+=]{72,}",
"([A-Za-z0-9+=]|\\\\\\s\\*){72,}",
"[0-9+][A-Za-z0-9+]{30,}[a-z0-9+]",
"\\$[A-Z0-9+][A-Za-z0-9+]{6,}[a-z0-9+]",
"\\b[a-z0-9+/=][A-Za-z0-9+/=]{7,}[a-z0-9+/=][A-Z]\\b",
# In the renovate config
".ontainer"
]
extend-exclude = ["*.csr"]
[default.extend-words]
"allocatedp" = "allocatedp"

View file

@@ -7,6 +7,5 @@
"continuwuity",
"homeserver",
"homeservers"
],
"rust-analyzer.cargo.features": ["full"]
]
}

View file

@@ -59,7 +59,7 @@ representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) or email at <tom@tcpip.uk>, <jade@continuwuity.org> and <nex@continuwuity.org> respectively.
reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or email at <tom@tcpip.uk>, <jade@continuwuity.org> and <nex@continuwuity.org> respectively.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the

View file

@@ -1,143 +1,113 @@
# Contributing guide
This page is about contributing to Continuwuity. The
[development](./development.md) and [code style guide](./development/code_style.md) pages may be of interest for you as well.
This page is for about contributing to Continuwuity. The
[development](./development.md) page may be of interest for you as well.
If you would like to work on an [issue][issues] that is not assigned, preferably
ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix],
and comment on it.
### Code Style
### Linting and Formatting
Please review and follow the [code style guide](./development/code_style.md) for formatting, linting, naming conventions, and other code standards.
It is mandatory all your changes satisfy the lints (clippy, rustc, rustdoc, etc)
and your code is formatted via the **nightly** `cargo fmt`. A lot of the
`rustfmt.toml` features depend on nightly toolchain. It would be ideal if they
weren't nightly-exclusive features, but they currently still are. CI's rustfmt
uses nightly.
### Pre-commit Checks
If you need to allow a lint, please make sure it's either obvious as to why
(e.g. clippy saying redundant clone but it's actually required) or it has a
comment saying why. Do not write inefficient code for the sake of satisfying
lints. If a lint is wrong and provides a more inefficient solution or
suggestion, allow the lint and mention that in a comment.
Continuwuity uses pre-commit hooks to enforce various coding standards and catch common issues before they're committed. These checks include:
### Running CI tests locally
- Code formatting and linting
- Typo detection (both in code and commit messages)
- Checking for large files
- Ensuring proper line endings and no trailing whitespace
- Validating YAML, JSON, and TOML files
- Checking for merge conflicts
continuwuity's CI for tests, linting, formatting, audit, etc use
[`engage`][engage]. engage can be installed from nixpkgs or `cargo install
engage`. continuwuity's Nix flake devshell has the nixpkgs engage with `direnv`.
Use `engage --help` for more usage details.
You can run these checks locally by installing [prefligit](https://github.com/j178/prefligit):
To test, format, lint, etc that CI would do, install engage, allow the `.envrc`
file using `direnv allow`, and run `engage`.
All of the tasks are defined at the [engage.toml][engage.toml] file. You can
view all of them neatly by running `engage list`
```bash
# Requires UV: https://docs.astral.sh/uv/getting-started/installation/
# Mac/linux: curl -LsSf https://astral.sh/uv/install.sh | sh
# Windows: powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
If you would like to run only a specific engage task group, use `just`:
# Install prefligit using cargo-binstall
cargo binstall prefligit
- `engage just <group>`
- Example: `engage just lints`
# Install git hooks to run checks automatically
prefligit install
If you would like to run a specific engage task in a specific group, use `just
<GROUP> [TASK]`: `engage just lints cargo-fmt`
# Run all checks
prefligit --all-files
```
The following binaries are used in [`engage.toml`][engage.toml]:
Alternatively, you can use [pre-commit](https://pre-commit.com/):
```bash
# Requires python
# Install pre-commit
pip install pre-commit
# Install the hooks
pre-commit install
# Run all checks manually
pre-commit run --all-files
```
These same checks are run in CI via the prefligit-checks workflow to ensure consistency. These must pass before the PR is merged.
### Running tests locally
Tests, compilation, and linting can be run with standard Cargo commands:
```bash
# Run tests
cargo test
# Check compilation
cargo check --workspace --features full
# Run lints
cargo clippy --workspace --features full
# Auto-fix: cargo clippy --workspace --features full --fix --allow-staged;
# Format code (must use nightly)
cargo +nightly fmt
```
- [`engage`][engage]
- `nix`
- [`direnv`][direnv]
- `rustc`
- `cargo`
- `cargo-fmt`
- `rustdoc`
- `cargo-clippy`
- [`cargo-audit`][cargo-audit]
- [`cargo-deb`][cargo-deb]
- [`lychee`][lychee]
- [`markdownlint-cli`][markdownlint-cli]
- `dpkg`
### Matrix tests
Continuwuity uses [Complement][complement] for Matrix protocol compliance testing. Complement tests are run manually by developers, and documentation on how to run these tests locally is currently being developed.
CI runs [Complement][complement], but currently does not fail if results from
the checked-in results differ with the new results. If your changes are done to
fix Matrix tests, note that in your pull request. If more Complement tests start
failing from your changes, please review the logs (they are uploaded as
artifacts) and determine if they're intended or not.
If your changes are done to fix Matrix tests, please note that in your pull request. If more Complement tests start failing from your changes, please review the logs and determine if they're intended or not.
If you'd like to run Complement locally using Nix, see the
[testing](development/testing.md) page.
[Sytest][sytest] is currently unsupported.
[Sytest][sytest] support will come soon.
### Writing documentation
Continuwuity's website uses [`mdbook`][mdbook] and is deployed via CI using Cloudflare Pages
in the [`documentation.yml`][documentation.yml] workflow file. All documentation is in the `docs/`
directory at the top level.
Continuwuity's website uses [`mdbook`][mdbook] and deployed via CI using GitHub
Pages in the [`documentation.yml`][documentation.yml] workflow file with Nix's
mdbook in the devshell. All documentation is in the `docs/` directory at the top
level. The compiled mdbook website is also uploaded as an artifact.
To build the documentation locally:
To build the documentation using Nix, run: `bin/nix-build-and-cache just .#book`
1. Install mdbook if you don't have it already:
```bash
cargo install mdbook # or cargo binstall, or another method
```
The output of the mdbook generation is in `result/`. mdbooks can be opened in
your browser from the individual HTML files without any web server needed.
2. Build the documentation:
```bash
mdbook build
```
### Inclusivity and Diversity
The output of the mdbook generation is in `public/`. You can open the HTML files directly in your browser without needing a web server.
All **MUST** code and write with inclusivity and diversity in mind. See the
[following page by Google on writing inclusive code and
documentation](https://developers.google.com/style/inclusive-documentation).
This **EXPLICITLY** forbids usage of terms like "blacklist"/"whitelist" and
"master"/"slave", [forbids gender-specific words and
phrases](https://developers.google.com/style/pronouns#gender-neutral-pronouns),
forbids ableist language like "sanity-check", "cripple", or "insane", and
forbids culture-specific language (e.g. US-only holidays or cultures).
### Commit Messages
No exceptions are allowed. Dependencies that may use these terms are allowed but
[do not replicate the name in your functions or
variables](https://developers.google.com/style/inclusive-documentation#write-around).
Continuwuity follows the [Conventional Commits](https://www.conventionalcommits.org/) specification for commit messages. This provides a standardized format that makes the commit history more readable and enables automated tools to generate changelogs.
In addition to language, write and code with the user experience in mind. This
is software that intends to be used by everyone, so make it easy and comfortable
for everyone to use. 🏳️‍⚧️
The basic structure is:
### Variable, comment, function, etc standards
```
<type>[(optional scope)]: <description>
[optional body]
[optional footer(s)]
```
The allowed types for commits are:
- `fix`: Bug fixes
- `feat`: New features
- `docs`: Documentation changes
- `style`: Changes that don't affect the meaning of the code (formatting, etc.)
- `refactor`: Code changes that neither fix bugs nor add features
- `perf`: Performance improvements
- `test`: Adding or fixing tests
- `build`: Changes to the build system or dependencies
- `ci`: Changes to CI configuration
- `chore`: Other changes that don't modify source or test files
Examples:
```
feat: add user authentication
fix(database): resolve connection pooling issue
docs: update installation instructions
```
The project uses the `committed` hook to validate commit messages in pre-commit. This ensures all commits follow the conventional format.
Rust's default style and standards with regards to [function names, variable
names, comments](https://rust-lang.github.io/api-guidelines/naming.html), etc
applies here.
### Creating pull requests
@@ -148,12 +118,6 @@ This prevents us from having to ping once in a while to double check the status
of it, especially when the CI completed successfully and everything so it
*looks* done.
Before submitting a pull request, please ensure:
1. Your code passes all CI checks (formatting, linting, typo detection, etc.)
2. Your code follows the [code style guide](./development/code_style.md)
3. Your commit messages follow the conventional commits format
4. Tests are added for new functionality
5. Documentation is updated if needed
Direct all PRs/MRs to the `main` branch.
@@ -161,13 +125,20 @@ By sending a pull request or patch, you are agreeing that your changes are
allowed to be licenced under the Apache-2.0 licence and all of your conduct is
in line with the Contributor's Covenant, and continuwuity's Code of Conduct.
Contribution by users who violate either of these code of conducts may not have
Contribution by users who violate either of these code of conducts will not have
their contributions accepted. This includes users who have been banned from
continuwuity Matrix rooms for Code of Conduct violations.
continuwuityMatrix rooms for Code of Conduct violations.
[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues
[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org
[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org
[complement]: https://github.com/matrix-org/complement/
[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml
[engage]: https://charles.page.computer.surgery/engage/
[sytest]: https://github.com/matrix-org/sytest/
[cargo-deb]: https://github.com/kornelski/cargo-deb
[lychee]: https://github.com/lycheeverse/lychee
[markdownlint-cli]: https://github.com/igorshubovych/markdownlint-cli
[cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit
[direnv]: https://direnv.net/
[mdbook]: https://rust-lang.github.io/mdBook/
[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml

2238
Cargo.lock generated

File diff suppressed because it is too large

View file

@@ -2,7 +2,7 @@
[workspace]
resolver = "2"
members = ["src/*", "xtask/*"]
members = ["src/*"]
default-members = ["src/*"]
[workspace.package]
@@ -21,7 +21,7 @@ license = "Apache-2.0"
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
rust-version = "1.86.0"
version = "0.5.0-rc.7"
version = "0.5.0-rc.6"
[workspace.metadata.crane]
name = "conduwuit"
@@ -45,18 +45,18 @@ version = "0.3"
features = ["ffi", "std", "union"]
[workspace.dependencies.const-str]
version = "0.7.0"
version = "0.6.2"
[workspace.dependencies.ctor]
version = "0.5.0"
version = "0.2.9"
[workspace.dependencies.cargo_toml]
version = "0.22"
version = "0.21"
default-features = false
features = ["features"]
[workspace.dependencies.toml]
version = "0.9.5"
version = "0.8.14"
default-features = false
features = ["parse"]
@@ -166,8 +166,8 @@ default-features = false
features = ["raw_value"]
# Used for appservice registration files
[workspace.dependencies.serde_yml]
version = "0.0.12"
[workspace.dependencies.serde_yaml]
version = "0.9.34"
# Used to load forbidden room/user regex from config
[workspace.dependencies.serde_regex]
@@ -213,8 +213,6 @@ default-features = false
version = "0.3.19"
default-features = false
features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"]
[workspace.dependencies.tracing-journald]
version = "0.3.1"
[workspace.dependencies.tracing-core]
version = "0.1.33"
default-features = false
@ -352,7 +350,7 @@ version = "0.1.2"
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
#branch = "conduwuit-changes"
rev = "8fb268fa2771dfc3a1c8075ef1246e7c9a0a53fd"
rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
features = [
"compat",
"rand",
@ -383,7 +381,7 @@ features = [
"unstable-msc4121",
"unstable-msc4125",
"unstable-msc4186",
"unstable-msc4203", # sending to-device events to appservices
"unstable-msc4203", # sending to-device events to appservices
"unstable-msc4210", # remove legacy mentions
"unstable-extensible-events",
"unstable-pdu",
@ -391,7 +389,7 @@ features = [
[workspace.dependencies.rust-rocksdb]
git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1"
rev = "99b0319416b64830dd6f8943e1f65e15aeef18bc"
rev = "fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd"
default-features = false
features = [
"multi-threaded-cf",
@ -411,28 +409,25 @@ default-features = false
# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
[workspace.dependencies.opentelemetry]
version = "0.30.0"
version = "0.21.0"
[workspace.dependencies.tracing-flame]
version = "0.2.0"
[workspace.dependencies.tracing-opentelemetry]
version = "0.31.0"
version = "0.22.0"
[workspace.dependencies.opentelemetry_sdk]
version = "0.30.0"
version = "0.21.2"
features = ["rt-tokio"]
[workspace.dependencies.opentelemetry-otlp]
version = "0.30.0"
features = ["http", "trace", "logs", "metrics"]
[workspace.dependencies.opentelemetry-jaeger-propagator]
version = "0.30.0"
[workspace.dependencies.opentelemetry-jaeger]
version = "0.20.0"
features = ["rt-tokio"]
# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.42.0"
version = "0.37.0"
default-features = false
features = [
"backtrace",
@ -448,9 +443,9 @@ features = [
]
[workspace.dependencies.sentry-tracing]
version = "0.42.0"
version = "0.37.0"
[workspace.dependencies.sentry-tower]
version = "0.42.0"
version = "0.37.0"
# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
@ -479,7 +474,7 @@ features = ["use_std"]
version = "0.4"
[workspace.dependencies.nix]
version = "0.30.1"
version = "0.29.0"
default-features = false
features = ["resource"]
@ -501,7 +496,7 @@ version = "0.4.3"
default-features = false
[workspace.dependencies.termimad]
version = "0.34.0"
version = "0.31.2"
default-features = false
[workspace.dependencies.checked_ops]
@ -518,14 +513,6 @@ version = "1.0"
[workspace.dependencies.proc-macro2]
version = "1.0"
[workspace.dependencies.parking_lot]
version = "0.12.4"
features = ["hardware-lock-elision", "deadlock_detection"] # TODO: Check if deadlock_detection has a perf impact, if it does only enable with debug_assertions
# Use this when extending with_lock::WithLock to parking_lot
[workspace.dependencies.lock_api]
version = "0.4.13"
[workspace.dependencies.bytesize]
version = "2.0"
@ -539,21 +526,16 @@ version = "0.2"
version = "0.2"
[workspace.dependencies.minicbor]
version = "2.1.1"
version = "0.26.3"
features = ["std"]
[workspace.dependencies.minicbor-serde]
version = "0.6.0"
version = "0.4.1"
features = ["std"]
[workspace.dependencies.maplit]
version = "1.0.2"
[workspace.dependencies.ldap3]
version = "0.11.5"
default-features = false
features = ["sync", "tls-rustls"]
#
# Patches
#
@ -574,11 +556,11 @@ rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0002-add-tab-completion-callback.patch
# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0001-add-event-for-ctrl.patch
# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50
# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b
[patch.crates-io.rustyline-async]
git = "https://forgejo.ellis.link/continuwuation/rustyline-async"
rev = "e9f01cf8c6605483cb80b3b0309b400940493d7f"
rev = "deaeb0694e2083f53d363b648da06e10fc13900c"
# adds LIFO queue scheduling; this should be updated with PR progress.
[patch.crates-io.event-listener]
@ -598,11 +580,12 @@ rev = "9c8e51510c35077df888ee72a36b4b05637147da"
git = "https://forgejo.ellis.link/continuwuation/hyper-util"
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"
# Allows no-aaaa option in resolv.conf
# Use 1-indexed line numbers when displaying parse error messages
# allows no-aaaa option in resolv.conf
# bumps rust edition and toolchain to 1.86.0 and 2024
# use sat_add on line number errors
[patch.crates-io.resolv-conf]
git = "https://forgejo.ellis.link/continuwuation/resolv-conf"
rev = "ebbbec1cb965b487a0150f5d007e96c05e3d72af"
rev = "200e958941d522a70c5877e3d846f55b5586c68d"
#
# Our crates
@ -654,11 +637,6 @@ package = "conduwuit_build_metadata"
path = "src/build_metadata"
default-features = false
[workspace.dependencies.conduwuit]
package = "conduwuit"
path = "src/main"
###############################################################################
#
# Release profiles
@ -767,6 +745,24 @@ incremental = true
[profile.dev.package.conduwuit_core]
inherits = "dev"
#rustflags = [
# '--cfg', 'conduwuit_mods',
# '-Ztime-passes',
# '-Zmir-opt-level=0',
# '-Ztls-model=initial-exec',
# '-Cprefer-dynamic=true',
# '-Zstaticlib-prefer-dynamic=true',
# '-Zstaticlib-allow-rdylib-deps=true',
# '-Zpacked-bundled-libs=false',
# '-Zplt=true',
# '-Clink-arg=-Wl,--as-needed',
# '-Clink-arg=-Wl,--allow-shlib-undefined',
# '-Clink-arg=-Wl,-z,lazy',
# '-Clink-arg=-Wl,-z,unique',
# '-Clink-arg=-Wl,-z,nodlopen',
# '-Clink-arg=-Wl,-z,nodelete',
#]
[profile.dev.package.conduwuit]
inherits = "dev"
#rustflags = [
@ -856,7 +852,7 @@ unused-qualifications = "warn"
#unused-results = "warn" # TODO
## some sadness
mismatched_lifetime_syntaxes = "allow" # TODO!
elided_named_lifetimes = "allow" # TODO!
let_underscore_drop = "allow"
missing_docs = "allow"
# cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g.
@ -995,6 +991,3 @@ literal_string_with_formatting_args = { level = "allow", priority = 1 }
needless_raw_string_hashes = "allow"
# TODO: Enable this lint & fix all instances
collapsible_if = "allow"


@ -4,10 +4,6 @@
## A community-driven [Matrix](https://matrix.org/) homeserver in Rust
[![Chat on Matrix](https://img.shields.io/matrix/continuwuity%3Acontinuwuity.org?server_fqdn=matrix.continuwuity.org&fetchMode=summary&logo=matrix)](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) [![Join the space](https://img.shields.io/matrix/space%3Acontinuwuity.org?server_fqdn=matrix.continuwuity.org&fetchMode=summary&logo=matrix&label=space)](https://matrix.to/#/#space:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org)
<!-- ANCHOR_END: catchphrase -->
[continuwuity] is a Matrix homeserver written in Rust.
@ -15,13 +11,11 @@ It's a community continuation of the [conduwuit](https://github.com/girlbossceo/
<!-- ANCHOR: body -->
[![forgejo.ellis.link](https://img.shields.io/badge/Ellis%20Git-main+packages-green?style=flat&logo=forgejo&labelColor=fff)](https://forgejo.ellis.link/continuwuation/continuwuity) [![Stars](https://forgejo.ellis.link/continuwuation/continuwuity/badges/stars.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/stars) [![Issues](https://forgejo.ellis.link/continuwuation/continuwuity/badges/issues/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [![Pull Requests](https://forgejo.ellis.link/continuwuation/continuwuity/badges/pulls/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open)
[![forgejo.ellis.link](https://img.shields.io/badge/Ellis%20Git-main+packages-green?style=flat&logo=forgejo&labelColor=fff)](https://forgejo.ellis.link/continuwuation/continuwuity) ![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/stars.svg?style=flat) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/issues/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/pulls/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open)
[![GitHub](https://img.shields.io/badge/GitHub-mirror-blue?style=flat&logo=github&labelColor=fff&logoColor=24292f)](https://github.com/continuwuity/continuwuity) [![Stars](https://img.shields.io/github/stars/continuwuity/continuwuity?style=flat)](https://github.com/continuwuity/continuwuity/stargazers)
[![GitHub](https://img.shields.io/badge/GitHub-mirror-blue?style=flat&logo=github&labelColor=fff&logoColor=24292f)](https://github.com/continuwuity/continuwuity) ![](https://img.shields.io/github/stars/continuwuity/continuwuity?style=flat)
[![GitLab](https://img.shields.io/badge/GitLab-mirror-blue?style=flat&logo=gitlab&labelColor=fff)](https://gitlab.com/continuwuity/continuwuity) [![Stars](https://img.shields.io/gitlab/stars/continuwuity/continuwuity?style=flat)](https://gitlab.com/continuwuity/continuwuity/-/starrers)
[![Codeberg](https://img.shields.io/badge/Codeberg-mirror-2185D0?style=flat&logo=codeberg&labelColor=fff)](https://codeberg.org/continuwuity/continuwuity) [![Stars](https://codeberg.org/continuwuity/continuwuity/badges/stars.svg?style=flat)](https://codeberg.org/continuwuity/continuwuity/stars)
[![Codeberg](https://img.shields.io/badge/Codeberg-mirror-2185D0?style=flat&logo=codeberg&labelColor=fff)](https://codeberg.org/nexy7574/continuwuity) ![](https://codeberg.org/nexy7574/continuwuity/badges/stars.svg?style=flat)
### Why does this exist?
@ -57,7 +51,7 @@ Continuwuity aims to:
### Can I try it out?
Check out the [documentation](https://continuwuity.org) for installation instructions.
Check out the [documentation](introduction) for installation instructions.
There are currently no open registration Continuwuity instances available.
@ -65,6 +59,8 @@ There are currently no open registration Continuwuity instances available.
We're working our way through all of the issues in the [Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues).
- [Replacing old conduwuit links with working continuwuity links](https://forgejo.ellis.link/continuwuation/continuwuity/issues/742)
- [Getting CI and docs deployment working on the new Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues/740)
- [Packaging & availability in more places](https://forgejo.ellis.link/continuwuation/continuwuity/issues/747)
- [Appservices bugs & features](https://forgejo.ellis.link/continuwuation/continuwuity/issues?q=&type=all&state=open&labels=178&milestone=0&assignee=0&poster=0)
- [Improving compatibility and spec compliance](https://forgejo.ellis.link/continuwuation/continuwuity/issues?labels=119)
@ -115,7 +111,7 @@ When incorporating code from other forks:
#### Contact
Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) and [space](https://matrix.to/#/#space:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) to chat with us about the project!
Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [space](https://matrix.to/#/#space:continuwuity.org) to chat with us about the project!
<!-- ANCHOR_END: footer -->

arch/conduwuit.service (new file, 77 lines)

@ -0,0 +1,77 @@
[Unit]
Description=Continuwuity - Matrix homeserver
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
RequiresMountsFor=/var/lib/private/conduwuit
Alias=matrix-conduwuit.service
[Service]
DynamicUser=yes
Type=notify-reload
ReloadSignal=SIGUSR1
TTYPath=/dev/tty25
DeviceAllow=char-tty
StandardInput=tty-force
StandardOutput=tty
StandardError=journal+console
TTYReset=yes
# uncomment to allow buffer to be cleared every restart
TTYVTDisallocate=no
TTYColumns=120
TTYRows=40
AmbientCapabilities=
CapabilityBoundingSet=
DevicePolicy=closed
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
#ProcSubset=pid
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectProc=invisible
ProtectSystem=strict
PrivateDevices=yes
PrivateMounts=yes
PrivateTmp=yes
PrivateUsers=yes
PrivateIPC=yes
RemoveIPC=yes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
StateDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit
ExecStart=/usr/bin/conduwuit
Restart=on-failure
RestartSec=5
TimeoutStopSec=4m
TimeoutStartSec=4m
StartLimitInterval=1m
StartLimitBurst=5
[Install]
WantedBy=multi-user.target


@ -1,3 +0,0 @@
style = "conventional"
subject_length = 72
allowed_types = ["ci", "build", "fix", "feat", "chore", "docs", "style", "refactor", "perf", "test"]


@ -79,11 +79,9 @@
# This is the only directory where continuwuity will save its data,
# including media. Note: this was previously "/var/lib/matrix-conduit".
#
# YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
# `systemd` service. The service file sets it to `/var/lib/conduwuit`
# using an environment variable and also grants write access.
# YOU NEED TO EDIT THIS.
#
# example: "/var/lib/conduwuit"
# example: "/var/lib/continuwuity"
#
#database_path =
@ -327,37 +325,12 @@
#
#well_known_timeout = 10
# Federation client connection timeout (seconds). You should not set this
# to high values, as dead homeservers can significantly slow down
# federation, specifically key retrieval, which will take roughly the
# amount of time you configure here given that a homeserver doesn't
# respond. This will cause most clients to time out /keys/query, causing
# E2EE and device verification to fail.
#
#federation_conn_timeout = 10
# Federation client request timeout (seconds). You most definitely want
# this to be high to account for extremely large room joins, slow
# homeservers, your own resources etc.
#
#federation_timeout = 300
# MSC4284 Policy server request timeout (seconds). Generally policy
# servers should respond near instantly, however may slow down under
# load. If a policy server doesn't respond in a short amount of time, the
# room it is configured in may become unusable if this limit is set too
# high. 10 seconds is a good default, however dropping this to 3-5 seconds
# can be acceptable.
#
# Please be aware that policy requests are *NOT* currently re-tried, so if
# a spam check request fails, the event will be assumed to be not spam,
# which in some cases may result in spam being sent to or received from
# the room that would typically be prevented.
#
# About policy servers: https://matrix.org/blog/2025/04/introducing-policy-servers/
#
#policy_server_request_timeout = 10
# Federation client idle connection pool timeout (seconds).
#
#federation_idle_timeout = 25
@ -425,22 +398,6 @@
#
#allow_registration = false
# If registration is enabled, and this setting is true, new users
# registered after the first admin user will be automatically suspended
# and will require an admin to run `!admin users unsuspend <user_id>`.
#
# Suspended users are still able to read messages, make profile updates,
# leave rooms, and deactivate their account, however cannot send messages,
# invites, or create/join or otherwise modify rooms.
# They are effectively read-only.
#
# If you want to use this to screen people who register on your server,
# you should add a room to `auto_join_rooms` that is public, and contains
# information that new users can read (since they won't be able to DM
# anyone, or send a message, and may be confused).
#
#suspend_on_register = false
# Enabling this setting opens registration to anyone without restrictions.
# This makes your server vulnerable to abuse
#
@ -468,26 +425,6 @@
#
#registration_token_file =
# The public site key for reCaptcha. If this is provided, reCaptcha
# becomes required during registration. If both captcha *and*
# registration token are enabled, both will be required during
# registration.
#
# IMPORTANT: "Verify the origin of reCAPTCHA solutions" **MUST** BE
# DISABLED IF YOU WANT THE CAPTCHA TO WORK IN 3RD PARTY CLIENTS, OR
# CLIENTS HOSTED ON DOMAINS OTHER THAN YOUR OWN!
#
# Registration must be enabled (`allow_registration` must be true) for
# this to have any effect.
#
#recaptcha_site_key =
# The private site key for reCaptcha.
# If this is omitted, captcha registration will not work,
# even if `recaptcha_site_key` is set.
#
#recaptcha_private_site_key =
# Controls whether encrypted rooms and events are allowed.
#
#allow_encryption = true
@ -591,19 +528,13 @@
#
#default_room_version = 11
# Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
# Jaeger exporter. Traces will be sent via OTLP to a collector (such as
# Jaeger) that supports the OpenTelemetry Protocol.
# This item is undocumented. Please contribute documentation for it.
#
# Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
# environment variable (defaults to http://localhost:4318).
#
#allow_otlp = false
#allow_jaeger = false
# Filter for OTLP tracing spans. This controls which spans are exported
# to the OTLP collector.
# This item is undocumented. Please contribute documentation for it.
#
#otlp_filter = "info"
#jaeger_filter = "info"
# If the 'perf_measurements' compile-time feature is enabled, enables
# collecting folded stack trace profile of tracing spans using
@ -729,21 +660,6 @@
#
#log_thread_ids = false
# Enable journald logging on Unix platforms
#
# When enabled, log output will be sent to the systemd journal
# This is only supported on Unix platforms
#
#log_to_journald = false
# The syslog identifier to use with journald logging
#
# Only used when journald logging is enabled
#
# Defaults to the binary name
#
#journald_identifier =
# OpenID token expiration/TTL in seconds.
#
# These are the OpenID tokens that are primarily used for Matrix account
@ -1137,13 +1053,6 @@
#
#presence_timeout_remote_users = true
# Allow local read receipts.
#
# Disabling this will effectively also disable outgoing federated read
# receipts.
#
#allow_local_read_receipts = true
# Allow receiving incoming read receipts from remote servers.
#
#allow_incoming_read_receipts = true
@ -1152,13 +1061,6 @@
#
#allow_outgoing_read_receipts = true
# Allow local typing updates.
#
# Disabling this will effectively also disable outgoing federated typing
# updates.
#
#allow_local_typing = true
# Allow outgoing typing updates to federation.
#
#allow_outgoing_typing = true
@ -1704,10 +1606,6 @@
#
#config_reload_signal = true
# This item is undocumented. Please contribute documentation for it.
#
#ldap = false
[global.tls]
# Path to a valid TLS certificate file.
@ -1786,91 +1684,3 @@
# is 33.55MB. Setting it to 0 disables blurhashing.
#
#blurhash_max_raw_size = 33554432
[global.ldap]
# Whether to enable LDAP login.
#
# example: "true"
#
#enable = false
# Whether to force LDAP authentication or authorize classical password
# login.
#
# example: "true"
#
#ldap_only = false
# URI of the LDAP server.
#
# example: "ldap://ldap.example.com:389"
#
#uri = ""
# Root of the searches.
#
# example: "ou=users,dc=example,dc=org"
#
#base_dn = ""
# Bind DN if anonymous search is not enabled.
#
# You can use the variable `{username}` that will be replaced by the
# entered username. In such case, the password used to bind will be the
# one provided for the login and not the one given by
# `bind_password_file`. Beware: automatically granting admin rights will
# not work if you use this direct bind instead of a LDAP search.
#
# example: "cn=ldap-reader,dc=example,dc=org" or
# "cn={username},ou=users,dc=example,dc=org"
#
#bind_dn = ""
# Path to a file on the system that contains the password for the
# `bind_dn`.
#
# The server must be able to access the file, and it must not be empty.
#
#bind_password_file = ""
# Search filter to limit user searches.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(&(objectClass=person)(memberOf=matrix))"
#
#filter = "(objectClass=*)"
# Attribute to use to uniquely identify the user.
#
# example: "uid" or "cn"
#
#uid_attribute = "uid"
# Attribute containing the display name of the user.
#
# example: "givenName" or "sn"
#
#name_attribute = "givenName"
# Root of the searches for admin users.
#
# Defaults to `base_dn` if empty.
#
# example: "ou=admins,dc=example,dc=org"
#
#admin_base_dn = ""
# The LDAP search filter to find administrative users for continuwuity.
#
# If left blank, administrative state must be configured manually for each
# user.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
#
#admin_filter = ""

debian/README.md (new vendored file, 29 lines)

@ -0,0 +1,29 @@
# Continuwuity for Debian
Information about downloading and deploying the Debian package. This may also be
referenced for other `apt`-based distros such as Ubuntu.
### Installation
It is recommended to see the [generic deployment guide](../deploying/generic.md)
for further information if needed as usage of the Debian package is generally
related.
No `apt` repository is offered yet; one is in development.
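Until then, the `.deb` from the releases page can be installed manually; a minimal sketch (the filename is a placeholder for the actual package):

```bash
# substitute the real .deb downloaded from the releases page
sudo apt install ./conduwuit_<version>_amd64.deb
```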
### Configuration
When installed, the example config is placed at `/etc/conduwuit/conduwuit.toml`
as the default config. The config mentions things required to be changed before
starting.
You can tweak more detailed settings by uncommenting and setting the config
options in `/etc/conduwuit/conduwuit.toml`.
### Running
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary is installed at `/usr/sbin/conduwuit`.
This package assumes by default that conduwuit will be placed behind a reverse proxy. The default config options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS, so you will need to set up TLS certificates and renewal for it to work properly if you intend to federate.
Consult online documentation and guides on setting up a reverse proxy and TLS. Caddy is documented in the [generic deployment guide](../deploying/generic.md#setting-up-the-reverse-proxy) as it's the easiest and most user-friendly.
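Once the config is edited, a minimal sketch for starting the service and watching its logs (unit name as shipped by this package):

```bash
sudo systemctl enable --now conduwuit.service
sudo journalctl -fu conduwuit.service
```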


@ -1,24 +1,22 @@
[Unit]
Description=Continuwuity - Matrix homeserver
Documentation=https://continuwuity.org/
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
Alias=matrix-conduwuit.service
[Service]
DynamicUser=yes
User=conduwuit
Group=conduwuit
Type=notify-reload
ReloadSignal=SIGUSR1
Type=notify
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"
ExecStart=/usr/sbin/conduwuit
ExecStart=/usr/bin/conduwuit
ReadWritePaths=/var/lib/conduwuit /etc/conduwuit
AmbientCapabilities=
CapabilityBoundingSet=
@ -51,17 +49,16 @@ SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
#StateDirectory=conduwuit
StateDirectory=conduwuit
ConfigurationDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750
Restart=on-failure
RestartSec=5
TimeoutStopSec=4m
TimeoutStartSec=4m
TimeoutStopSec=2m
TimeoutStartSec=2m
StartLimitInterval=1m
StartLimitBurst=5


debian/postinst (new vendored file, 44 lines)

@ -0,0 +1,44 @@
#!/bin/sh
set -e
# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule
CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit
case "$1" in
configure)
# Create the `conduwuit` user if it does not exist yet.
if ! getent passwd conduwuit > /dev/null ; then
echo 'Adding system user for the conduwuit Matrix homeserver' 1>&2
adduser --system --group --quiet \
--home "$CONDUWUIT_DATABASE_PATH" \
--disabled-login \
--shell "/usr/sbin/nologin" \
conduwuit
fi
# Create the database path if it does not exist yet and fix up ownership
# and permissions for the config.
mkdir -v -p "$CONDUWUIT_DATABASE_PATH"
# symlink the previous location for compatibility if it does not exist yet.
if ! test -L "/var/lib/matrix-conduit" ; then
ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
fi
chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH"
chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH"
chmod -v 740 "$CONDUWUIT_DATABASE_PATH"
echo ''
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
echo 'To start the server, run: systemctl start conduwuit.service'
echo ''
;;
esac
#DEBHELPER#


@ -20,18 +20,24 @@ case $1 in
if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then
if test -L "$CONDUWUIT_CONFIG_PATH"; then
echo "Deleting continuwuity configuration files"
echo "Deleting conduwuit configuration files"
rm -v -r "$CONDUWUIT_CONFIG_PATH"
fi
fi
if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then
if test -L "$CONDUWUIT_DATABASE_PATH"; then
echo "Deleting continuwuity database directory"
echo "Deleting conduwuit database directory"
rm -r "$CONDUWUIT_DATABASE_PATH"
fi
fi
if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then
if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then
echo "Removing matrix-conduit symlink"
rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
fi
fi
;;
esac
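As a usage note, the `purge` branch above runs only when the package is purged, not on a plain removal (the package name `conduwuit` is assumed):

```bash
sudo apt remove conduwuit   # keeps configuration and database
sudo apt purge conduwuit    # additionally runs the cleanup above
```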


@ -1,16 +1,15 @@
ARG RUST_VERSION=1
ARG DEBIAN_VERSION=bookworm
FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-${DEBIAN_VERSION} AS base
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-${DEBIAN_VERSION} AS toolchain
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS base
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS toolchain
# Prevent deletion of apt cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean
# Match Rustc version as close as possible
# rustc -vV
ARG LLVM_VERSION=20
ARG LLVM_VERSION=19
# ENV RUSTUP_TOOLCHAIN=${RUST_VERSION}
# Install repo tools
@ -20,18 +19,10 @@ ARG LLVM_VERSION=20
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y \
pkg-config make jq \
curl git software-properties-common \
clang-${LLVM_VERSION} lld-${LLVM_VERSION} pkg-config make jq \
curl git \
file
# LLVM packages
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
curl https://apt.llvm.org/llvm.sh > llvm.sh && \
chmod +x llvm.sh && \
./llvm.sh ${LLVM_VERSION} && \
rm llvm.sh
# Create symlinks for LLVM tools
RUN <<EOF
set -o xtrace
@ -48,13 +39,11 @@ EOF
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.15.4
ENV BINSTALL_VERSION=1.12.3
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
ENV LDDTREE_VERSION=0.3.7
# renovate: datasource=crate depName=timelord-cli
ENV TIMELORD_VERSION=3.0.1
# Install unpackaged tools
RUN <<EOF
@ -62,7 +51,6 @@ RUN <<EOF
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
cargo binstall --no-confirm timelord-cli --version $TIMELORD_VERSION
EOF
# Set up xx (cross-compilation scripts)
@ -81,20 +69,17 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
WORKDIR /app
COPY ./rust-toolchain.toml .
RUN rustc --version \
&& xx-cargo --setup-target-triple
&& rustup target add $(xx-cargo --print-target-triple)
# Build binary
# Configure incremental compilation based on build context
ARG CARGO_INCREMENTAL=0
RUN echo "CARGO_INCREMENTAL=${CARGO_INCREMENTAL}" >> /etc/environment
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
# Configure pkg-config
RUN <<EOF
set -o xtrace
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
fi
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
EOF
@ -115,17 +100,16 @@ RUN <<EOF
EOF
# Apply CPU-specific optimizations if TARGET_CPU is provided
ARG TARGET_CPU
ARG TARGET_CPU=
RUN <<EOF
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
EOF
# Prepare output directories
@ -137,23 +121,18 @@ FROM toolchain AS builder
# Get source
COPY . .
# Restore timestamps from timelord cache if available
RUN --mount=type=cache,target=/timelord/ \
echo "Restoring timestamps from timelord cache"; \
timelord sync --source-dir /app --cache-dir /timelord;
ARG TARGETPLATFORM
# Verify environment configuration
RUN xx-cargo --print-target-triple
# Conduwuit version info
ARG GIT_COMMIT_HASH
ARG GIT_COMMIT_HASH_SHORT
ARG GIT_REMOTE_URL
ARG GIT_REMOTE_COMMIT_URL
ARG CONDUWUIT_VERSION_EXTRA
ARG CONTINUWUITY_VERSION_EXTRA
ARG GIT_COMMIT_HASH=
ARG GIT_COMMIT_HASH_SHORT=
ARG GIT_REMOTE_URL=
ARG GIT_REMOTE_COMMIT_URL=
ARG CONDUWUIT_VERSION_EXTRA=
ARG CONTINUWUITY_VERSION_EXTRA=
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
@ -161,12 +140,11 @@ ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA
ARG RUST_PROFILE=release
# Build the binary
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
--mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-${RUST_PROFILE} \
--mount=type=cache,target=/app/target,id=cargo-target-${TARGETPLATFORM} \
bash <<'EOF'
set -o allexport
set -o xtrace
@ -175,13 +153,13 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
jq -r ".target_directory"))
mkdir /out/sbin
PACKAGE=conduwuit
xx-cargo build --locked --profile ${RUST_PROFILE} \
xx-cargo build --locked --release \
-p $PACKAGE;
BINARIES=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
for BINARY in "${BINARIES[@]}"; do
echo $BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
done
EOF
@ -208,57 +186,32 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
EOF
# Extract dynamically linked dependencies
RUN <<'DEPS_EOF'
RUN <<EOF
set -o xtrace
mkdir /out/libs /out/libs-root
# Process each binary
mkdir /out/libs
mkdir /out/libs-root
for BINARY in /out/sbin/*; do
if lddtree_output=$(lddtree "$BINARY" 2>/dev/null) && [ -n "$lddtree_output" ]; then
echo "$lddtree_output" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | \
awk '{dest = ($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2; print "install -D " $1 " " dest}' | \
while read cmd; do eval "$cmd"; done
fi
lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
done
# Show what will be copied to runtime
echo "=== Libraries being copied to runtime image:"
find /out/libs* -type f 2>/dev/null | sort || echo "No libraries found"
DEPS_EOF
FROM ubuntu:latest AS prepper
# Create layer structure
RUN mkdir -p /layer1/etc/ssl/certs \
/layer2/usr/lib \
/layer3/sbin /layer3/sbom
# Copy SSL certs and root-path libraries to layer1 (ultra-stable)
COPY --from=base /etc/ssl/certs /layer1/etc/ssl/certs
COPY --from=builder /out/libs-root/ /layer1/
# Copy application libraries to layer2 (semi-stable)
COPY --from=builder /out/libs/ /layer2/usr/lib/
# Copy binaries and SBOM to layer3 (volatile)
COPY --from=builder /out/sbin/ /layer3/sbin/
COPY --from=builder /out/sbom/ /layer3/sbom/
# Fix permissions after copying
RUN chmod -R 755 /layer1 /layer2 /layer3
EOF
FROM scratch
WORKDIR /
# Copy ultra-stable layer (SSL certs, system libraries)
COPY --from=prepper /layer1/ /
# Copy root certs for tls into image
# You can also mount the certs from the host
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
COPY --from=base /etc/ssl/certs /etc/ssl/certs
# Copy semi-stable layer (application libraries)
COPY --from=prepper /layer2/ /
# Copy our build
COPY --from=builder /out/sbin/ /sbin/
# Copy SBOM
COPY --from=builder /out/sbom/ /sbom/
# Copy volatile layer (binaries, SBOM)
COPY --from=prepper /layer3/ /
# Copy dynamic libraries to root
COPY --from=builder /out/libs-root/ /
COPY --from=builder /out/libs/ /usr/lib/
# Inform linker where to find libraries
ENV LD_LIBRARY_PATH=/usr/lib


@ -1,200 +0,0 @@
# Why does this exist?
# Debian doesn't provide prebuilt musl packages
# rocksdb requires a prebuilt liburing, and linking fails if a gnu one is provided
ARG RUST_VERSION=1
ARG ALPINE_VERSION=3.22
FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS base
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS toolchain
# Install repo tools and dependencies
RUN --mount=type=cache,target=/etc/apk/cache apk add \
build-base pkgconfig make jq bash \
curl git file \
llvm-dev clang clang-static lld
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.15.4
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
ENV LDDTREE_VERSION=0.3.7
# Install unpackaged tools
RUN <<EOF
set -o xtrace
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
EOF
# Set up xx (cross-compilation scripts)
COPY --from=xx / /
ARG TARGETPLATFORM
# Install libraries linked by the binary
RUN --mount=type=cache,target=/etc/apk/cache xx-apk add musl-dev gcc g++ liburing-dev
# Set up Rust toolchain
WORKDIR /app
COPY ./rust-toolchain.toml .
RUN rustc --version \
&& xx-cargo --setup-target-triple
# Build binary
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
# Configure pkg-config
RUN <<EOF
set -o xtrace
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
fi
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
EOF
# Configure cc to use clang version
RUN <<EOF
set -o xtrace
echo "CC=clang" >> /etc/environment
echo "CXX=clang++" >> /etc/environment
EOF
# Cross-language LTO
RUN <<EOF
set -o xtrace
echo "CFLAGS=-flto" >> /etc/environment
echo "CXXFLAGS=-flto" >> /etc/environment
# Linker is set to target-compatible clang by xx
echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment
EOF
# Apply CPU-specific optimizations if TARGET_CPU is provided
ARG TARGET_CPU
RUN <<EOF
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
EOF
# Prepare output directories
RUN mkdir /out
FROM toolchain AS builder
# Get source
COPY . .
ARG TARGETPLATFORM
# Verify environment configuration
RUN xx-cargo --print-target-triple
# Conduwuit version info
ARG GIT_COMMIT_HASH
ARG GIT_COMMIT_HASH_SHORT
ARG GIT_REMOTE_URL
ARG GIT_REMOTE_COMMIT_URL
ARG CONDUWUIT_VERSION_EXTRA
ARG CONTINUWUITY_VERSION_EXTRA
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA
ARG RUST_PROFILE=release
# Build the binary
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
--mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-musl-${RUST_PROFILE} \
bash <<'EOF'
set -o allexport
set -o xtrace
. /etc/environment
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".target_directory"))
mkdir /out/sbin
PACKAGE=conduwuit
xx-cargo build --locked --profile ${RUST_PROFILE} \
-p $PACKAGE --no-default-features --features bindgen-static,release_max_log_level,standard;
BINARIES=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
for BINARY in "${BINARIES[@]}"; do
echo $BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
done
EOF
# Generate Software Bill of Materials (SBOM)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
bash <<'EOF'
set -o xtrace
mkdir /out/sbom
typeset -A PACKAGES
for BINARY in /out/sbin/*; do
BINARY_BASE=$(basename ${BINARY})
package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. == \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name")
if [ -z "$package" ]; then
continue
fi
PACKAGES[$package]=1
done
for PACKAGE in $(echo ${!PACKAGES[@]}); do
echo $PACKAGE
cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json
done
EOF
# Extract dynamically linked dependencies
RUN <<EOF
set -o xtrace
mkdir /out/libs
mkdir /out/libs-root
for BINARY in /out/sbin/*; do
lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
done
EOF
FROM scratch
WORKDIR /
# Copy root certs for tls into image
# You can also mount the certs from the host
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
COPY --from=base /etc/ssl/certs /etc/ssl/certs
# Copy our build
COPY --from=builder /out/sbin/ /sbin/
# Copy SBOM
COPY --from=builder /out/sbom/ /sbom/
# Copy dynamic libraries to root
COPY --from=builder /out/libs-root/ /
COPY --from=builder /out/libs/ /usr/lib/
# Inform linker where to find libraries
ENV LD_LIBRARY_PATH=/usr/lib
# Continuwuity default port
EXPOSE 8008
CMD ["/sbin/conduwuit"]


@ -15,10 +15,8 @@
- [Appservices](appservices.md)
- [Maintenance](maintenance.md)
- [Troubleshooting](troubleshooting.md)
- [Admin Command Reference](admin_reference.md)
- [Development](development.md)
- [Contributing](contributing.md)
- [Code Style Guide](development/code_style.md)
- [Testing](development/testing.md)
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
- [Community (and Guidelines)](community.md)

(file diff suppressed because it is too large)


@ -3,7 +3,7 @@
## Getting help
If you run into any problems while setting up an Appservice: ask us in
[#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) or
[#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or
[open an issue on Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new).
## Set up the appservice - general instructions


@ -75,9 +75,9 @@ subject to enforcement action.
## Matrix Community
These Community Guidelines apply to the entire
[Continuwuity Matrix Space](https://matrix.to/#/#space:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) and its rooms, including:
[Continuwuity Matrix Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, including:
### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org)
### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org)
This room is for support and discussions about Continuwuity. Ask questions, share insights, and help
each other out while adhering to these guidelines.
@ -85,7 +85,7 @@ each other out while adhering to these guidelines.
We ask that this room remain focused on the Continuwuity software specifically: the team are
typically happy to engage in conversations about related subjects in the off-topic room.
### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org)
### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org)
For off-topic community conversations about any subject. While this room allows for a wide range of
topics, the same guidelines apply. Please keep discussions respectful and inclusive, and avoid
@ -95,7 +95,7 @@ care and respect for diverse viewpoints.
General topics, such as world events, are welcome as long as they follow the guidelines. If a member
of the team asks for the conversation to end, please respect their decision.
### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org)
### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org)
This room is dedicated to discussing active development of Continuwuity, including ongoing issues or
code development. Collaboration here must follow these guidelines, and please consider raising


@ -9,11 +9,24 @@
</details>
## systemd unit file
## Debian systemd unit file
<details>
<summary>systemd unit file</summary>
<summary>Debian systemd unit file</summary>
```
{{#include ../../pkg/conduwuit.service}}
{{#include ../../debian/conduwuit.service}}
```
</details>
## Arch Linux systemd unit file
<details>
<summary>Arch Linux systemd unit file</summary>
```
{{#include ../../arch/conduwuit.service}}
```
</details>


@ -1,5 +1,3 @@
# Continuwuity for Arch Linux
Continuwuity is available in the `archlinuxcn` repository and AUR with the same package name `continuwuity`, which includes the latest tagged version. The development version is available on AUR as `continuwuity-git`.
Simply install the `continuwuity` package. Configure the service in `/etc/conduwuit/conduwuit.toml`, then enable and start the continuwuity.service.
Continuwuity does not have any Arch Linux packages at this time.


@ -1 +1 @@
{{#include ../../pkg/debian/README.md}}
{{#include ../../debian/README.md}}


@ -12,15 +12,6 @@ services:
#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure" # your HTTPS entry point
- "traefik.http.routers.continuwuity.tls=true"
- "traefik.http.routers.continuwuity.service=continuwuity"
- "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
# possibly, depending on your config:
# - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
environment:
CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity


@ -34,3 +34,4 @@ services:
# - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt"
# vim: ts=2:sw=2:expandtab


@ -26,7 +26,7 @@ services:
restart: unless-stopped
volumes:
- db:/var/lib/continuwuity
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
#- ./continuwuity.toml:/etc/continuwuity.toml
environment:
CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS


@ -8,18 +8,10 @@ services:
restart: unless-stopped
volumes:
- db:/var/lib/continuwuity
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure"
- "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
- "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
# Uncomment and adjust the following if you want to use middleware
# - "traefik.http.routers.continuwuity.middlewares=secureHeaders@file"
environment:
CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'


@ -2,7 +2,7 @@
## Docker
To run Continuwuity with Docker, you can either build the image yourself or pull it
To run Continuwuity with Docker you can either build the image yourself or pull it
from a registry.
### Use a registry
@ -26,7 +26,7 @@ to pull it to your machine.
### Run
When you have the image, you can simply run it with
When you have the image you can simply run it with
```bash
docker run -d -p 8448:6167 \
@ -36,7 +36,7 @@ docker run -d -p 8448:6167 \
--name continuwuity $LINK
```
or you can use [Docker Compose](#docker-compose).
or you can use [docker compose](#docker-compose).
The `-d` flag lets the container run in detached mode. You may supply an
optional `continuwuity.toml` config file, the example config can be found
@ -46,15 +46,15 @@ using env vars. For an overview of possible values, please take a look at the
[`docker-compose.yml`](docker-compose.yml) file.
If you just want to test Continuwuity for a short time, you can use the `--rm`
flag, which cleans up everything related to your container after you stop
flag, which will clean up everything related to your container after you stop
it.
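For a quick throwaway test, a sketch combining `--rm` with the run flags above (`$LINK` is the image reference from earlier; the server name is a placeholder):

```bash
docker run --rm -it -p 8448:6167 \
    -e CONTINUWUITY_SERVER_NAME=your.server.name.example \
    $LINK
```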
### Docker-compose
If the `docker run` command is not suitable for you or your setup, you can also use one
If the `docker run` command is not for you or your setup, you can also use one
of the provided `docker-compose` files.
Depending on your proxy setup, you can use one of the following files:
Depending on your proxy setup, you can use one of the following files;
- If you already have a `traefik` instance set up, use
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)
@ -65,7 +65,7 @@ Depending on your proxy setup, you can use one of the following files:
`example.com` placeholders with your own domain
- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml)
When picking the Traefik-related compose file, rename it to
When picking the traefik-related compose file, rename it so it matches
`docker-compose.yml`, and rename the override file to
`docker-compose.override.yml`. Edit the latter with the values you want for your
server.
@ -77,18 +77,18 @@ create the `caddy` network before spinning up the containers:
docker network create caddy
```
After that, you can rename it to `docker-compose.yml` and spin up the
After that, you can rename it so it matches `docker-compose.yml` and spin up the
containers!
Additional info about deploying Continuwuity can be found [here](generic.md).
### Build
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables efficient multi-platform builds.
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables multi-platform builds efficiently.
The resulting images are widely compatible with Docker and other container runtimes like Podman or containerd.
The resulting images are broadly compatible with Docker and other container runtimes like Podman or containerd.
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates, and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition.
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition.
To build an image locally using Docker Buildx, you can typically run a command like:
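An illustrative sketch only; the tag is an assumption and your platform flags may differ:

```bash
docker buildx build --file docker/Dockerfile --tag continuwuity:local --load .
```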
@ -109,8 +109,8 @@ Refer to the Docker Buildx documentation for more advanced build options.
### Run
If you have already built the image or want to use one from the registries, you
can start the container and everything else in the compose file in detached
If you already have built the image or want to use one from the registries, you
can just start the container and everything else in the compose file in detached
mode with:
```bash
@ -121,24 +121,22 @@ docker compose up -d
### Use Traefik as Proxy
As a container user, you probably know about Traefik. It is an easy-to-use
reverse proxy for making containerized apps and services available through the
As a container user, you probably know about Traefik. It is a easy to use
reverse proxy for making containerized app and services available through the
web. With the two provided files,
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy
to deploy and use Continuwuity, with a small caveat. If you have already looked at
the files, you should have seen the `well-known` service, which is the
small caveat. Traefik is simply a proxy and load balancer and cannot
serve any kind of content. For Continuwuity to federate, we need to either
expose ports `443` and `8448` or serve two endpoints: `.well-known/matrix/client`
to deploy and use Continuwuity, with a little caveat. If you already took a look at
the files, then you should have seen the `well-known` service, and that is the
little caveat. Traefik is simply a proxy and loadbalancer and is not able to
serve any kind of content, but for Continuwuity to federate, we need to either
expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client`
and `.well-known/matrix/server`.
With the service `well-known`, we use a single `nginx` container that serves
With the service `well-known` we use a single `nginx` container that will serve
those two files.
Alternatively, you can use Continuwuity's built-in delegation file capability. Set up the delegation files in the configuration file, and then proxy paths under `/.well-known/matrix` to continuwuity. For example, the label ``traefik.http.routers.continuwuity.rule=(Host(`matrix.ellis.link`) || (Host(`ellis.link`) && PathPrefix(`/.well-known/matrix`)))`` does this for the domain `ellis.link`.
## Voice communication
See the [TURN](../turn.md) page.


@ -1,5 +1,5 @@
# Continuwuity for FreeBSD
Continuwuity currently does not provide FreeBSD builds or FreeBSD packaging. However, Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
Continuwuity at the moment does not provide FreeBSD builds or have FreeBSD packaging, however Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
Contributions to get Continuwuity packaged for FreeBSD are welcome.
Contributions for getting Continuwuity packaged are welcome.


@ -13,42 +13,31 @@
You may simply download the binary that fits your machine architecture (x86_64
or aarch64). Run `uname -m` to see what you need.
You can download prebuilt fully static musl binaries from the latest tagged
Prebuilt fully static musl binaries can be downloaded from the latest tagged
release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or
from the `main` CI branch workflow artifact output. These also include Debian/Ubuntu
`main` CI branch workflow artifact output. These also include Debian/Ubuntu
packages.
You can download these directly using curl. The `ci-bins` are CI workflow binaries organized by commit
These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit
hash/revision, and `releases` are tagged releases. Sort by descending last
modified date to find the latest.
modified for the latest.
These binaries have jemalloc and io_uring statically linked and included with
them, so no additional dynamic dependencies need to be installed.
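An illustrative sketch of a direct download (the asset URL is a placeholder; copy the real link for your architecture from the releases page):

```bash
# substitute the real asset URL copied from the releases page
curl -fL -o conduwuit "<asset-url>"
chmod +x conduwuit
```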
For the **best** performance: if you are using an `x86_64` CPU made in the last ~15 years,
we recommend using the `-haswell-` optimized binaries. These set
`-march=haswell`, which provides the most compatible and highest performance with
optimized binaries. The database backend, RocksDB, benefits most from this as it
uses hardware-accelerated CRC32 hashing/checksumming, which is critical
For the **best** performance; if using an `x86_64` CPU made in the last ~15 years,
we recommend using the `-haswell-` optimised binaries. This sets
`-march=haswell` which is the most compatible and highest performance with
optimised binaries. The database backend, RocksDB, most benefits from this as it
will then use hardware accelerated CRC32 hashing/checksumming which is critical
for performance.
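To check whether a machine can use the `-haswell-` builds, AVX2 support is a rough proxy, since Haswell introduced AVX2:

```bash
# prints "avx2" if the CPU exposes the Haswell feature set
grep -m1 -o avx2 /proc/cpuinfo
```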
### Compiling
Alternatively, you may compile the binary yourself.
### Building with the Rust toolchain
If wanting to build using standard Rust toolchains, make sure you install:
- (On linux) `liburing-dev` on the compiling machine, and `liburing` on the target host
- (On linux) `pkg-config` on the compiling machine to allow finding `liburing`
- A C++ compiler and (on linux) `libclang` for RocksDB
You can build Continuwuity using `cargo build --release`.
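A sketch for a Debian-based build host (package names are assumptions based on the list above):

```bash
sudo apt install liburing-dev pkg-config clang libclang-dev
cargo build --release
```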
### Building with Nix
If you prefer, you can use Nix (or [Lix](https://lix.systems)) to build Continuwuity. This provides improved reproducibility and makes it easy to set up a build environment and generate output. This approach also allows for easy cross-compilation.
Alternatively, you may compile the binary yourself. We recommend using
Nix (or [Lix](https://lix.systems)) to build Continuwuity, as this has the most
guaranteed reproducibility and is the easiest way to get a build environment and
output going. This also allows easy cross-compilation.
You can run the `nix build -L .#static-x86_64-linux-musl-all-features` or
`nix build -L .#static-aarch64-linux-musl-all-features` commands based
@ -56,11 +45,17 @@ on architecture to cross-compile the necessary static binary located at
`result/bin/conduwuit`. This is reproducible with the static binaries produced
in our CI.
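For copy-pasting, those two commands are:

```bash
nix build -L .#static-x86_64-linux-musl-all-features
nix build -L .#static-aarch64-linux-musl-all-features
```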
If you want to build using standard Rust toolchains, make sure you install:
- `liburing-dev` on the compiling machine, and `liburing` on the target host
- LLVM and libclang for RocksDB
You can build Continuwuity using `cargo build --release --all-features`.
## Adding a Continuwuity user
While Continuwuity can run as any user, it is better to use dedicated users for
different services. This also ensures that the file permissions
are set up correctly.
While Continuwuity can run as any user, it is better to use dedicated users for
different services. This also allows you to make sure that the file permissions
are correctly set up.
In Debian, you can use this command to create a Continuwuity user:
@ -76,18 +71,18 @@ sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity
## Forwarding ports in the firewall or the router
Matrix's default federation port is 8448, and clients must use port 443.
If you would like to use only port 443 or a different port, you will need to set up
delegation. Continuwuity has configuration options for delegation, or you can configure
your reverse proxy to manually serve the necessary JSON files for delegation
Matrix's default federation port is 8448, and clients must be using port 443.
If you would like to use only port 443, or a different port, you will need to set up
delegation. Continuwuity has config options for doing delegation, or you can configure
your reverse proxy to manually serve the necessary JSON files to do delegation
(see the `[global.well_known]` config section).
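For reference, the standard Matrix delegation files look like this (hostnames are placeholders):

`/.well-known/matrix/server`:

```json
{ "m.server": "matrix.example.com:443" }
```

`/.well-known/matrix/client`:

```json
{ "m.homeserver": { "base_url": "https://matrix.example.com" } }
```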
If Continuwuity runs behind a router or in a container and has a different public
IP address than the host system, you need to forward these public ports directly
or indirectly to the port mentioned in the configuration.
IP address than the host system these public ports need to be forwarded directly
or indirectly to the port mentioned in the config.
Note for NAT users: if you have trouble connecting to your server from inside
your network, check if your router supports "NAT
Note for NAT users: if you have trouble connecting to your server from the inside
of your network, you need to research your router and see if it supports "NAT
hairpinning" or "NAT loopback".
If your router does not support this feature, you need to research doing local
@ -97,19 +92,19 @@ on the network level, consider something like NextDNS or Pi-Hole.
## Setting up a systemd service
You can find two example systemd units for Continuwuity
Two example systemd units for Continuwuity can be found
[on the configuration page](../configuration/examples.md#debian-systemd-unit-file).
You may need to change the `ExecStart=` path to match where you placed the Continuwuity
binary if it is not in `/usr/bin/conduwuit`.
You may need to change the `ExecStart=` path to where you placed the Continuwuity
binary if it is not `/usr/bin/conduwuit`.
On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros
and OpenSUSE), put `$EscapeControlCharactersOnReceive off` inside
`/etc/rsyslog.conf` to allow color in logs.
If you are using a different `database_path` than the systemd unit's
If you are using a `database_path` other than the systemd unit's
configured default `/var/lib/conduwuit`, you need to add your path to the
systemd unit's `ReadWritePaths=`. You can do this by either directly editing
`conduwuit.service` and reloading systemd, or by running `systemctl edit conduwuit.service`
systemd unit's `ReadWritePaths=`. This can be done by either directly editing
`conduwuit.service` and reloading systemd, or running `systemctl edit conduwuit.service`
and entering the following:
```
@ -119,8 +114,8 @@ ReadWritePaths=/path/to/custom/database/path
## Creating the Continuwuity configuration file
Now you need to create the Continuwuity configuration file in
`/etc/continuwuity/continuwuity.toml`. You can find an example configuration at
Now we need to create Continuwuity's config file in
`/etc/continuwuity/continuwuity.toml`. The example config can be found at
[conduwuit-example.toml](../configuration/examples.md).
**Please take a moment to read the config. You need to change at least the
@ -130,8 +125,8 @@ RocksDB is the only supported database backend.
## Setting the correct file permissions
If you are using a dedicated user for Continuwuity, you need to allow it to
read the configuration. To do this, run:
If you are using a dedicated user for Continuwuity, you will need to allow it to
read the config. To do that you can run this:
```bash
sudo chown -R root:root /etc/conduwuit
@ -148,13 +143,13 @@ sudo chmod 700 /var/lib/conduwuit/
## Setting up the Reverse Proxy
We recommend Caddy as a reverse proxy because it is trivial to use and handles TLS certificates, reverse proxy headers, etc. transparently with proper defaults.
We recommend Caddy as a reverse proxy, as it is trivial to use, handling TLS certificates, reverse proxy headers, etc. transparently with proper defaults.
For other software, please refer to their respective documentation or online guides.
### Caddy
After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile`
and enter the following (substitute your actual server name):
and enter this (substituting your server name):
```caddyfile
your.server.name, your.server.name:8448 {
@ -173,11 +168,11 @@ sudo systemctl enable --now caddy
### Other Reverse Proxies
As we prefer our users to use Caddy, we do not provide configuration files for other proxies.
As we would prefer our users to use Caddy, we will not provide configuration files for other proxies.
You will need to reverse proxy everything under the following routes:
You will need to reverse proxy everything under the following routes:
- `/_matrix/` - core Matrix C-S and S-S APIs
- `/_conduwuit/` and/or `/_continuwuity/` - ad-hoc Continuwuity routes such as `/local_user_count` and
- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and
`/server_version`
You can optionally reverse proxy the following individual routes:
@ -198,16 +193,16 @@ Examples of delegation:
For Apache and Nginx there are many examples available online.
Lighttpd is not supported as it appears to interfere with the `X-Matrix` Authorization
header, making federation non-functional. If you find a workaround, please share it so we can add it to this documentation.
Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization
header, making federation non-functional. If a workaround is found, feel free to share it so it can be added to the documentation here.
If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from interfering with the `X-Matrix` header (note that Apache is not ideal as a general reverse proxy, so we discourage using it if alternatives are available).
If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy, and we discourage using it if you have an alternative).
If using Nginx, you need to pass the request URI to Continuwuity using `$request_uri`, like this:
If using Nginx, you need to give Continuwuity the request URI using `$request_uri`, like so:
- `proxy_pass http://127.0.0.1:6167$request_uri;`
- `proxy_pass http://127.0.0.1:6167;`
Nginx users need to increase the `client_max_body_size` setting (default is 1M) to match the
Nginx users need to increase `client_max_body_size` (default is 1M) to match
`max_request_size` defined in conduwuit.toml.
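A minimal sketch of the relevant Nginx pieces (the `20M` value is an assumption; set it to your actual `max_request_size`):

```nginx
# Inside your server block
client_max_body_size 20M;

location /_matrix/ {
    proxy_pass http://127.0.0.1:6167$request_uri;
}
```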
## You're done
@ -227,7 +222,7 @@ sudo systemctl enable conduwuit
## How do I know it works?
You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your
homeserver address, and try to register.
homeserver and try to register.
You can also use these commands as a quick health check (replace
`your.server.name`).
@ -242,10 +237,10 @@ curl https://your.server.name:8448/_conduwuit/server_version
curl https://your.server.name:8448/_matrix/federation/v1/version
```
- To check if your server can communicate with other homeservers, use the
- To check if your server can talk with other homeservers, you can use the
[Matrix Federation Tester](https://federationtester.matrix.org/). If you can
register but cannot join federated rooms, check your configuration and verify
that port 8448 is open and forwarded correctly.
register but cannot join federated rooms, check your config again and also check
that port 8448 is open and forwarded correctly.
# What's next?

View file

@ -1,9 +1,9 @@
# Continuwuity for Kubernetes
Continuwuity doesn't support horizontal scalability or distributed loading
natively. However, a community-maintained Helm Chart is available here to run
natively; however, a community-maintained Helm Chart is available here to run
conduwuit on Kubernetes: <https://gitlab.cronce.io/charts/conduwuit>
This should be compatible with Continuwuity, but you will need to change the image reference.
This should be compatible with continuwuity, but you will need to change the image reference.
If changes need to be made, please reach out to the maintainer, as this is not maintained or controlled by the Continuwuity maintainers.
Should changes need to be made, please reach out to the maintainer, as this is not maintained/controlled by the Continuwuity maintainers.

View file

@ -1,130 +1,75 @@
# Continuwuity for NixOS
NixOS packages Continuwuity as `matrix-continuwuity`. This package includes both the Continuwuity software and a dedicated NixOS module for configuration and deployment.
Continuwuity can be acquired by Nix (or [Lix][lix]) from various places:
## Installation methods
* The `flake.nix` at the root of the repo
* The `default.nix` at the root of the repo
* From Continuwuity's binary cache
You can acquire Continuwuity with Nix (or [Lix][lix]) from these sources:
### NixOS module
* Directly from Nixpkgs using the official package (`pkgs.matrix-continuwuity`)
* The `flake.nix` at the root of the Continuwuity repo
* The `default.nix` at the root of the Continuwuity repo
The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions
welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure
Continuwuity.
## NixOS module
### Conduit NixOS Config Module and SQLite
Continuwuity now has an official NixOS module that simplifies configuration and deployment. The module is available in Nixpkgs as `services.matrix-continuwuity` from NixOS 25.05.
Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend.
Continuwuity dropped SQLite support in favor of exclusively supporting the much faster RocksDB.
Make sure that you are using the RocksDB backend before migrating!
Here's a basic example of how to use the module:
There is a [tool to migrate a Conduit SQLite database to
RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/).
```nix
{ config, pkgs, ... }:
{
services.matrix-continuwuity = {
enable = true;
settings = {
global = {
server_name = "example.com";
# Listening on localhost by default
# address and port are handled automatically
allow_registration = false;
allow_encryption = true;
allow_federation = true;
trusted_servers = [ "matrix.org" ];
};
};
};
}
```
### Available options
The NixOS module provides these configuration options:
- `enable`: Enable the Continuwuity service
- `user`: The user to run Continuwuity as (defaults to "continuwuity")
- `group`: The group to run Continuwuity as (defaults to "continuwuity")
- `extraEnvironment`: Extra environment variables to pass to the Continuwuity server
- `package`: The Continuwuity package to use
- `settings`: The Continuwuity configuration (in TOML format)
Use the `settings` option to configure Continuwuity itself. See the [example configuration file](../configuration/examples.md#example-configuration) for all available options.
If you want to run the latest code, you should get Continuwuity from the `flake.nix`
or `default.nix` and set [`services.matrix-conduit.package`][package]
appropriately to use Continuwuity instead of Conduit.
### UNIX sockets
The NixOS module natively supports UNIX sockets through the `global.unix_socket_path` option. When using UNIX sockets, set `global.address` to `null`:
Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module
a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX
socket option does not exist in Conduit, and the module forcibly sets the `address` and
`port` config options.
```nix
services.matrix-continuwuity = {
enable = true;
settings = {
global = {
server_name = "example.com";
address = null; # Must be null when using unix_socket_path
unix_socket_path = "/run/continuwuity/continuwuity.sock";
unix_socket_perms = 660; # Default permissions for the socket
# ...
};
};
options.services.matrix-conduit.settings = lib.mkOption {
apply = old: old // (
if (old.global ? "unix_socket_path")
then { global = builtins.removeAttrs old.global [ "address" "port" ]; }
else { }
);
};
```
The module automatically sets the correct `RestrictAddressFamilies` in the systemd service configuration to allow access to UNIX sockets.
Additionally, the [`matrix-conduit` systemd unit][systemd-unit] in the module does not allow
the `AF_UNIX` socket address family in their systemd unit's `RestrictAddressFamilies=` which
disallows the namespace from accessing or creating UNIX sockets and has to be enabled like so:
### RocksDB database
```nix
systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ];
```
Continuwuity exclusively uses RocksDB as its database backend. The module automatically configures the database path to `/var/lib/continuwuity/`; you cannot change it because the service relies on systemd's StateDir.
If you're migrating from Conduit with SQLite, use this [tool to migrate a Conduit SQLite database to RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/).
Even though those workarounds are feasible, a Continuwuity NixOS configuration module, developed and
published by the community, would be appreciated.
### jemalloc and hardened profile
Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] because it uses `scudo` by default. Either disable/hide `scudo` from Continuwuity or disable jemalloc like this:
Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix]
due to them using `scudo` by default. You must either disable/hide `scudo` from Continuwuity, or
disable jemalloc like so:
```nix
services.matrix-continuwuity = {
enable = true;
package = pkgs.matrix-continuwuity.override {
enableJemalloc = false;
};
# ...
};
```
## Upgrading from Conduit
If you previously used Conduit with the `services.matrix-conduit` module:
1. Ensure your Conduit uses the RocksDB backend, or migrate from SQLite using the [migration tool](https://github.com/ShadowJonathan/conduit_toolbox/)
2. Switch to the new module by changing `services.matrix-conduit` to `services.matrix-continuwuity` in your configuration (see the sketch after this list)
3. Update any custom configuration to match the new module's structure
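A minimal sketch of the rename in step 2 (your existing `settings` move across, adjusted to the new module's structure):

```nix
{
  # Before:
  # services.matrix-conduit.enable = true;

  # After:
  services.matrix-continuwuity.enable = true;
}
```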
## Reverse proxy configuration
You'll need to set up a reverse proxy (like nginx or caddy) to expose Continuwuity to the internet. Configure your reverse proxy to forward requests to `/_matrix` on port 443 and 8448 to your Continuwuity instance.
Here's an example nginx configuration:
```nginx
server {
listen 443 ssl;
listen [::]:443 ssl;
listen 8448 ssl;
listen [::]:8448 ssl;
server_name example.com;
# SSL configuration here...
location /_matrix/ {
proxy_pass http://127.0.0.1:6167$request_uri;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
let
conduwuit = pkgs.unstable.conduwuit.override {
enableJemalloc = false;
};
in
```
[lix]: https://lix.systems/
[hardened.nix]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/profiles/hardened.nix
[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
[hardened.nix]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/profiles/hardened.nix#L22
[systemd-unit]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/conduit.nix#L132

View file

@ -2,7 +2,7 @@
Information about developing the project. If you are only interested in using
it, you can safely ignore this page. If you plan on contributing, see the
[contributor's guide](./contributing.md) and [code style guide](./development/code_style.md).
[contributor's guide](./contributing.md).
## Continuwuity project layout
@ -68,22 +68,31 @@ do this if Rust supported workspace-level features to begin with.
## List of forked dependencies
During Continuwuity (and prior projects) development, we have had to fork some dependencies to support our use-cases.
These forks exist for various reasons, including features that upstream projects won't accept,
faster-paced development, Continuwuity-specific use cases, or lack of time to upstream changes.
During Continuwuity development, we have had to fork
some dependencies to support our use-cases in some areas. This ranges from
changes the upstream project won't accept for any reason, faster-paced
development (unresponsive or slow upstream), and Continuwuity-specific use cases, to a
lack of time to upstream some things.
All forked dependencies are maintained under the [continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various performance improvements, more features and better client/server interop
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via [`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
- [jemallocator][continuwuation-jemallocator] - Fork of [tikv/jemallocator][jemallocator] fixing musl builds, suspicious code,
and adding support for redzones in Valgrind
- [rustyline-async][continuwuation-rustyline-async] - Fork of [zyansheep/rustyline-async][rustyline-async] with tab completion callback
and `CTRL+\` signal quit event for Continuwuity console CLI
- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of [rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues,
removing unnecessary `gtest` include, and using our RocksDB and jemallocator forks
- [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing] implementing `Clone` for `EnvFilter` to
support dynamically changing tracing environments
- [ruma/ruma][1]: <https://github.com/girlbossceo/ruwuma> - various performance
improvements, more features, faster-paced development, better client/server interop
hacks upstream won't accept, etc
- [facebook/rocksdb][2]: <https://github.com/girlbossceo/rocksdb> - liburing
build fixes and GCC debug build fix
- [tikv/jemallocator][3]: <https://github.com/girlbossceo/jemallocator> - musl
builds seem to be broken on upstream, fixes some broken/suspicious code in
places, additional safety measures, and support redzones for Valgrind
- [zyansheep/rustyline-async][4]:
<https://github.com/girlbossceo/rustyline-async> - tab completion callback and
`CTRL+\` signal quit event for Continuwuity console CLI
- [rust-rocksdb/rust-rocksdb][5]:
<https://github.com/girlbossceo/rust-rocksdb-zaidoon1> - [`@zaidoon1`][8]'s fork
has quicker updates, more up to date dependencies, etc. Our fork fixes musl build
issues, removes unnecessary `gtest` include, and uses our RocksDB and jemallocator
forks.
- [tokio-rs/tracing][6]: <https://github.com/girlbossceo/tracing> - Implements
`Clone` for `EnvFilter` to support dynamically changing tracing envfilter's
alongside other logging/metrics things
## Debugging with `tokio-console`
@ -104,30 +113,12 @@ You will also need to enable the `tokio_console` config option in Continuwuity w
starting it. This was due to tokio-console causing gradual memory leak/usage
if left enabled.
## Building Docker Images
To build a Docker image for Continuwuity, use the standard Docker build command:
```bash
docker build -f docker/Dockerfile .
```
The image can be cross-compiled for different architectures.
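For cross-compilation, one option is Docker Buildx (a sketch; the target platform is an example):

```bash
docker buildx build --platform linux/arm64 -f docker/Dockerfile .
```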
[continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma
[continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb
[continuwuation-jemallocator]: https://forgejo.ellis.link/continuwuation/jemallocator
[continuwuation-rustyline-async]: https://forgejo.ellis.link/continuwuation/rustyline-async
[continuwuation-rust-rocksdb]: https://forgejo.ellis.link/continuwuation/rust-rocksdb
[continuwuation-tracing]: https://forgejo.ellis.link/continuwuation/tracing
[ruma]: https://github.com/ruma/ruma/
[rocksdb]: https://github.com/facebook/rocksdb/
[jemallocator]: https://github.com/tikv/jemallocator/
[rustyline-async]: https://github.com/zyansheep/rustyline-async/
[rust-rocksdb]: https://github.com/rust-rocksdb/rust-rocksdb/
[tracing]: https://github.com/tokio-rs/tracing/
[1]: https://github.com/ruma/ruma/
[2]: https://github.com/facebook/rocksdb/
[3]: https://github.com/tikv/jemallocator/
[4]: https://github.com/zyansheep/rustyline-async/
[5]: https://github.com/rust-rocksdb/rust-rocksdb/
[6]: https://github.com/tokio-rs/tracing/
[7]: https://docs.rs/tokio-console/latest/tokio_console/
[8]: https://github.com/zaidoon1/
[9]: https://github.com/rust-lang/cargo/issues/12162

View file

@ -1,331 +0,0 @@
# Code Style Guide
This guide outlines the coding standards and best practices for Continuwuity development. These guidelines help avoid bugs and maintain code consistency, readability, and quality across the project.
These guidelines apply to new code on a best-effort basis. When modifying existing code, follow existing patterns in the immediate area you're changing and then gradually improve code style when making substantial changes.
## General Principles
- **Clarity over cleverness**: Write code that is easy to understand and maintain
- **Consistency**: Pragmatically follow existing patterns in the codebase, rather than adding new dependencies.
- **Safety**: Prefer safe, explicit code over unsafe code with implicit requirements
- **Performance**: Consider performance implications, but not at the expense of correctness or maintainability
## Formatting and Linting
All code must satisfy lints (clippy, rustc, rustdoc, etc.) and be formatted using **nightly** rustfmt (`cargo +nightly fmt`). Many of the `rustfmt.toml` features depend on the nightly toolchain.
If you need to allow a lint, ensure it's either obvious why (e.g. clippy saying redundant clone but it's actually required) or add a comment explaining the reason. Do not write inefficient code just to satisfy lints. If a lint is wrong and provides a less efficient solution, allow the lint and mention that in a comment.
If making large formatting changes across unrelated files, create a separate commit so it can be added to the `.git-blame-ignore-revs` file.
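A typical local loop might look like this (a sketch; CI runs its own, stricter invocations):

```bash
# Format with nightly rustfmt (rustfmt.toml relies on nightly-only options)
cargo +nightly fmt
# Run lints; fix or explicitly allow (with a comment) anything clippy reports
cargo clippy --workspace -- -D warnings
```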
## Rust-Specific Guidelines
### Naming Conventions
Follow standard Rust naming conventions as outlined in the [Rust API Guidelines](https://rust-lang.github.io/api-guidelines/naming.html):
- Use `snake_case` for functions, variables, and modules
- Use `PascalCase` for types, traits, and enum variants
- Use `SCREAMING_SNAKE_CASE` for constants and statics
- Use descriptive names that clearly indicate purpose
```rs
// Good
fn process_user_request(user_id: &UserId) -> Result<Response, Error> { ... }
const MAX_RETRY_ATTEMPTS: usize = 3;
struct UserSession {
session_id: String,
created_at: SystemTime,
}
// Avoid
fn proc_reqw(id: &str) -> Result<Resp, Err> { ... }
```
### Error Handling
- Use `Result<T, E>` for operations that can fail
- Prefer specific error types over generic ones
- Use `?` operator for error propagation
- Provide meaningful error messages
- If needed, create or use an error enum.
```rs
// Good
fn parse_server_name(input: &str) -> Result<ServerName, InvalidServerNameError> {
ServerName::parse(input)
.map_err(|_| InvalidServerNameError::new(input))
}
// Avoid
fn parse_server_name(input: &str) -> Result<ServerName, Box<dyn Error>> {
Ok(ServerName::parse(input).unwrap())
}
```
### Option Handling
- Prefer explicit `Option` handling over unwrapping
- Use combinators like `map`, `and_then`, `unwrap_or_else` when appropriate
```rs
// Good
let display_name = user.display_name
.as_ref()
.map(|name| name.trim())
.filter(|name| !name.is_empty())
.unwrap_or(&user.localpart);
// Avoid
let display_name = if user.display_name.is_some() {
user.display_name.as_ref().unwrap()
} else {
&user.localpart
};
```
## Logging Guidelines
### Structured Logging
**Always use structured logging instead of string interpolation.** This improves log parsing, filtering, and observability.
```rs
// Good - structured parameters
debug!(
room_id = %room_id,
user_id = %user_id,
event_type = ?event.event_type(),
"Processing room event"
);
info!(
server_name = %server_name,
response_time_ms = response_time.as_millis(),
"Federation request completed successfully"
);
// Avoid - string interpolation
debug!("Processing room event for {room_id} from {user_id}");
info!("Federation request to {server_name} took {response_time:?}");
```
### Log Levels
Use appropriate log levels:
- `error!`: Unrecoverable errors that affect functionality
- `warn!`: Potentially problematic situations that don't stop execution
- `info!`: General information about application flow
- `debug!`: Detailed information for debugging
- `trace!`: Very detailed information, typically only useful during development
Keep in mind how frequently the log line will be hit, and its relevance to a server operator.
```rs
// Good
error!(
error = %err,
room_id = %room_id,
"Failed to send event to room"
);
warn!(
server_name = %server_name,
attempt = retry_count,
"Federation request failed, retrying"
);
info!(
user_id = %user_id,
"User registered successfully"
);
debug!(
event_id = %event_id,
auth_events = ?auth_event_ids,
"Validating event authorization"
);
```
### Sensitive Information
Never log sensitive information such as:
- Access tokens
- Passwords
- Private keys
- Personal user data (unless specifically needed for debugging)
```rs
// Good
debug!(
user_id = %user_id,
session_id = %session_id,
"Processing authenticated request"
);
// Avoid
debug!(
user_id = %user_id,
access_token = %access_token,
"Processing authenticated request"
);
```
## Lock Management
### Explicit Lock Scopes
**Always use closure guards instead of implicitly dropped guards.** This makes lock scopes explicit and helps prevent deadlocks.
Use the `WithLock` trait from `core::utils::with_lock`:
```rs
use conduwuit::utils::with_lock::WithLock;
// Good - explicit closure guard
shared_data.with_lock(|data| {
data.counter += 1;
data.last_updated = SystemTime::now();
// Lock is explicitly released here
});
// Avoid - implicit guard
{
let mut data = shared_data.lock().unwrap();
data.counter += 1;
data.last_updated = SystemTime::now();
// Lock released when guard goes out of scope - less explicit
}
```
For async contexts, use the async variant:
```rs
use conduwuit::utils::with_lock::WithLockAsync;
// Good - async closure guard
async_shared_data.with_lock(|data| {
data.process_async_update();
}).await;
```
### Lock Ordering
When acquiring multiple locks, always acquire them in a consistent order to prevent deadlocks:
```rs
// Good - consistent ordering (e.g., by memory address or logical hierarchy)
let mut locks = [&lock_a, &lock_b, &lock_c];
locks.sort_by_key(|lock| *lock as *const _ as usize);
for lock in locks {
lock.with_lock(|data| {
// Process data
});
}
// Avoid - inconsistent ordering that can cause deadlocks
lock_b.with_lock(|data_b| {
lock_a.with_lock(|data_a| {
// Deadlock risk if another thread acquires in A->B order
});
});
```
## Documentation
### Code Comments
- Reference related documentation or parts of the specification
- When a task can be achieved in multiple ways, explain the reasoning behind your decision
- Update comments when code changes
```rs
/// Processes a federation request with automatic retries and backoff.
///
/// Implements exponential backoff to handle temporary
/// network issues and server overload gracefully.
pub async fn send_federation_request(
destination: &ServerName,
request: FederationRequest,
) -> Result<FederationResponse, FederationError> {
// Retry with exponential backoff because federation can be flaky
// due to network issues or temporary server overload
let mut retry_delay = Duration::from_millis(100);
for attempt in 1..=MAX_RETRIES {
match try_send_request(destination, &request).await {
Ok(response) => return Ok(response),
Err(err) if err.is_retriable() && attempt < MAX_RETRIES => {
warn!(
destination = %destination,
attempt = attempt,
error = %err,
retry_delay_ms = retry_delay.as_millis(),
"Federation request failed, retrying"
);
tokio::time::sleep(retry_delay).await;
retry_delay *= 2; // Exponential backoff
}
Err(err) => return Err(err),
}
}
unreachable!("Loop should have returned or failed by now")
}
```
### Async Patterns
- Use `async`/`await` appropriately
- Avoid blocking operations in async contexts
- Consider using `tokio::task::spawn_blocking` for CPU-intensive work
```rs
// Good - non-blocking async operation
pub async fn fetch_user_profile(
&self,
user_id: &UserId,
) -> Result<UserProfile, Error> {
let profile = self.db
.get_user_profile(user_id)
.await?;
Ok(profile)
}
// Good - CPU-intensive work moved to blocking thread
pub async fn generate_thumbnail(
&self,
image_data: Vec<u8>,
) -> Result<Vec<u8>, Error> {
tokio::task::spawn_blocking(move || {
image::generate_thumbnail(image_data)
})
.await
.map_err(|_| Error::TaskJoinError)?
}
```
## Inclusivity and Diversity Guidelines
All code and documentation must be written with inclusivity and diversity in mind. This ensures our software is welcoming and accessible to all users and contributors. Follow the [Google guide on writing inclusive code and documentation](https://developers.google.com/style/inclusive-documentation) for comprehensive guidance.
The following types of language are explicitly forbidden in all code, comments, documentation, and commit messages:
**Ableist language:** Avoid terms like "sanity check", "crazy", "insane", "cripple", or "blind to". Use alternatives like "validation", "unexpected", "disable", or "unaware of".
**Socially-charged technical terms:** Replace overly divisive terminology with neutral alternatives:
- "whitelist/blacklist" → "allowlist/denylist" or "permitted/blocked"
- "master/slave" → "primary/replica", "controller/worker", or "parent/child"
When working with external dependencies that use non-inclusive terminology, avoid propagating them in your own APIs and variable names.
Use diverse examples in documentation that avoid culturally-specific references, assumptions about user demographics, or unnecessarily gendered language. Design with accessibility and inclusivity in mind by providing clear error messages and considering diverse user needs.
This software is intended to be used by everyone regardless of background, identity, or ability. Write code and documentation that reflects this commitment to inclusivity.

View file

@ -196,5 +196,5 @@ The initial implementation PR is available [here][1].
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049
[5]: https://github.com/rust-lang/cargo/issues/12746
[6]: https://crates.io/crates/hot-lib-reloader/
[7]: https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org
[7]: https://matrix.to/#/#continuwuity:continuwuity.org
[8]: https://crates.io/crates/libloading

View file

@ -1,21 +0,0 @@
# Command-Line Help for `continuwuity`
This document contains the help content for the `continuwuity` command-line program.
**Command Overview:**
* [`continuwuity`↴](#continuwuity)
## `continuwuity`
a very cool Matrix chat homeserver written in Rust
**Usage:** `continuwuity [OPTIONS]`
###### **Options:**
* `-c`, `--config <CONFIG>` — Path to the config TOML file (optional)
* `-O`, `--option <OPTION>` — Override a configuration variable using TOML 'key=value' syntax
* `--read-only` — Run in a stricter read-only --maintenance mode
* `--maintenance` — Run in maintenance mode while refusing connections
* `--execute <EXECUTE>` — Execute console command automatically after startup
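For illustration, a hypothetical invocation combining these flags (the config path and the override key are placeholders):

```bash
continuwuity -c /etc/continuwuity/continuwuity.toml -O 'global.allow_federation=true'
```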

View file

@ -3,4 +3,4 @@
Content-Type: application/json
/.well-known/continuwuity/*
Access-Control-Allow-Origin: *
Content-Type: application/json
Content-Type: application/json

View file

@ -4,10 +4,6 @@
{
"id": 1,
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
},
{
"id": 3,
"message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
}
]
}
}

View file

@ -32,4 +32,4 @@
"required": [
"announcements"
]
}
}

2
docs/static/support vendored
View file

@ -21,4 +21,4 @@
}
],
"support_page": "https://continuwuity.org/introduction#contact"
}
}

View file

@ -68,27 +68,3 @@ documentation](https://github.com/coturn/coturn/blob/master/docker/coturn/README
For security recommendations see Synapse's [Coturn
documentation](https://element-hq.github.io/synapse/latest/turn-howto.html).
### Testing
To make sure TURN credentials are being correctly served to clients, you can manually make an HTTP request to the turnServer endpoint.
`curl "https://<matrix.example.com>/_matrix/client/r0/voip/turnServer" -H 'Authorization: Bearer <your_client_token>' | jq`
You should get a response like this:
```json
{
"username": "1752792167:@jade:example.com",
"password": "KjlDlawdPbU9mvP4bhdV/2c/h65=",
"uris": [
"turns:coturn.example.com?transport=udp",
"turns:coturn.example.com?transport=tcp",
"turn:coturn.example.com?transport=udp",
"turn:coturn.example.com?transport=tcp"
],
"ttl": 86400
}
```
You can test that these credentials work using [Trickle ICE](https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/).

View file

@ -83,7 +83,7 @@ env DIRENV_DEVSHELL=all-features \
--workspace \
--locked \
--profile test \
--features full \
--all-features \
--no-deps \
--document-private-items \
--color always
@ -96,7 +96,6 @@ script = """
direnv exec . \
cargo clippy \
--workspace \
--features full \
--locked \
--profile test \
--color=always \
@ -114,7 +113,7 @@ env DIRENV_DEVSHELL=all-features \
--workspace \
--locked \
--profile test \
--features full \
--all-features \
--color=always \
-- \
-D warnings

161
flake.lock generated
View file

@ -10,11 +10,11 @@
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1751403276,
"narHash": "sha256-V0EPQNsQko1a8OqIWc2lLviLnMpR1m08Ej00z5RVTfs=",
"lastModified": 1738524606,
"narHash": "sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "896ad88fa57ad5dbcd267c0ac51f1b71ccfcb4dd",
"rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e",
"type": "github"
},
"original": {
@ -32,11 +32,11 @@
"nixpkgs": "nixpkgs_4"
},
"locked": {
"lastModified": 1748883665,
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
"lastModified": 1737621947,
"narHash": "sha256-8HFvG7fvIFbgtaYAY2628Tb89fA55nPm2jSiNs0/Cws=",
"owner": "cachix",
"repo": "cachix",
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
"rev": "f65a3cd5e339c223471e64c051434616e18cc4f5",
"type": "github"
},
"original": {
@ -63,11 +63,11 @@
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1744206633,
"narHash": "sha256-pb5aYkE8FOoa4n123slgHiOf1UbNSnKe5pEZC+xXD5g=",
"lastModified": 1728672398,
"narHash": "sha256-KxuGSoVUFnQLB2ZcYODW7AVPAh9JqRlD5BrfsC/Q4qs=",
"owner": "cachix",
"repo": "cachix",
"rev": "8a60090640b96f9df95d1ab99e5763a586be1404",
"rev": "aac51f698309fd0f381149214b7eee213c66ef0a",
"type": "github"
},
"original": {
@ -77,6 +77,23 @@
"type": "github"
}
},
"complement": {
"flake": false,
"locked": {
"lastModified": 1741891349,
"narHash": "sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=",
"owner": "girlbossceo",
"repo": "complement",
"rev": "e587b3df569cba411aeac7c20b6366d03c143745",
"type": "github"
},
"original": {
"owner": "girlbossceo",
"ref": "main",
"repo": "complement",
"type": "github"
}
},
"crane": {
"inputs": {
"nixpkgs": [
@ -100,11 +117,11 @@
},
"crane_2": {
"locked": {
"lastModified": 1750266157,
"narHash": "sha256-tL42YoNg9y30u7zAqtoGDNdTyXTi8EALDeCB13FtbQA=",
"lastModified": 1739936662,
"narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=",
"owner": "ipetkov",
"repo": "crane",
"rev": "e37c943371b73ed87faf33f7583860f81f1d5a48",
"rev": "19de14aaeb869287647d9461cbd389187d8ecdb7",
"type": "github"
},
"original": {
@ -132,11 +149,11 @@
]
},
"locked": {
"lastModified": 1748273445,
"narHash": "sha256-5V0dzpNgQM0CHDsMzh+ludYeu1S+Y+IMjbaskSSdFh0=",
"lastModified": 1733323168,
"narHash": "sha256-d5DwB4MZvlaQpN6OQ4SLYxb5jA4UH5EtV5t5WOtjLPU=",
"owner": "cachix",
"repo": "devenv",
"rev": "668a50d8b7bdb19a0131f53c9f6c25c9071e1ffb",
"rev": "efa9010b8b1cfd5dd3c7ed1e172a470c3b84a064",
"type": "github"
},
"original": {
@ -153,11 +170,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1755585599,
"narHash": "sha256-tl/0cnsqB/Yt7DbaGMel2RLa7QG5elA8lkaOXli6VdY=",
"lastModified": 1740724364,
"narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=",
"owner": "nix-community",
"repo": "fenix",
"rev": "6ed03ef4c8ec36d193c18e06b9ecddde78fb7e42",
"rev": "edf7d9e431cda8782e729253835f178a356d3aab",
"type": "github"
},
"original": {
@ -186,11 +203,11 @@
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
@ -202,11 +219,11 @@
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
@ -289,14 +306,15 @@
"nixpkgs": [
"cachix",
"nixpkgs"
]
],
"nixpkgs-stable": "nixpkgs-stable_2"
},
"locked": {
"lastModified": 1747372754,
"narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=",
"lastModified": 1733318908,
"narHash": "sha256-SVQVsbafSM1dJ4fpgyBqLZ+Lft+jcQuMtEL3lQWx2Sk=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "80479b6ec16fefd9c1db3ea13aeb038c60530f46",
"rev": "6f4e2a2112050951a314d2733a994fbab94864c6",
"type": "github"
},
"original": {
@ -343,6 +361,23 @@
"type": "github"
}
},
"liburing": {
"flake": false,
"locked": {
"lastModified": 1740613216,
"narHash": "sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=",
"owner": "axboe",
"repo": "liburing",
"rev": "e1003e496e66f9b0ae06674869795edf772d5500",
"type": "github"
},
"original": {
"owner": "axboe",
"ref": "master",
"repo": "liburing",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": [
@ -366,11 +401,11 @@
]
},
"locked": {
"lastModified": 1745930071,
"narHash": "sha256-bYyjarS3qSNqxfgc89IoVz8cAFDkF9yPE63EJr+h50s=",
"lastModified": 1727438425,
"narHash": "sha256-X8ES7I1cfNhR9oKp06F6ir4Np70WGZU5sfCOuNBEwMg=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b455edf3505f1bf0172b39a735caef94687d0d9c",
"rev": "f6c5ae4c1b2e411e6b1e6a8181cc84363d6a7546",
"type": "github"
},
"original": {
@ -449,13 +484,29 @@
"type": "github"
}
},
"nixpkgs_2": {
"nixpkgs-stable_2": {
"locked": {
"lastModified": 1733212471,
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
"lastModified": 1730741070,
"narHash": "sha256-edm8WG19kWozJ/GqyYx2VjW99EdhjKwbY3ZwdlPAAlo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
"rev": "d063c1dd113c91ab27959ba540c0d9753409edf3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1730531603,
"narHash": "sha256-Dqg6si5CqIzm87sp57j5nTaeBbWhHFaVyG7V6L8k3lY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "7ffd9ae656aec493492b44d0ddfb28e79a1ea25d",
"type": "github"
},
"original": {
@ -483,11 +534,11 @@
},
"nixpkgs_4": {
"locked": {
"lastModified": 1748190013,
"narHash": "sha256-R5HJFflOfsP5FBtk+zE8FpL8uqE7n62jqOsADvVshhE=",
"lastModified": 1733212471,
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "62b852f6c6742134ade1abdd2a21685fd617a291",
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
"type": "github"
},
"original": {
@ -499,11 +550,11 @@
},
"nixpkgs_5": {
"locked": {
"lastModified": 1751498133,
"narHash": "sha256-QWJ+NQbMU+NcU2xiyo7SNox1fAuwksGlQhpzBl76g1I=",
"lastModified": 1740547748,
"narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d55716bb59b91ae9d1ced4b1ccdea7a442ecbfdb",
"rev": "3a05eebede89661660945da1f151959900903b6a",
"type": "github"
},
"original": {
@ -513,26 +564,46 @@
"type": "github"
}
},
"rocksdb": {
"flake": false,
"locked": {
"lastModified": 1741308171,
"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
"owner": "girlbossceo",
"repo": "rocksdb",
"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
"type": "github"
},
"original": {
"owner": "girlbossceo",
"ref": "v9.11.1",
"repo": "rocksdb",
"type": "github"
}
},
"root": {
"inputs": {
"attic": "attic",
"cachix": "cachix",
"complement": "complement",
"crane": "crane_2",
"fenix": "fenix",
"flake-compat": "flake-compat_3",
"flake-utils": "flake-utils",
"liburing": "liburing",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_5"
"nixpkgs": "nixpkgs_5",
"rocksdb": "rocksdb"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1755504847,
"narHash": "sha256-VX0B9hwhJypCGqncVVLC+SmeMVd/GAYbJZ0MiiUn2Pk=",
"lastModified": 1740691488,
"narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "a905e3b21b144d77e1b304e49f3264f6f8d4db75",
"rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5",
"type": "github"
},
"original": {

829
flake.nix
View file

@ -2,350 +2,577 @@
inputs = {
attic.url = "github:zhaofengli/attic?ref=main";
cachix.url = "github:cachix/cachix?ref=master";
crane = {
url = "github:ipetkov/crane?ref=master";
};
fenix = {
url = "github:nix-community/fenix?ref=main";
inputs.nixpkgs.follows = "nixpkgs";
};
flake-compat = {
url = "github:edolstra/flake-compat?ref=master";
flake = false;
};
complement = { url = "github:girlbossceo/complement?ref=main"; flake = false; };
crane = { url = "github:ipetkov/crane?ref=master"; };
fenix = { url = "github:nix-community/fenix?ref=main"; inputs.nixpkgs.follows = "nixpkgs"; };
flake-compat = { url = "github:edolstra/flake-compat?ref=master"; flake = false; };
flake-utils.url = "github:numtide/flake-utils?ref=main";
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; };
liburing = { url = "github:axboe/liburing?ref=master"; flake = false; };
};
outputs =
inputs:
inputs.flake-utils.lib.eachDefaultSystem (
system:
let
pkgsHost = import inputs.nixpkgs {
outputs = inputs:
inputs.flake-utils.lib.eachDefaultSystem (system:
let
pkgsHost = import inputs.nixpkgs{
inherit system;
};
pkgsHostStatic = pkgsHost.pkgsStatic;
# The Rust toolchain to use
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI=";
};
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
inherit pkgs;
book = self.callPackage ./nix/pkgs/book {};
complement = self.callPackage ./nix/pkgs/complement {};
craneLib = ((inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain));
inherit inputs;
main = self.callPackage ./nix/pkgs/main {};
oci-image = self.callPackage ./nix/pkgs/oci-image {};
tini = pkgs.tini.overrideAttrs {
# newer clang/gcc is unhappy with tini-static: <https://3.dog/~strawberry/pb/c8y4>
patches = [ (pkgs.fetchpatch {
url = "https://patch-diff.githubusercontent.com/raw/krallin/tini/pull/224.patch";
hash = "sha256-4bTfAhRyIT71VALhHY13hUgbjLEUyvgkIJMt3w9ag3k=";
})
];
};
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [ "out" "dev" "man" ];
buildFlags = [ "library" ];
src = inputs.liburing;
};
rocksdb = (pkgs.rocksdb.override {
liburing = self.liburing;
}).overrideAttrs (old: {
src = inputs.rocksdb;
version = pkgs.lib.removePrefix
"v"
(builtins.fromJSON (builtins.readFile ./flake.lock))
.nodes.rocksdb.original.ref;
# we have this already at https://github.com/girlbossceo/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# unsetting this so i don't have to revert it and make this nix exclusive
patches = [];
cmakeFlags = pkgs.lib.subtractLists
[
# no real reason to have snappy or zlib, no one uses this
"-DWITH_SNAPPY=1"
"-DZLIB=1"
"-DWITH_ZLIB=1"
# we dont need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=1"
# we dont need to build rocksdb tests
"-DWITH_TESTS=1"
# we use rust-rocksdb via C interface and dont need C++ RTTI
"-DUSE_RTTI=1"
# this doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"-DFORCE_SSE42=1"
# PORTABLE will get set in main/default.nix
"-DPORTABLE=1"
]
old.cmakeFlags
++ [
# no real reason to have snappy, no one uses this
"-DWITH_SNAPPY=0"
"-DZLIB=0"
"-DWITH_ZLIB=0"
# we dont need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=0"
# we dont need trace tools
"-DWITH_TRACE_TOOLS=0"
# we dont need to build rocksdb tests
"-DWITH_TESTS=0"
# we use rust-rocksdb via C interface and dont need C++ RTTI
"-DUSE_RTTI=0"
];
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
});
scopeHost = mkScope pkgsHost;
scopeHostStatic = mkScope pkgsHostStatic;
scopeCrossLinux = mkScope pkgsHost.pkgsLinux.pkgsStatic;
mkCrossScope = crossSystem:
let pkgsCrossStatic = (import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
mkScope pkgsCrossStatic;
mkDevShell = scope: scope.pkgs.mkShell {
env = scope.main.env // {
# Rust Analyzer needs to be able to find the path to default crate
# sources, and it can read this environment variable to do so. The
# `rust-src` component is required in order for this to work.
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
# Convenient way to access a pinned version of Complement's source
# code.
COMPLEMENT_SRC = inputs.complement.outPath;
# Needed for Complement: <https://github.com/golang/go/issues/52690>
CGO_CFLAGS = "-Wl,--no-gc-sections";
CGO_LDFLAGS = "-Wl,--no-gc-sections";
};
fnx = inputs.fenix.packages.${system};
# The Rust toolchain to use
toolchain = fnx.combine [
(fnx.fromToolchainFile {
file = ./rust-toolchain.toml;
# Development tools
packages = [
# Always use nightly rustfmt because most of its options are unstable
#
# This needs to come before `toolchain` in this list, otherwise
# `$PATH` will have stable rustfmt instead.
inputs.fenix.packages.${system}.latest.rustfmt
# See also `rust-toolchain.toml`
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
})
fnx.complete.rustfmt
];
toolchain
]
++ (with pkgsHost.pkgs; [
# Required by hardened-malloc.rs dep
binutils
mkScope =
pkgs:
pkgs.lib.makeScope pkgs.newScope (self: {
inherit pkgs inputs;
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
main = self.callPackage ./pkg/nix/pkgs/main { };
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [
"out"
"dev"
"man"
];
buildFlags = [ "library" ];
};
rocksdb =
(pkgs.rocksdb_9_10.override {
# Override the liburing input for the build with our own so
# we have it built with the library flag
inherit (self) liburing;
}).overrideAttrs
(old: {
src = pkgsHost.fetchFromGitea {
domain = "forgejo.ellis.link";
owner = "continuwuation";
repo = "rocksdb";
rev = "10.4.fb";
sha256 = "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=";
};
version = "v10.4.fb";
cmakeFlags =
pkgs.lib.subtractLists [
# No real reason to have snappy or zlib, no one uses this
"-DWITH_SNAPPY=1"
"-DZLIB=1"
"-DWITH_ZLIB=1"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=1"
# We don't need to build rocksdb tests
"-DWITH_TESTS=1"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=1"
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"-DFORCE_SSE42=1"
# PORTABLE will get set in main/default.nix
"-DPORTABLE=1"
] old.cmakeFlags
++ [
# No real reason to have snappy, no one uses this
"-DWITH_SNAPPY=0"
"-DZLIB=0"
"-DWITH_ZLIB=0"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=0"
# We don't need trace tools
"-DWITH_TRACE_TOOLS=0"
# We don't need to build rocksdb tests
"-DWITH_TESTS=0"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=0"
];
cargo-audit
cargo-auditable
# outputs has "tools" which we don't need or use
outputs = [ "out" ];
# Needed for producing Debian packages
cargo-deb
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
preInstall = "";
# Needed for CI to check validity of produced Debian packages (dpkg-deb)
dpkg
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# Unsetting this so we don't have to revert it and make this nix exclusive
patches = [ ];
engage
postPatch = ''
# Fix gcc-13 build failures due to missing <cstdint> and
# <system_error> includes, fixed upstream since 8.x
sed -e '1i #include <cstdint>' -i db/compaction/compaction_iteration_stats.h
sed -e '1i #include <cstdint>' -i table/block_based/data_block_hash_index.h
sed -e '1i #include <cstdint>' -i util/string_util.h
sed -e '1i #include <cstdint>' -i include/rocksdb/utilities/checkpoint.h
'';
});
});
# Needed for Complement
go
scopeHost = mkScope pkgsHost;
mkCrossScope =
crossSystem:
let
pkgsCrossStatic =
(import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
mkScope pkgsCrossStatic;
# Needed for our script for Complement
jq
gotestfmt
in
{
packages =
{
default = scopeHost.main.override {
disable_features = [
# Don't include experimental features
# Needed for finding broken markdown links
lychee
# Needed for linting markdown files
markdownlint-cli
# Useful for editing the book locally
mdbook
# used for rust caching in CI to speed it up
sccache
]
# liburing is Linux-exclusive
++ lib.optional stdenv.hostPlatform.isLinux liburing
++ lib.optional stdenv.hostPlatform.isLinux numactl)
++ scope.main.buildInputs
++ scope.main.propagatedBuildInputs
++ scope.main.nativeBuildInputs;
};
in
{
packages = {
default = scopeHost.main.override {
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
default-debug = scopeHost.main.override {
profile = "dev";
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
# Just a test profile used for things like CI and complement
default-test = scopeHost.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
];
};
default-debug = scopeHost.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features = scopeHost.main.override {
all_features = true;
disable_features = [
# Don't include experimental features
];
};
# just a test profile used for things like CI and complement
default-test = scopeHost.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features = scopeHost.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features-debug = scopeHost.main.override {
profile = "dev";
all_features = true;
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
];
};
all-features-debug = scopeHost.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# This is non-functional on nix for some reason
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
hmalloc = scopeHost.main.override { features = [ "hardened_malloc" ]; };
}
// builtins.listToAttrs (
builtins.concatLists (
builtins.map
(
crossSystem:
let
binaryName = "static-${crossSystem}";
scopeCrossStatic = mkCrossScope crossSystem;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = scopeCrossStatic.main;
}
];
};
hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; };
# An output for a statically-linked binary with x86_64 haswell
# target optimisations
{
name = "${binaryName}-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
oci-image = scopeHost.oci-image;
oci-image-all-features = scopeHost.oci-image.override {
main = scopeHost.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
};
oci-image-all-features-debug = scopeHost.oci-image.override {
main = scopeHost.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
};
oci-image-hmalloc = scopeHost.oci-image.override {
main = scopeHost.main.override {
features = ["hardened_malloc"];
};
};
# An output for a statically-linked unstripped debug ("dev") binary
{
name = "${binaryName}-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
};
}
book = scopeHost.book;
# An output for a statically-linked unstripped debug binary with the
# "test" profile (for CI usage only)
{
name = "${binaryName}-test";
value = scopeCrossStatic.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features`
{
name = "${binaryName}-all-features";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features` and with x86_64 haswell
# target optimisations
{
name = "${binaryName}-all-features-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
# An output for a statically-linked unstripped debug ("dev") binary with `--all-features`
{
name = "${binaryName}-all-features-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with hardened_malloc
{
name = "${binaryName}-hmalloc";
value = scopeCrossStatic.main.override {
features = [ "hardened_malloc" ];
};
}
]
)
[
#"x86_64-apple-darwin"
#"aarch64-apple-darwin"
"x86_64-linux-gnu"
"x86_64-linux-musl"
"aarch64-linux-musl"
]
)
);
complement = scopeHost.complement;
static-complement = scopeHostStatic.complement;
# macOS containers don't exist, so the complement images must be forced to linux
linux-complement = (mkCrossScope "${pkgsHost.hostPlatform.qemuArch}-linux-musl").complement;
}
);
//
builtins.listToAttrs
(builtins.concatLists
(builtins.map
(crossSystem:
let
binaryName = "static-${crossSystem}";
scopeCrossStatic = mkCrossScope crossSystem;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = scopeCrossStatic.main;
}
# An output for a statically-linked binary with x86_64 haswell
# target optimisations
{
name = "${binaryName}-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
};
}
# An output for a statically-linked unstripped debug ("dev") binary
{
name = "${binaryName}-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
};
}
# An output for a statically-linked unstripped debug binary with the
# "test" profile (for CI usage only)
{
name = "${binaryName}-test";
value = scopeCrossStatic.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features`
{
name = "${binaryName}-all-features";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features` and with x86_64 haswell
# target optimisations
{
name = "${binaryName}-all-features-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
};
}
# An output for a statically-linked unstripped debug ("dev") binary with `--all-features`
{
name = "${binaryName}-all-features-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with hardened_malloc
{
name = "${binaryName}-hmalloc";
value = scopeCrossStatic.main.override {
features = ["hardened_malloc"];
};
}
# An output for an OCI image based on that binary
{
name = "oci-image-${crossSystem}";
value = scopeCrossStatic.oci-image;
}
# An output for an OCI image based on that binary with x86_64 haswell
# target optimisations
{
name = "oci-image-${crossSystem}-x86_64-haswell-optimised";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
};
};
}
# An output for an OCI image based on that unstripped debug ("dev") binary
{
name = "oci-image-${crossSystem}-debug";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
};
};
}
# An output for an OCI image based on that binary with `--all-features`
{
name = "oci-image-${crossSystem}-all-features";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
};
}
# An output for an OCI image based on that binary with `--all-features` and with x86_64 haswell
# target optimisations
{
name = "oci-image-${crossSystem}-all-features-x86_64-haswell-optimised";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
};
};
}
# An output for an OCI image based on that unstripped debug ("dev") binary with `--all-features`
{
name = "oci-image-${crossSystem}-all-features-debug";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
};
}
# An output for an OCI image based on that binary with hardened_malloc
{
name = "oci-image-${crossSystem}-hmalloc";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
features = ["hardened_malloc"];
};
};
}
# An output for a complement OCI image for the specified platform
{
name = "complement-${crossSystem}";
value = scopeCrossStatic.complement;
}
]
)
[
#"x86_64-apple-darwin"
#"aarch64-apple-darwin"
"x86_64-linux-gnu"
"x86_64-linux-musl"
"aarch64-linux-musl"
]
)
);
devShells.default = mkDevShell scopeHostStatic;
devShells.all-features = mkDevShell
(scopeHostStatic.overrideScope (final: prev: {
main = prev.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}));
devShells.no-features = mkDevShell
(scopeHostStatic.overrideScope (final: prev: {
main = prev.main.override { default_features = false; };
}));
devShells.dynamic = mkDevShell scopeHost;
});
}
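Taken together, the flake above exposes each build variant as a named package output. A minimal usage sketch (assuming a local checkout with flakes enabled; the output names are read off the attribute sets above, and the cross outputs only build or substitute on a suitable Linux host):

```sh
# Build the host-native binary with (almost) all features enabled
nix build .#all-features

# Build a statically-linked musl binary and the matching OCI image
nix build .#static-x86_64-linux-musl
nix build .#oci-image-x86_64-linux-musl-all-features

# Build the documentation book, or enter the all-features dev shell
nix build .#book
nix develop .#all-features
```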

36
nix/pkgs/book/default.nix Normal file
View file

@ -0,0 +1,36 @@
{ inputs
# Dependencies
, main
, mdbook
, stdenv
}:
stdenv.mkDerivation {
inherit (main) pname version;
src = inputs.nix-filter {
root = inputs.self;
include = [
"book.toml"
"conduwuit-example.toml"
"CODE_OF_CONDUCT.md"
"CONTRIBUTING.md"
"README.md"
"development.md"
"debian/conduwuit.service"
"debian/README.md"
"arch/conduwuit.service"
"docs"
"theme"
];
};
nativeBuildInputs = [
mdbook
];
buildPhase = ''
mdbook build -d $out
'';
}
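Since the derivation above just runs mdbook into `$out`, the built book is a plain static site. A hedged preview sketch (`book` is the flake output defined earlier; the file server is arbitrary):

```sh
nix build .#book
# Serve the generated static site locally; any static file server works
python3 -m http.server --directory result 8080
```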

View file

@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL
BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz
IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy
NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m
ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt
/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88
awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp
L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK
K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl
8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV
HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy
ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw
DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ
irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+
HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e
VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3
y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d
jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4=
-----END CERTIFICATE-----

View file

@ -0,0 +1,50 @@
[global]
address = "0.0.0.0"
allow_device_name_federation = true
allow_guest_registration = true
allow_public_room_directory_over_federation = true
allow_public_room_directory_without_auth = true
allow_registration = true
database_path = "/database"
log = "trace,h2=debug,hyper=debug"
port = [8008, 8448]
trusted_servers = []
only_query_trusted_key_servers = false
query_trusted_key_servers_first = false
query_trusted_key_servers_first_on_join = false
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true
ip_range_denylist = []
url_preview_domain_contains_allowlist = ["*"]
url_preview_domain_explicit_denylist = ["*"]
media_compat_file_link = false
media_startup_check = true
prune_missing_media = true
log_colors = true
admin_room_notices = false
allow_check_for_updates = false
intentionally_unknown_config_option_for_testing = true
rocksdb_log_level = "info"
rocksdb_max_log_files = 1
rocksdb_recovery_mode = 0
rocksdb_paranoid_file_checks = true
log_guest_registrations = false
allow_legacy_media = true
startup_netburst = true
startup_netburst_keep = -1
allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true
# valgrind makes things so slow
dns_timeout = 60
dns_attempts = 20
request_conn_timeout = 60
request_timeout = 120
well_known_conn_timeout = 60
well_known_timeout = 60
federation_idle_timeout = 300
sender_timeout = 300
sender_idle_timeout = 300
sender_retry_backoff_limit = 300
[global.tls]
dual_protocol = true

View file

@ -0,0 +1,89 @@
# Dependencies
{ bashInteractive
, buildEnv
, coreutils
, dockerTools
, lib
, main
, stdenv
, tini
, writeShellScriptBin
}:
let
main' = main.override {
profile = "test";
all_features = true;
disable_release_max_log_level = true;
disable_features = [
# console/CLI stuff isn't used or relevant for complement
"console"
"tokio_console"
# sentry telemetry isn't useful for complement, disabled by default anyways
"sentry_telemetry"
"perf_measurements"
# this is non-functional on nix for some reason
"hardened_malloc"
# dont include experimental features
"experimental"
# compression isn't needed for complement
"brotli_compression"
"gzip_compression"
"zstd_compression"
# complement doesn't need hot reloading
"conduwuit_mods"
# complement doesn't have URL preview media tests
"url_preview"
];
};
start = writeShellScriptBin "start" ''
set -euxo pipefail
${lib.getExe' coreutils "env"} \
CONDUWUIT_SERVER_NAME="$SERVER_NAME" \
${lib.getExe main'}
'';
in
dockerTools.buildImage {
name = "complement-conduwuit";
tag = "main";
copyToRoot = buildEnv {
name = "root";
pathsToLink = [
"/bin"
];
paths = [
bashInteractive
coreutils
main'
start
];
};
config = {
Cmd = [
"${lib.getExe start}"
];
Entrypoint = if !stdenv.hostPlatform.isDarwin
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
then [ "${lib.getExe' tini "tini"}" "--" ]
else [];
Env = [
"CONTINUWUITY_TLS__KEY=${./private_key.key}"
"CONTINUWUITY_TLS__CERTS=${./certificate.crt}"
"CONTINUWUITY_CONFIG=${./config.toml}"
"RUST_BACKTRACE=full"
];
ExposedPorts = {
"8008/tcp" = {};
"8448/tcp" = {};
};
};
}
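The result of `dockerTools.buildImage` is an image tarball, so running the complement image locally is a matter of loading it into a container runtime. A sketch under those assumptions (`linux-complement` is the flake output defined earlier; `SERVER_NAME` is consumed by the `start` script above):

```sh
nix build .#linux-complement
docker load < result
docker run --rm -e SERVER_NAME=hs1 -p 8008:8008 -p 8448:8448 \
  complement-conduwuit:main
```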

View file

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb
iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT
LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a
09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc
ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga
Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO
/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu
WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB
DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb
piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN
D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ
8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+
3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq
/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90
FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q
td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M
Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A
91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV
8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh
VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW
UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K
kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz
KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7
IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh
tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM
9MVtdgSkuh2gwkD/mMoAJXM=
-----END PRIVATE KEY-----

View file

@ -0,0 +1,16 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK
DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH
uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR
xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb
o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B
hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe
vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB
CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79
ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3
r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb
XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK
MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76
U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/
-----END CERTIFICATE REQUEST-----

View file

@ -0,0 +1,12 @@
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.docker.internal
DNS.2 = hs1
DNS.3 = hs2
DNS.4 = hs3
DNS.5 = hs4
IP.1 = 127.0.0.1
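For reference, the checked-in test key and certificate above can be regenerated against this extension file with stock openssl; a hedged sketch (subject fields copied from the existing certificate, file names assumed to match the repo):

```sh
# Generate a fresh key and signing request with the same subject
openssl req -new -newkey rsa:2048 -nodes \
  -keyout private_key.key -out signing_request.csr \
  -subj '/C=69/ST=42/O=woofers inc./CN=hs1'
# Self-sign it, applying the SANs from the extension file above
openssl x509 -req -in signing_request.csr -signkey private_key.key \
  -days 9999 -out certificate.crt -extfile openssl.cnf
```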

View file

@ -4,47 +4,51 @@
, stdenv
}:
lib.optionalAttrs stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = "";
}
//
{
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
([]
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
++ lib.optionals
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
[
"-l"
"c"
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/nixpkgs-unstable/pkgs/build-support/rust/lib/default.nix#L48-L68
//
(
let
inherit (rust.lib) envVars;
in
lib.optionalAttrs
(stdenv.targetPlatform.rust.rustcTarget
!= stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;

221
nix/pkgs/main/default.nix Normal file
View file

@ -0,0 +1,221 @@
# Dependencies (keep sorted)
{ craneLib
, inputs
, jq
, lib
, libiconv
, liburing
, pkgsBuildHost
, rocksdb
, removeReferencesTo
, rust
, rust-jemalloc-sys
, stdenv
# Options (keep sorted)
, all_features ? false
, default_features ? true
# default list of disabled features
, disable_features ? [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
]
, disable_release_max_log_level ? false
, features ? []
, profile ? "release"
# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag
# haswell is pretty much any x86 cpu made in the last 12 years, and
# supports modern CPU extensions that rocksdb can make use of.
# disable if trying to make a portable x86_64 build for very old hardware
, x86_64_haswell_target_optimised ? false
}:
let
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
lib.optionals all_features allFeatures);
disable_features' = disable_features ++ lib.optionals disable_release_max_log_level ["release_max_log_level"];
features'' = lib.subtractLists disable_features' features';
featureEnabled = feature : builtins.elem feature features'';
enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;
# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building its
# own. In order for this to work, we need to set flags on the build that match
# whatever flags tikv-jemalloc-sys was going to use. These are dependent on
# which features we enable in tikv-jemalloc-sys.
rust-jemalloc-sys' = (rust-jemalloc-sys.override {
# tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature
unprefixed = true;
}).overrideAttrs (old: {
configureFlags = old.configureFlags ++
# we dont need docs
[ "--disable-doc" ] ++
# we dont need cxx/C++ integration
[ "--disable-cxx" ] ++
# tikv-jemalloc-sys/profiling feature
lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++
# tikv-jemalloc-sys/stats feature
(if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]);
});
buildDepsOnlyEnv =
let
rocksdb' = (rocksdb.override {
jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;
# for some reason enableLiburing in nixpkgs rocksdb defaults to true
# which breaks Darwin entirely
enableLiburing = enableLiburing;
}).overrideAttrs (old: {
enableLiburing = enableLiburing;
cmakeFlags = (if x86_64_haswell_target_optimised then (lib.subtractLists [
# dont make a portable build if x86_64_haswell_target_optimised is enabled
"-DPORTABLE=1"
] old.cmakeFlags
++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ])
)
++ old.cmakeFlags;
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
in
{
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
stdenv;
});
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
} // buildDepsOnlyEnv // {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString x86_64_haswell_target_optimised
" -Ctarget-cpu=haswell";
};
commonAttrs = {
inherit
(craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
})
pname
version;
src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;
# Keep sorted
include = [
".cargo"
"Cargo.lock"
"Cargo.toml"
"src"
];
};
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [])
"--features " + (builtins.concatStringsSep "," features'');
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgsBuildHost.rustPlatform.bindgenHook
# We don't actually depend on `jq`, but crane's `buildPackage` does, while
# its `buildDepsOnly` doesn't. This causes those two derivations to have
# differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
# rebuilds of bindgen and its dependents.
jq
];
};
in
craneLib.buildPackage ( commonAttrs // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
env = buildDepsOnlyEnv;
});
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [])
"--features " + (builtins.concatStringsSep "," features'');
env = buildPackageEnv;
passthru = {
env = buildPackageEnv;
};
meta.mainProgram = commonAttrs.pname;
})

View file

@ -0,0 +1,46 @@
{ inputs
# Dependencies
, dockerTools
, lib
, main
, stdenv
, tini
}:
dockerTools.buildLayeredImage {
name = main.pname;
tag = "main";
created = "@${toString inputs.self.lastModified}";
contents = [
dockerTools.caCertificates
main
];
config = {
Entrypoint = if !stdenv.hostPlatform.isDarwin
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
then [ "${lib.getExe' tini "tini"}" "--" ]
else [];
Cmd = [
"${lib.getExe main}"
];
Env = [
"RUST_BACKTRACE=full"
];
Labels = {
"org.opencontainers.image.authors" = "June Clementine Strawberry <june@girlboss.ceo> and Jason Volk
<jason@zemos.net>";
"org.opencontainers.image.created" ="@${toString inputs.self.lastModified}";
"org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust";
"org.opencontainers.image.documentation" = "https://continuwuity.org/";
"org.opencontainers.image.licenses" = "Apache-2.0";
"org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or "";
"org.opencontainers.image.source" = "https://forgejo.ellis.link/continuwuation/continuwuity";
"org.opencontainers.image.title" = main.pname;
"org.opencontainers.image.url" = "https://continuwuity.org/";
"org.opencontainers.image.vendor" = "continuwuation";
"org.opencontainers.image.version" = main.version;
};
};
}

View file

@ -1,23 +0,0 @@
# Continuwuity for Debian
This document provides information about downloading and deploying the Debian package. You can also use this guide for other `apt`-based distributions such as Ubuntu.
### Installation
See the [generic deployment guide](../deploying/generic.md) for additional information about using the Debian package.
No `apt` repository is currently available. This feature is in development.
### Configuration
After installation, Continuwuity places the example configuration at `/etc/conduwuit/conduwuit.toml` as the default configuration file. The configuration file indicates which settings you must change before starting the service.
You can customize additional settings by uncommenting and modifying the configuration options in `/etc/conduwuit/conduwuit.toml`.
### Running
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/sbin/conduwuit`.
By default, this package assumes that Continuwuity runs behind a reverse proxy. The default configuration options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS. To federate properly, you must set up TLS certificates and certificate renewal.
For information about setting up a reverse proxy and TLS, consult online documentation and guides. The [generic deployment guide](../deploying/generic.md#setting-up-the-reverse-proxy) documents Caddy, which is the most user-friendly option for reverse proxy configuration.
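As a concrete example of the steps above, a minimal install sketch (the package file name is assumed; the paths match the ones this document names):

```sh
sudo apt install ./conduwuit_*.deb
# Edit the example config the package warns about before first start
sudoedit /etc/conduwuit/conduwuit.toml
sudo systemctl enable --now conduwuit.service
```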

View file

@ -1,20 +0,0 @@
#!/bin/sh
set -e
# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule
CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit
case "$1" in
configure)
echo ''
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
echo 'To start the server, run: systemctl start conduwuit.service'
echo ''
;;
esac
#DEBHELPER#

View file

@ -1,80 +0,0 @@
# This should be run using rpkg-util: https://docs.pagure.org/rpkg-util
# it requires Internet access and is not suitable for Fedora main repos
# TODO: rpkg-util is no longer maintained, find a replacement
Name: continuwuity
Version: {{{ git_repo_version }}}
Release: 1%{?dist}
Summary: Very cool Matrix chat homeserver written in Rust
License: Apache-2.0 AND MIT
URL: https://continuwuity.org
VCS: {{{ git_repo_vcs }}}
Source: {{{ git_repo_pack }}}
BuildRequires: cargo-rpm-macros >= 25
BuildRequires: systemd-rpm-macros
# Needed to build rust-librocksdb-sys
BuildRequires: clang
BuildRequires: liburing-devel
Requires: liburing
Requires: glibc
Requires: libstdc++
%global _description %{expand:
A cool hard fork of Conduit, a Matrix homeserver written in Rust}
%description %{_description}
%prep
{{{ git_repo_setup_macro }}}
%cargo_prep -N
# Perform an online build so Git dependencies can be retrieved
sed -i 's/^offline = true$//' .cargo/config.toml
%build
%cargo_build
# Here's the one legally required mystery incantation in this file.
# Some of our dependencies have source files which are (for some reason) marked as executable.
# Files in .cargo/registry/ are copied into /usr/src/ by the debuginfo machinery
# at the end of the build step, and then the BRP shebang mangling script checks
# the entire buildroot to find executable files, and fails the build because
# it thinks Rust's file attributes are shebangs because they start with `#!`.
# So we have to clear the executable bit on all of them before that happens.
find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
# TODO: this fails currently because it's forced to run in offline mode
# {cargo_license -- --no-dev} > LICENSE.dependencies
%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
%files
%license LICENSE
%license src/core/matrix/state_res/LICENSE
%doc CODE_OF_CONDUCT.md
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config %{_sysconfdir}/conduwuit/conduwuit.toml
%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service
# Do not create /var/lib/conduwuit, systemd will create it if necessary
%post
%systemd_post conduwuit.service
%preun
%systemd_preun conduwuit.service
%postun
%systemd_postun_with_restart conduwuit.service
%changelog
{{{ git_repo_changelog }}}
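Given the header comment, a local build sketch with rpkg-util (assuming `rpkg` is installed and the working tree is a git checkout; network access is required because the spec forces an online cargo build):

```sh
# Pack the git tree, expand the {{{ ... }}} macros, and build the RPM locally
rpkg local
```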

View file

@ -1,224 +0,0 @@
# Dependencies (keep sorted)
{ craneLib
, inputs
, jq
, lib
, libiconv
, liburing
, pkgsBuildHost
, rocksdb
, removeReferencesTo
, rust
, rust-jemalloc-sys
, stdenv
# Options (keep sorted)
, all_features ? false
, default_features ? true
# default list of disabled features
, disable_features ? [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
]
, disable_release_max_log_level ? false
, features ? [ ]
, profile ? "release"
# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag
# haswell is pretty much any x86 cpu made in the last 12 years, and
# supports modern CPU extensions that rocksdb can make use of.
# disable if trying to make a portable x86_64 build for very old hardware
, x86_64_haswell_target_optimised ? false
}:
let
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
lib.optionals all_features allFeatures);
disable_features' = disable_features ++ lib.optionals disable_release_max_log_level [ "release_max_log_level" ];
features'' = lib.subtractLists disable_features' features';
featureEnabled = feature: builtins.elem feature features'';
enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;
# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building its
# own. In order for this to work, we need to set flags on the build that match
# whatever flags tikv-jemalloc-sys was going to use. These are dependent on
# which features we enable in tikv-jemalloc-sys.
rust-jemalloc-sys' = (rust-jemalloc-sys.override {
# tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature
unprefixed = true;
}).overrideAttrs (old: {
configureFlags = old.configureFlags ++
# we dont need docs
[ "--disable-doc" ] ++
# we dont need cxx/C++ integration
[ "--disable-cxx" ] ++
# tikv-jemalloc-sys/profiling feature
lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++
# tikv-jemalloc-sys/stats feature
(if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]);
});
buildDepsOnlyEnv =
let
rocksdb' = (rocksdb.override {
jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;
# for some reason enableLiburing in nixpkgs rocksdb defaults to true
# which breaks Darwin entirely
inherit enableLiburing;
}).overrideAttrs (old: {
inherit enableLiburing;
cmakeFlags = (if x86_64_haswell_target_optimised then
(lib.subtractLists [
# dont make a portable build if x86_64_haswell_target_optimised is enabled
"-DPORTABLE=1"
]
old.cmakeFlags
++ [ "-DPORTABLE=haswell" ]) else [ "-DPORTABLE=1" ]
)
++ old.cmakeFlags;
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
in
{
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
stdenv;
});
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
} // buildDepsOnlyEnv // {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString x86_64_haswell_target_optimised
" -Ctarget-cpu=haswell";
};
commonAttrs = {
inherit
(craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
})
pname
version;
src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;
# Keep sorted
include = [
".cargo"
"Cargo.lock"
"Cargo.toml"
"src"
"xtask"
];
};
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgsBuildHost.rustPlatform.bindgenHook
# We don't actually depend on `jq`, but crane's `buildPackage` does, while
# its `buildDepsOnly` doesn't. This causes those two derivations to have
# differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
# rebuilds of bindgen and its dependents.
jq
];
};
in
craneLib.buildPackage (commonAttrs // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
env = buildDepsOnlyEnv;
});
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
env = buildPackageEnv;
passthru = {
env = buildPackageEnv;
};
meta.mainProgram = commonAttrs.pname;
})

View file

@ -1,73 +1,26 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended", "replacements:all"],
"osvVulnerabilityAlerts": true,
"lockFileMaintenance": {
"enabled": true,
"schedule": ["at any time"]
},
"nix": {
"enabled": true
},
"labels": ["Dependencies", "Dependencies/Renovate"],
"ignoreDeps": [
"tikv-jemallocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"opentelemetry",
"opentelemetry_sdk",
"opentelemetry-jaeger",
"tracing-opentelemetry"
],
"github-actions": {
"enabled": true,
"managerFilePatterns": [
"/(^|/)\\.forgejo/workflows/[^/]+\\.ya?ml$/",
"/(^|/)\\.forgejo/actions/[^/]+/action\\.ya?ml$/",
"/(^|/)\\.github/workflows/[^/]+\\.ya?ml$/",
"/(^|/)\\.github/actions/[^/]+/action\\.ya?ml$/"
]
},
"packageRules": [
{
"description": "Batch minor and patch GitHub Actions updates",
"matchManagers": ["github-actions"],
"matchUpdateTypes": ["minor", "patch"],
"groupName": "github-actions-non-major"
},
{
"description": "Group Rust toolchain updates into a single PR",
"matchManagers": ["custom.regex"],
"matchPackageNames": ["rust", "rustc", "cargo"],
"groupName": "rust-toolchain"
},
{
"description": "Group lockfile updates into a single PR",
"matchUpdateTypes": ["lockFileMaintenance"],
"groupName": "lockfile-maintenance"
},
{
"description": "Batch patch-level Rust dependency updates",
"matchManagers": ["cargo"],
"matchUpdateTypes": ["patch"],
"groupName": "rust-patch-updates"
},
{
"matchManagers": ["cargo"],
"prConcurrentLimit": 5
}
],
"customManagers": [
{
"customType": "regex",
"description": "Update _VERSION variables in Dockerfiles",
"managerFilePatterns": [
"/(^|/)([Dd]ocker|[Cc]ontainer)file[^/]*$/",
"/(^|/|\\.)([Dd]ocker|[Cc]ontainer)file$/"
],
"matchStrings": [
"# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s+(?:ENV|ARG)\\s+[A-Za-z0-9_]+?_VERSION[ =][\"']?(?<currentValue>.+?)[\"']?\\s"
]
}
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended"
],
"lockFileMaintenance": {
"enabled": true,
"schedule": [
"at any time"
]
},
"nix": {
"enabled": true
},
"labels": [
"dependencies",
"github_actions"
],
"ignoreDeps": [
"tikv-jemllocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"opentelemetry-rust",
"tracing-opentelemetry"
]
}

View file

@ -9,16 +9,21 @@
# If you're having trouble making the relevant changes, bug a maintainer.
[toolchain]
channel = "1.86.0"
profile = "minimal"
channel = "1.89.0"
components = [
# For rust-analyzer
"rust-src",
"rust-analyzer",
# For CI and editors
"rustfmt",
"clippy",
# you have to install rustfmt nightly yourself (if you're not on NixOS)
#
# The rust-toolchain.toml file doesn't provide any syntax for specifying components from different toolchains
# "rustfmt"
]
targets = [
#"x86_64-apple-darwin",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"aarch64-unknown-linux-musl",
"aarch64-unknown-linux-gnu",
#"aarch64-apple-darwin",
]
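Per the comment in the components list, nightly rustfmt has to be installed out of band because rust-toolchain.toml cannot pin components from a different toolchain. A hedged sketch with rustup:

```sh
rustup toolchain install nightly --profile minimal
rustup component add rustfmt --toolchain nightly
cargo +nightly fmt --all
```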

View file

@ -85,11 +85,10 @@ futures.workspace = true
log.workspace = true
ruma.workspace = true
serde_json.workspace = true
serde_yml.workspace = true
serde_yaml.workspace = true
tokio.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
ctor.workspace = true
[lints]
workspace = true

View file

@ -9,8 +9,8 @@ use crate::{
};
#[derive(Debug, Parser)]
#[command(name = conduwuit_core::name(), version = conduwuit_core::version())]
pub enum AdminCommand {
#[command(name = "conduwuit", version = conduwuit::version())]
pub(super) enum AdminCommand {
#[command(subcommand)]
/// - Commands for managing appservices
Appservices(AppserviceCommand),

View file

@ -16,7 +16,7 @@ pub(super) async fn register(&self) -> Result {
let range = 1..checked!(body_len - 1)?;
let appservice_config_body = body[range].join("\n");
let parsed_config = serde_yml::from_str(&appservice_config_body);
let parsed_config = serde_yaml::from_str(&appservice_config_body);
match parsed_config {
| Err(e) => return Err!("Could not parse appservice config as YAML: {e}"),
| Ok(registration) => match self
@ -57,7 +57,7 @@ pub(super) async fn show_appservice_config(&self, appservice_identifier: String)
{
| None => return Err!("Appservice does not exist."),
| Some(config) => {
let config_str = serde_yml::to_string(&config)?;
let config_str = serde_yaml::to_string(&config)?;
write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```")
},
}

View file

@ -7,7 +7,7 @@ use crate::admin_command_dispatch;
#[derive(Debug, Subcommand)]
#[admin_command_dispatch]
pub enum AppserviceCommand {
pub(super) enum AppserviceCommand {
/// - Register an appservice using its registration YAML
///
/// This command needs a YAML generated by an appservice (such as a bridge),

View file

@ -7,6 +7,6 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum CheckCommand {
pub(super) enum CheckCommand {
CheckAllUsers,
}

View file

@ -7,14 +7,13 @@ use futures::{
io::{AsyncWriteExt, BufWriter},
lock::Mutex,
};
use ruma::{EventId, UserId};
use ruma::EventId;
pub(crate) struct Context<'a> {
pub(crate) services: &'a Services,
pub(crate) body: &'a [&'a str],
pub(crate) timer: SystemTime,
pub(crate) reply_id: Option<&'a EventId>,
pub(crate) sender: Option<&'a UserId>,
pub(crate) output: Mutex<BufWriter<Vec<u8>>>,
}
@ -37,10 +36,4 @@ impl Context<'_> {
output.write_all(s.as_bytes()).map_err(Into::into).await
})
}
/// Get the sender as a string, or service user ID if not available
pub(crate) fn sender_or_service_user(&self) -> &UserId {
self.sender
.unwrap_or_else(|| self.services.globals.server_user.as_ref())
}
}

View file

@ -7,10 +7,7 @@ use std::{
use conduwuit::{
Err, Result, debug_error, err, info,
matrix::{
Event,
pdu::{PduEvent, PduId, RawPduId},
},
matrix::pdu::{PduEvent, PduId, RawPduId},
trace, utils,
utils::{
stream::{IterStream, ReadyExt},
@ -22,7 +19,7 @@ use futures::{FutureExt, StreamExt, TryStreamExt};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
api::federation::event::get_room_state, events::AnyStateEvent, serde::Raw,
api::federation::event::get_room_state,
};
use service::rooms::{
short::{ShortEventId, ShortRoomId},
@ -242,11 +239,10 @@ pub(super) async fn get_remote_pdu(
})
.await
{
| Err(e) => {
| Err(e) =>
return Err!(
"Remote server did not have PDU or failed sending request to remote server: {e}"
);
},
),
| Ok(response) => {
let json: CanonicalJsonObject =
serde_json::from_str(response.pdu.get()).map_err(|e| {
@ -281,8 +277,15 @@ pub(super) async fn get_remote_pdu(
vec![(event_id, value, room_id)]
};
info!("Attempting to handle event ID {event_id} as backfilled PDU");
self.services
.rooms
.timeline
.backfill_pdu(&server, response.pdu)
.await?;
let text = serde_json::to_string_pretty(&json)?;
let msg = "Got PDU from specified server:";
let msg = "Got PDU from specified server and handled as backfilled";
write!(self, "{msg}. Event body:\n```json\n{text}\n```")
},
}
@ -292,12 +295,12 @@ pub(super) async fn get_remote_pdu(
#[admin_command]
pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
let room_id = self.services.rooms.alias.resolve(&room).await?;
let room_state: Vec<Raw<AnyStateEvent>> = self
let room_state: Vec<_> = self
.services
.rooms
.state_accessor
.room_state_full_pdus(&room_id)
.map_ok(Event::into_format)
.map_ok(PduEvent::into_state_event)
.try_collect()
.await?;
@ -381,9 +384,8 @@ pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool)
.reload
.reload(&old_filter_layer, Some(handles))
{
| Err(e) => {
return Err!("Failed to modify and reload the global tracing log level: {e}");
},
| Err(e) =>
return Err!("Failed to modify and reload the global tracing log level: {e}"),
| Ok(()) => {
let value = &self.services.server.config.log;
let out = format!("Successfully changed log level back to config value {value}");
@ -405,12 +407,9 @@ pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool)
.reload
.reload(&new_filter_layer, Some(handles))
{
| Ok(()) => {
return self.write_str("Successfully changed log level").await;
},
| Err(e) => {
return Err!("Failed to modify and reload the global tracing log level: {e}");
},
| Ok(()) => return self.write_str("Successfully changed log level").await,
| Err(e) =>
return Err!("Failed to modify and reload the global tracing log level: {e}"),
}
}
@ -530,7 +529,6 @@ pub(super) async fn force_set_room_state_from_server(
&self,
room_id: OwnedRoomId,
server_name: OwnedServerName,
at_event: Option<OwnedEventId>,
) -> Result {
if !self
.services
@ -542,18 +540,13 @@ pub(super) async fn force_set_room_state_from_server(
return Err!("We are not participating in the room / we don't know about the room ID.");
}
let at_event_id = match at_event {
| Some(event_id) => event_id,
| None => self
.services
.rooms
.timeline
.latest_pdu_in_room(&room_id)
.await
.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?
.event_id()
.to_owned(),
};
let first_pdu = self
.services
.rooms
.timeline
.latest_pdu_in_room(&room_id)
.await
.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?;
let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
@ -564,7 +557,7 @@ pub(super) async fn force_set_room_state_from_server(
.sending
.send_federation_request(&server_name, get_room_state::v1::Request {
room_id: room_id.clone(),
event_id: at_event_id,
event_id: first_pdu.event_id.clone(),
})
.await?;

View file

@ -11,7 +11,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum DebugCommand {
pub(super) enum DebugCommand {
/// - Echo input of admin command
Echo {
message: Vec<String>,
@ -32,13 +32,13 @@ pub enum DebugCommand {
/// the command.
ParsePdu,
/// - Retrieve and print a PDU by EventID from the Continuwuity database
/// - Retrieve and print a PDU by EventID from the conduwuit database
GetPdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: OwnedEventId,
},
/// - Retrieve and print a PDU by PduId from the Continuwuity database
/// - Retrieve and print a PDU by PduId from the conduwuit database
GetShortPdu {
/// Shortroomid integer
shortroomid: ShortRoomId,
@ -177,12 +177,9 @@ pub enum DebugCommand {
room_id: OwnedRoomId,
/// The server we will use to query the room state for
server_name: OwnedServerName,
/// The event ID of the latest known PDU in the room. Will be found
/// automatically if not provided.
event_id: Option<OwnedEventId>,
},
/// - Runs a server name through Continuwuity's true destination resolution
/// - Runs a server name through conduwuit's true destination resolution
/// process
///
/// Useful for debugging well-known issues

View file

@ -4,7 +4,7 @@ use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, clap::Subcommand)]
pub enum TesterCommand {
pub(crate) enum TesterCommand {
Panic,
Failure,
Tester,

View file

@ -26,7 +26,8 @@ pub(super) async fn incoming_federation(&self) -> Result {
.rooms
.event_handler
.federation_handletime
.read();
.read()
.expect("locked");
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
for (r, (e, i)) in map.iter() {

View file

@ -8,7 +8,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum FederationCommand {
pub(super) enum FederationCommand {
/// - List all rooms we are currently handling an incoming pdu from
IncomingFederation,

View file

@ -9,7 +9,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum MediaCommand {
pub(super) enum MediaCommand {
/// - Deletes a single media file from our database and on the filesystem
/// via a single MXC URL or event ID (not redacted)
Delete {
@ -90,10 +90,10 @@ pub enum MediaCommand {
#[arg(short, long, default_value("10000"))]
timeout: u32,
#[arg(long, default_value("800"))]
#[arg(short, long, default_value("800"))]
width: u32,
#[arg(long, default_value("800"))]
#[arg(short, long, default_value("800"))]
height: u32,
},
}

View file

@ -29,17 +29,17 @@ pub(crate) use crate::{context::Context, utils::get_room_info};
pub(crate) const PAGE_SIZE: usize = 100;
use ctor::{ctor, dtor};
conduwuit::mod_ctor! {}
conduwuit::mod_dtor! {}
conduwuit::rustc_flags_capture! {}
pub use crate::admin::AdminCommand;
/// Install the admin command processor
pub async fn init(admin_service: &service::admin::Service) {
_ = admin_service.complete.write().insert(processor::complete);
_ = admin_service
.complete
.write()
.expect("locked for writing")
.insert(processor::complete);
_ = admin_service
.handle
.write()
@ -50,5 +50,9 @@ pub async fn init(admin_service: &service::admin::Service) {
/// Uninstall the admin command handler
pub async fn fini(admin_service: &service::admin::Service) {
_ = admin_service.handle.write().await.take();
_ = admin_service.complete.write().take();
_ = admin_service
.complete
.write()
.expect("locked for writing")
.take();
}

View file

@ -1,8 +1,14 @@
use std::{fmt::Write, mem::take, panic::AssertUnwindSafe, sync::Arc, time::SystemTime};
use std::{
fmt::Write,
mem::take,
panic::AssertUnwindSafe,
sync::{Arc, Mutex},
time::SystemTime,
};
use clap::{CommandFactory, Parser};
use conduwuit::{
Error, Result, SyncMutex, debug, error,
Error, Result, debug, error,
log::{
capture,
capture::Capture,
@ -57,7 +63,6 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
body: &body,
timer: SystemTime::now(),
reply_id: input.reply_id.as_deref(),
sender: input.sender.as_deref(),
output: BufWriter::new(Vec::new()).into(),
};
@ -88,7 +93,8 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
#[allow(clippy::result_large_err)]
fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
let link = "Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 🥺";
let link =
"Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 🥺";
let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}");
let content = RoomMessageEventContent::notice_markdown(msg);
error!("Panic while processing command: {error:?}");
@ -117,7 +123,7 @@ async fn process(
let mut output = String::new();
// Prepend the logs only if any were captured
let logs = logs.lock();
let logs = logs.lock().expect("locked");
if logs.lines().count() > 2 {
writeln!(&mut output, "{logs}").expect("failed to format logs to command output");
}
@ -126,7 +132,7 @@ async fn process(
(result, output)
}
fn capture_create(context: &Context<'_>) -> (Arc<Capture>, Arc<SyncMutex<String>>) {
fn capture_create(context: &Context<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
let env_config = &context.services.server.config.admin_log_capture;
let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| {
warn!("admin_log_capture filter invalid: {e:?}");
@ -146,7 +152,7 @@ fn capture_create(context: &Context<'_>) -> (Arc<Capture>, Arc<SyncMutex<String>
data.level() <= log_level && data.our_modules() && data.scope.contains(&"admin")
};
let logs = Arc::new(SyncMutex::new(
let logs = Arc::new(Mutex::new(
collect_stream(|s| markdown_table_head(s)).expect("markdown table header"),
));

View file

@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/account_data.rs
pub enum AccountDataCommand {
pub(crate) enum AccountDataCommand {
/// - Returns all changes to the account data that happened after `since`.
ChangesSince {
/// Full user ID

View file

@ -6,7 +6,7 @@ use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/appservice.rs
pub enum AppserviceCommand {
pub(crate) enum AppserviceCommand {
/// - Gets the appservice registration info/details from the ID as a string
GetRegistration {
/// Appservice registration ID

View file

@ -6,7 +6,7 @@ use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/globals.rs
pub enum GlobalsCommand {
pub(crate) enum GlobalsCommand {
DatabaseVersion,
CurrentCount,

View file

@ -27,7 +27,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
/// Query tables from database
pub enum QueryCommand {
pub(super) enum QueryCommand {
/// - account_data.rs iterators and getters
#[command(subcommand)]
AccountData(AccountDataCommand),

Some files were not shown because too many files have changed in this diff.