mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git
synced 2025-09-09 13:53:03 +02:00

Compare commits: 8bff0179fe ... 8c0a54e4ba (21 commits)
Commits in this range:
8c0a54e4ba
81257f5bcd
2cedf0d2e1
84fdcd326a
d640853f9d
fff9629b0f
1a3107c20a
969d7cbb66
cd238b05de
c0e3829fed
1d7dda6cf5
6f19931c5b
2516e783ba
fdf5771387
58bbc0e676
0d58e660a2
e7124edb73
d19e0f0d97
467aed3028
99b44bbf09
95aeff8cdc
43 changed files with 862 additions and 801 deletions
@@ -13,6 +13,12 @@ outputs:
  slug:
    description: 'Combined OS slug (e.g. Ubuntu-22.04)'
    value: ${{ steps.detect.outputs.slug }}
  node_major:
    description: 'Major version of Node.js if available (e.g. 22)'
    value: ${{ steps.detect.outputs.node_major }}
  node_version:
    description: 'Full Node.js version if available (e.g. 22.19.0)'
    value: ${{ steps.detect.outputs.node_version }}

runs:
  using: composite

@@ -30,7 +36,20 @@ runs:
      # Create combined slug
      OS_SLUG="${OS_NAME}-${OS_VERSION}"

      # Set outputs
      # Detect Node.js version if available
      if command -v node >/dev/null 2>&1; then
        NODE_VERSION=$(node --version | sed 's/v//')
        NODE_MAJOR=$(echo $NODE_VERSION | cut -d. -f1)
        echo "node_version=${NODE_VERSION}" >> $GITHUB_OUTPUT
        echo "node_major=${NODE_MAJOR}" >> $GITHUB_OUTPUT
        echo "🔍 Detected Node.js: v${NODE_VERSION}"
      else
        echo "node_version=" >> $GITHUB_OUTPUT
        echo "node_major=" >> $GITHUB_OUTPUT
        echo "🔍 Node.js not found"
      fi

      # Set OS outputs
      echo "name=${OS_NAME}" >> $GITHUB_OUTPUT
      echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT
      echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT
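The Node.js probe added above writes empty outputs when no interpreter is on PATH. As a quick sanity check, the same logic the step runs can be exercised by hand; a small sketch using only the commands shown in the hunk:

    # Same probe as the composite action step; prints the full and major versions.
    if command -v node >/dev/null 2>&1; then
      NODE_VERSION=$(node --version | sed 's/v//')
      echo "full=${NODE_VERSION} major=$(echo "$NODE_VERSION" | cut -d. -f1)"
    else
      echo "Node.js not found"
    fi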
@@ -2,18 +2,12 @@ name: sccache
description: |
  Install sccache for caching builds in GitHub Actions.

inputs:
  token:
    description: 'A Github PAT'
    required: false

runs:
  using: composite
  steps:
    - name: Install sccache
      uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
      with:
        token: ${{ inputs.token }}
      uses: https://git.tomfos.tr/tom/sccache-action@v1
    - name: Configure sccache
      uses: https://github.com/actions/github-script@v7
      with:
@@ -88,19 +88,9 @@ runs:
        # Shared toolchain cache across all Rust versions
        key: toolchain-${{ steps.runner-os.outputs.slug }}

    - name: Debug GitHub token availability
      shell: bash
      run: |
        if [ -z "${{ inputs.github-token }}" ]; then
          echo "⚠️ No GitHub token provided - sccache will use fallback download method"
        else
          echo "✅ GitHub token provided for sccache"
        fi

    - name: Setup sccache
      uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
      with:
        token: ${{ inputs.github-token }}
      uses: https://git.tomfos.tr/tom/sccache-action@v1

    - name: Cache build artifacts
      id: build-cache
@@ -49,10 +49,23 @@ jobs:
          cp ./docs/static/_headers ./public/_headers
          echo "Copied .well-known files and _headers to ./public"

      - name: Detect runner environment
        id: runner-env
        uses: ./.forgejo/actions/detect-runner-os

      - name: Setup Node.js
        if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
        uses: https://github.com/actions/setup-node@v4
        with:
          node-version: 20
          node-version: 22

      - name: Cache npm dependencies
        uses: actions/cache@v3
        with:
          path: ~/.npm
          key: ${{ steps.runner-env.outputs.slug }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ steps.runner-env.outputs.slug }}-node-

      - name: Install dependencies
        run: npm install --save-dev wrangler@latest
@@ -1,7 +1,11 @@
name: Checks / Prek

on:
  pull_request:
  push:
    branches:
      - main
  workflow_dispatch:

permissions:
  contents: read
@@ -3,15 +3,25 @@ concurrency:
  group: "release-image-${{ github.ref }}"

on:
  push:
  pull_request:
    paths-ignore:
      - "*.md"
      - "**/*.md"
      - ".gitlab-ci.yml"
      - ".gitignore"
      - "renovate.json"
      - "debian/**"
      - "docker/**"
      - "pkg/**"
      - "docs/**"
  push:
    branches:
      - main
    paths-ignore:
      - "*.md"
      - "**/*.md"
      - ".gitlab-ci.yml"
      - ".gitignore"
      - "renovate.json"
      - "pkg/**"
      - "docs/**"
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:
@@ -43,6 +53,9 @@ jobs:
            let images = []
            if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
              images.push(builtinImage)
            } else {
              // Fallback to official registry for forks/PRs without credentials
              images.push('forgejo.ellis.link/continuwuation/continuwuity')
            }
            core.setOutput('images', images.join("\n"))
            core.setOutput('images_list', images.join(","))
@@ -93,10 +106,15 @@ jobs:

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
          driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
          endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Login to builtin registry
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: docker/login-action@v3
        with:
          registry: ${{ env.BUILTIN_REGISTRY }}
@@ -183,7 +201,7 @@ jobs:
          context: .
          file: "docker/Dockerfile"
          build-args: |
            GIT_COMMIT_HASH=${{ github.sha }})
            GIT_COMMIT_HASH=${{ github.sha }}
            GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
            GIT_REMOTE_URL=${{github.event.repository.html_url }}
            GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
@@ -193,7 +211,7 @@ jobs:
          cache-from: type=gha
          # cache-to: type=gha,mode=max
          sbom: true
          outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
          outputs: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', needs.define-variables.outputs.images_list) || format('type=image,"name={0}",push=false', needs.define-variables.outputs.images_list) }}
        env:
          SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}
@@ -235,6 +253,7 @@ jobs:
    needs: [define-variables, build-image]
    steps:
      - name: Download digests
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: forgejo/download-artifact@v4
        with:
          path: /tmp/digests
@@ -242,6 +261,7 @@ jobs:
          merge-multiple: true
      # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Login to builtin registry
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: docker/login-action@v3
        with:
          registry: ${{ env.BUILTIN_REGISTRY }}
@@ -249,9 +269,15 @@ jobs:
          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: docker/setup-buildx-action@v3
        with:
          # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
          driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
          endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}

      - name: Extract metadata (tags) for Docker
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        id: meta
        uses: docker/metadata-action@v5
        with:
@@ -269,6 +295,7 @@ jobs:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index

      - name: Create manifest list and push
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        working-directory: /tmp/digests
        env:
          IMAGES: ${{needs.define-variables.outputs.images}}
@@ -286,6 +313,7 @@ jobs:
          done

      - name: Inspect image
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        env:
          IMAGES: ${{needs.define-variables.outputs.images}}
        shell: bash
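The run bodies of the manifest and inspect steps are elided in this hunk. For orientation only, a digest-merge step of this shape typically looks like the sketch below; the tag handling and variable names here are hypothetical placeholders, not this workflow's exact script:

    # Illustrative only: combine the downloaded per-arch digest files into one
    # manifest list and verify it. IMAGE and TAG are hypothetical placeholders.
    IMAGE=forgejo.ellis.link/continuwuation/continuwuity
    TAG=main
    cd /tmp/digests
    docker buildx imagetools create --tag "$IMAGE:$TAG" $(printf "$IMAGE@sha256:%s " *)
    docker buildx imagetools inspect "$IMAGE:$TAG"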
.mailmap (1 line changed)

@@ -13,3 +13,4 @@ Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
Timo Kösters <timo@koesters.xyz>
x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>
Cargo.lock (generated, 940 lines changed)
File diff suppressed because it is too large.
Cargo.toml (54 lines changed)

@@ -48,15 +48,15 @@ features = ["ffi", "std", "union"]
version = "0.6.2"

[workspace.dependencies.ctor]
version = "0.2.9"
version = "0.5.0"

[workspace.dependencies.cargo_toml]
version = "0.21"
version = "0.22"
default-features = false
features = ["features"]

[workspace.dependencies.toml]
version = "0.8.14"
version = "0.9.5"
default-features = false
features = ["parse"]
@@ -411,25 +411,28 @@ default-features = false

# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
[workspace.dependencies.opentelemetry]
version = "0.21.0"
version = "0.30.0"

[workspace.dependencies.tracing-flame]
version = "0.2.0"

[workspace.dependencies.tracing-opentelemetry]
version = "0.22.0"
version = "0.31.0"

[workspace.dependencies.opentelemetry_sdk]
version = "0.21.2"
version = "0.30.0"
features = ["rt-tokio"]

[workspace.dependencies.opentelemetry-jaeger]
version = "0.20.0"
features = ["rt-tokio"]
[workspace.dependencies.opentelemetry-otlp]
version = "0.30.0"
features = ["http", "trace", "logs", "metrics"]

[workspace.dependencies.opentelemetry-jaeger-propagator]
version = "0.30.0"

# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.37.0"
version = "0.42.0"
default-features = false
features = [
	"backtrace",
@@ -445,9 +448,9 @@ features = [
]

[workspace.dependencies.sentry-tracing]
version = "0.37.0"
version = "0.42.0"
[workspace.dependencies.sentry-tower]
version = "0.37.0"
version = "0.42.0"

# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
@@ -476,7 +479,7 @@ features = ["use_std"]
version = "0.4"

[workspace.dependencies.nix]
version = "0.29.0"
version = "0.30.1"
default-features = false
features = ["resource"]
@@ -498,7 +501,7 @@ version = "0.4.3"
default-features = false

[workspace.dependencies.termimad]
version = "0.31.2"
version = "0.34.0"
default-features = false

[workspace.dependencies.checked_ops]
@@ -536,11 +539,11 @@ version = "0.2"
version = "0.2"

[workspace.dependencies.minicbor]
version = "0.26.3"
version = "2.1.1"
features = ["std"]

[workspace.dependencies.minicbor-serde]
version = "0.4.1"
version = "0.6.0"
features = ["std"]

[workspace.dependencies.maplit]
@@ -764,25 +767,6 @@ incremental = true

[profile.dev.package.conduwuit_core]
inherits = "dev"
#rustflags = [
# '--cfg', 'conduwuit_mods',
# '-Ztime-passes',
# '-Zmir-opt-level=0',
# '-Ztls-model=initial-exec',
# '-Cprefer-dynamic=true',
# '-Zstaticlib-prefer-dynamic=true',
# '-Zstaticlib-allow-rdylib-deps=true',
# '-Zpacked-bundled-libs=false',
# '-Zplt=true',
# '-Clink-arg=-Wl,--as-needed',
# '-Clink-arg=-Wl,--allow-shlib-undefined',
# '-Clink-arg=-Wl,-z,lazy',
# '-Clink-arg=-Wl,-z,unique',
# '-Clink-arg=-Wl,-z,nodlopen',
# '-Clink-arg=-Wl,-z,nodelete',
#]
[profile.dev.package.xtask-generate-commands]
inherits = "dev"
[profile.dev.package.conduwuit]
inherits = "dev"
#rustflags = [
@@ -1,83 +0,0 @@
[Unit]

Description=Continuwuity - Matrix homeserver
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
RequiresMountsFor=/var/lib/private/conduwuit
Alias=matrix-conduwuit.service

[Service]
DynamicUser=yes
Type=notify-reload
ReloadSignal=SIGUSR1

TTYPath=/dev/tty25
DeviceAllow=char-tty
StandardInput=tty-force
StandardOutput=tty
StandardError=journal+console

Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"

TTYReset=yes
# uncomment to allow buffer to be cleared every restart
TTYVTDisallocate=no

TTYColumns=120
TTYRows=40

AmbientCapabilities=
CapabilityBoundingSet=

DevicePolicy=closed
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
#ProcSubset=pid
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectProc=invisible
ProtectSystem=strict
PrivateDevices=yes
PrivateMounts=yes
PrivateTmp=yes
PrivateUsers=yes
PrivateIPC=yes
RemoveIPC=yes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
StateDirectory=conduwuit

RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750

Environment=CONTINUWUITY_CONFIG=%d/config.toml
LoadCredential=config.toml:/etc/conduwuit/conduwuit.toml
BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit

ExecStart=/usr/bin/conduwuit
Restart=on-failure
RestartSec=5

TimeoutStopSec=4m
TimeoutStartSec=4m

StartLimitInterval=1m
StartLimitBurst=5

[Install]
WantedBy=multi-user.target
@@ -79,9 +79,11 @@
# This is the only directory where continuwuity will save its data,
# including media. Note: this was previously "/var/lib/matrix-conduit".
#
# YOU NEED TO EDIT THIS.
# YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
# `systemd` service. The service file sets it to `/var/lib/conduwuit`
# using an environment variable and also grants write access.
#
# example: "/var/lib/continuwuity"
# example: "/var/lib/conduwuit"
#
#database_path =
@@ -589,13 +591,19 @@
#
#default_room_version = 11

# This item is undocumented. Please contribute documentation for it.
# Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
# Jaeger exporter. Traces will be sent via OTLP to a collector (such as
# Jaeger) that supports the OpenTelemetry Protocol.
#
#allow_jaeger = false
# Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
# environment variable (defaults to http://localhost:4318).
#
#allow_otlp = false

# This item is undocumented. Please contribute documentation for it.
# Filter for OTLP tracing spans. This controls which spans are exported
# to the OTLP collector.
#
#jaeger_filter = "info"
#otlp_filter = "info"

# If the 'perf_measurements' compile-time feature is enabled, enables
# collecting folded stack trace profile of tracing spans using
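A minimal sketch of turning the new exporter on, assuming the binary was built with the perf_measurements feature; the commented TOML keys go in conduwuit.toml, and the old Jaeger-named keys keep working through the serde aliases shown in the config.rs hunk further below:

    # OTLP collector endpoint; http://localhost:4318 is the documented default.
    export OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318

    # In /etc/conduwuit/conduwuit.toml:
    #   allow_otlp = true        # replaces allow_jaeger (still accepted as an alias)
    #   otlp_filter = "info"     # replaces jaeger_filter (still accepted as an alias)

    /usr/bin/conduwuit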
debian/conduwuit.service (vendored, 70 lines removed)

@@ -1,70 +0,0 @@
[Unit]

Description=Continuwuity - Matrix homeserver
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
Alias=matrix-conduwuit.service

[Service]
DynamicUser=yes
User=conduwuit
Group=conduwuit
Type=notify

Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"

Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"

ExecStart=/usr/sbin/conduwuit

ReadWritePaths=/var/lib/conduwuit /etc/conduwuit

AmbientCapabilities=
CapabilityBoundingSet=

DevicePolicy=closed
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
#ProcSubset=pid
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectProc=invisible
ProtectSystem=strict
PrivateDevices=yes
PrivateMounts=yes
PrivateTmp=yes
PrivateUsers=yes
PrivateIPC=yes
RemoveIPC=yes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
#StateDirectory=conduwuit

RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750

Restart=on-failure
RestartSec=5

TimeoutStopSec=2m
TimeoutStartSec=2m

StartLimitInterval=1m
StartLimitBurst=5

[Install]
WantedBy=multi-user.target
debian/postinst (vendored, 44 lines removed)

@@ -1,44 +0,0 @@
#!/bin/sh
set -e

# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule

CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit

case "$1" in
  configure)
    # Create the `conduwuit` user if it does not exist yet.
    if ! getent passwd conduwuit > /dev/null ; then
      echo 'Adding system user for the conduwuit Matrix homeserver' 1>&2
      adduser --system --group --quiet \
        --home "$CONDUWUIT_DATABASE_PATH" \
        --disabled-login \
        --shell "/usr/sbin/nologin" \
        conduwuit
    fi

    # Create the database path if it does not exist yet and fix up ownership
    # and permissions for the config.
    mkdir -v -p "$CONDUWUIT_DATABASE_PATH"

    # symlink the previous location for compatibility if it does not exist yet.
    if ! test -L "/var/lib/matrix-conduit" ; then
      ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
    fi

    chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH"
    chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH"

    chmod -v 740 "$CONDUWUIT_DATABASE_PATH"

    echo ''
    echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
    echo 'To start the server, run: systemctl start conduwuit.service'
    echo ''

    ;;
esac

#DEBHELPER#
@@ -199,32 +199,57 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
EOF

# Extract dynamically linked dependencies
RUN <<EOF
RUN <<'DEPS_EOF'
set -o xtrace
mkdir /out/libs
mkdir /out/libs-root
mkdir /out/libs /out/libs-root

# Process each binary
for BINARY in /out/sbin/*; do
    lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
    if lddtree_output=$(lddtree "$BINARY" 2>/dev/null) && [ -n "$lddtree_output" ]; then
        echo "$lddtree_output" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | \
            awk '{dest = ($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2; print "install -D " $1 " " dest}' | \
            while read cmd; do eval "$cmd"; done
    fi
done
EOF

# Show what will be copied to runtime
echo "=== Libraries being copied to runtime image:"
find /out/libs* -type f 2>/dev/null | sort || echo "No libraries found"
DEPS_EOF

FROM ubuntu:latest AS prepper

# Create layer structure
RUN mkdir -p /layer1/etc/ssl/certs \
    /layer2/usr/lib \
    /layer3/sbin /layer3/sbom

# Copy SSL certs and root-path libraries to layer1 (ultra-stable)
COPY --from=base /etc/ssl/certs /layer1/etc/ssl/certs
COPY --from=builder /out/libs-root/ /layer1/

# Copy application libraries to layer2 (semi-stable)
COPY --from=builder /out/libs/ /layer2/usr/lib/

# Copy binaries and SBOM to layer3 (volatile)
COPY --from=builder /out/sbin/ /layer3/sbin/
COPY --from=builder /out/sbom/ /layer3/sbom/

# Fix permissions after copying
RUN chmod -R 755 /layer1 /layer2 /layer3

FROM scratch

WORKDIR /

# Copy root certs for tls into image
# You can also mount the certs from the host
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
COPY --from=base /etc/ssl/certs /etc/ssl/certs
# Copy ultra-stable layer (SSL certs, system libraries)
COPY --from=prepper /layer1/ /

# Copy our build
COPY --from=builder /out/sbin/ /sbin/
# Copy SBOM
COPY --from=builder /out/sbom/ /sbom/
# Copy semi-stable layer (application libraries)
COPY --from=prepper /layer2/ /

# Copy dynamic libraries to root
COPY --from=builder /out/libs-root/ /
COPY --from=builder /out/libs/ /usr/lib/
# Copy volatile layer (binaries, SBOM)
COPY --from=prepper /layer3/ /

# Inform linker where to find libraries
ENV LD_LIBRARY_PATH=/usr/lib
@@ -9,24 +9,11 @@

</details>

## Debian systemd unit file
## systemd unit file

<details>
<summary>Debian systemd unit file</summary>
<summary>systemd unit file</summary>

```
{{#include ../../debian/conduwuit.service}}
{{#include ../../pkg/conduwuit.service}}
```

</details>

## Arch Linux systemd unit file

<details>
<summary>Arch Linux systemd unit file</summary>

```
{{#include ../../arch/conduwuit.service}}
```

</details>

@@ -1 +1 @@
{{#include ../../debian/README.md}}
{{#include ../../pkg/debian/README.md}}
flake.lock (generated, 20 lines changed)

@@ -513,23 +513,6 @@
      "type": "github"
    }
  },
  "rocksdb": {
    "flake": false,
    "locked": {
      "lastModified": 1753385396,
      "narHash": "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=",
      "ref": "10.4.fb",
      "rev": "28d4b7276c16ed3e28af1bd96162d6442ce25923",
      "revCount": 13318,
      "type": "git",
      "url": "https://forgejo.ellis.link/continuwuation/rocksdb"
    },
    "original": {
      "ref": "10.4.fb",
      "type": "git",
      "url": "https://forgejo.ellis.link/continuwuation/rocksdb"
    }
  },
  "root": {
    "inputs": {
      "attic": "attic",

@@ -539,8 +522,7 @@
      "flake-compat": "flake-compat_3",
      "flake-utils": "flake-utils",
      "nix-filter": "nix-filter",
      "nixpkgs": "nixpkgs_5",
      "rocksdb": "rocksdb"
      "nixpkgs": "nixpkgs_5"
    }
  },
  "rust-analyzer-src": {
flake.nix (14 lines changed)

@@ -16,10 +16,6 @@
    flake-utils.url = "github:numtide/flake-utils?ref=main";
    nix-filter.url = "github:numtide/nix-filter?ref=main";
    nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
    rocksdb = {
      url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=10.4.fb";
      flake = false;
    };
  };

  outputs =

@@ -48,7 +44,7 @@
        pkgs.lib.makeScope pkgs.newScope (self: {
          inherit pkgs inputs;
          craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
          main = self.callPackage ./nix/pkgs/main { };
          main = self.callPackage ./pkg/nix/pkgs/main { };
          liburing = pkgs.liburing.overrideAttrs {
            # Tests weren't building
            outputs = [

@@ -65,7 +61,13 @@
            inherit (self) liburing;
          }).overrideAttrs
            (old: {
              src = inputs.rocksdb;
              src = pkgsHost.fetchFromGitea {
                domain = "forgejo.ellis.link";
                owner = "continuwuation";
                repo = "rocksdb";
                rev = "10.4.fb";
                sha256 = "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=";
              };
              version = "v10.4.fb";
              cmakeFlags =
                pkgs.lib.subtractLists [
@@ -9,12 +9,14 @@ Alias=matrix-conduwuit.service
DynamicUser=yes
User=conduwuit
Group=conduwuit
Type=notify
Type=notify-reload
ReloadSignal=SIGUSR1

Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"

Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"

ExecStart=/usr/bin/conduwuit

@@ -58,8 +60,8 @@ RuntimeDirectoryMode=0750
Restart=on-failure
RestartSec=5

TimeoutStopSec=2m
TimeoutStartSec=2m
TimeoutStopSec=4m
TimeoutStartSec=4m

StartLimitInterval=1m
StartLimitBurst=5
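With Type=notify-reload and ReloadSignal=SIGUSR1 as set above, systemd translates a reload request into SIGUSR1 for the running server, so configuration can be re-read without a restart. A small sketch, assuming the unit is installed as conduwuit.service as in this packaging:

    # Re-read unit changes, then ask the server to reload (systemd sends SIGUSR1).
    sudo systemctl daemon-reload
    sudo systemctl reload conduwuit.service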
pkg/debian/postinst (new file, 20 lines)

@@ -0,0 +1,20 @@
#!/bin/sh
set -e

# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule

CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit

case "$1" in
  configure)
    echo ''
    echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
    echo 'To start the server, run: systemctl start conduwuit.service'
    echo ''

    ;;
esac

#DEBHELPER#
@@ -20,24 +20,18 @@ case $1 in

    if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then
    if test -L "$CONDUWUIT_CONFIG_PATH"; then
      echo "Deleting conduwuit configuration files"
      echo "Deleting continuwuity configuration files"
      rm -v -r "$CONDUWUIT_CONFIG_PATH"
    fi
    fi

    if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then
    if test -L "$CONDUWUIT_DATABASE_PATH"; then
      echo "Deleting conduwuit database directory"
      echo "Deleting continuwuity database directory"
      rm -r "$CONDUWUIT_DATABASE_PATH"
    fi
    fi

    if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then
    if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then
      echo "Removing matrix-conduit symlink"
      rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
    fi
    fi
    ;;
esac
@@ -51,7 +51,7 @@ find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +

%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 fedora/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml

%files
@@ -89,6 +89,7 @@ serde_yaml.workspace = true
tokio.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
ctor.workspace = true

[lints]
workspace = true
@@ -29,6 +29,8 @@ pub(crate) use crate::{context::Context, utils::get_room_info};

pub(crate) const PAGE_SIZE: usize = 100;

use ctor::{ctor, dtor};

conduwuit::mod_ctor! {}
conduwuit::mod_dtor! {}
conduwuit::rustc_flags_capture! {}
@@ -93,6 +93,7 @@ serde.workspace = true
sha1.workspace = true
tokio.workspace = true
tracing.workspace = true
ctor.workspace = true

[lints]
workspace = true

@@ -321,7 +321,7 @@ pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Opti
	filter.matches(pdu).then_some(item)
}

#[cfg_attr(debug_assertions, conduwuit::ctor)]
#[cfg_attr(debug_assertions, ctor::ctor)]
fn _is_sorted() {
	debug_assert!(
		IGNORED_MESSAGE_TYPES.is_sorted(),
@@ -1,12 +1,13 @@
#![allow(deprecated)]

use std::borrow::Borrow;
use std::{borrow::Borrow, time::Instant, vec};

use axum::extract::State;
use conduwuit::{
	Err, Result, at, err,
	Err, Event, Result, at, debug, err, info,
	matrix::event::gen_event_id_canonical_json,
	utils::stream::{IterStream, TryBroadbandExt},
	trace,
	utils::stream::{BroadbandExt, IterStream, TryBroadbandExt},
	warn,
};
use conduwuit_service::Services;

@@ -25,12 +26,14 @@ use serde_json::value::{RawValue as RawJsonValue, to_raw_value};
use crate::Ruma;

/// helper method for /send_join v1 and v2
#[tracing::instrument(skip(services, pdu, omit_members), fields(room_id = room_id.as_str(), origin = origin.as_str()))]
async fn create_join_event(
	services: &Services,
	origin: &ServerName,
	room_id: &RoomId,
	pdu: &RawJsonValue,
) -> Result<create_join_event::v1::RoomState> {
	omit_members: bool,
) -> Result<create_join_event::v2::RoomState> {
	if !services.rooms.metadata.exists(room_id).await {
		return Err!(Request(NotFound("Room is unknown to this server.")));
	}
@@ -201,6 +204,7 @@ async fn create_join_event(
		.lock(room_id)
		.await;

	debug!("Acquired send_join mutex, persisting join event");
	let pdu_id = services
		.rooms
		.event_handler

@@ -210,7 +214,7 @@ async fn create_join_event(
		.ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?;

	drop(mutex_lock);

	debug!("Fetching current state IDs");
	let state_ids: Vec<OwnedEventId> = services
		.rooms
		.state_accessor

@@ -219,9 +223,25 @@ async fn create_join_event(
		.collect()
		.await;

	#[allow(clippy::unnecessary_unwrap)]
	let state = state_ids
		.iter()
		.try_stream()
		.broad_filter_map(|event_id| async move {
			if omit_members && event_id.is_ok() {
				let pdu = services
					.rooms
					.timeline
					.get_pdu(event_id.as_ref().unwrap())
					.await;
				if pdu.is_ok_and(|p| p.kind().to_cow_str() == "m.room.member") {
					trace!("omitting member event {event_id:?} from returned state");
					// skip members
					return None;
				}
			}
			Some(event_id)
		})
		.broad_and_then(|event_id| services.rooms.timeline.get_pdu_json(event_id))
		.broad_and_then(|pdu| {
			services
@@ -238,6 +258,17 @@ async fn create_join_event(
		.rooms
		.auth_chain
		.event_ids_iter(room_id, starting_events)
		// .broad_filter_map(|event_id| async {
		// if omit_members && event_id.as_ref().is_ok_and(|e| state_ids.contains(e)) {
		// // Don't include this event if it's already in the state
		// trace!(
		// "omitting member event {event_id:?} from returned auth chain as it is \
		// already in state"
		// );
		// return None;
		// }
		// Some(event_id)
		// })
		.broad_and_then(|event_id| async move {
			services.rooms.timeline.get_pdu_json(&event_id).await
		})

@@ -252,11 +283,32 @@ async fn create_join_event(
		.await?;

	services.sending.send_pdu_room(room_id, &pdu_id).await?;

	Ok(create_join_event::v1::RoomState {
	let servers_in_room: Option<Vec<_>> = if !omit_members {
		None
	} else {
		debug!("Fetching list of servers in room");
		let servers: Vec<String> = services
			.rooms
			.state_cache
			.room_servers(room_id)
			.map(|sn| sn.as_str().to_owned())
			.collect()
			.await;
		// If there's no servers, just add us
		let servers = if servers.is_empty() {
			vec![services.globals.server_name().to_string()]
		} else {
			servers
		};
		Some(servers)
	};
	debug!("Returning send_join data");
	Ok(create_join_event::v2::RoomState {
		auth_chain,
		state,
		event: to_raw_value(&CanonicalJsonValue::Object(value)).ok(),
		members_omitted: omit_members,
		servers_in_room,
	})
}
@@ -294,11 +346,24 @@ pub(crate) async fn create_join_event_v1_route(
		}
	}

	let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu)
	info!("Providing send_join for {} in {}", body.origin(), &body.room_id);
	let now = Instant::now();
	let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu, false)
		.boxed()
		.await?;
	let transformed = create_join_event::v1::RoomState {
		auth_chain: room_state.auth_chain,
		state: room_state.state,
		event: room_state.event,
	};
	info!(
		"Finished creating the send_join payload for {} in {} in {:?}",
		body.origin(),
		&body.room_id,
		now.elapsed()
	);

	Ok(create_join_event::v1::Response { room_state })
	Ok(create_join_event::v1::Response { room_state: transformed })
}

/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}`

@@ -329,17 +394,18 @@ pub(crate) async fn create_join_event_v2_route(
		}
	}

	let create_join_event::v1::RoomState { auth_chain, state, event } =
		create_join_event(&services, body.origin(), &body.room_id, &body.pdu)
	info!("Providing send_join for {} in {}", body.origin(), &body.room_id);
	let now = Instant::now();
	let room_state =
		create_join_event(&services, body.origin(), &body.room_id, &body.pdu, body.omit_members)
			.boxed()
			.await?;
	let room_state = create_join_event::v2::RoomState {
		members_omitted: false,
		auth_chain,
		state,
		event,
		servers_in_room: None,
	};
	info!(
		"Finished creating the send_join payload for {} in {} in {:?}",
		body.origin(),
		&body.room_id,
		now.elapsed()
	);

	Ok(create_join_event::v2::Response { room_state })
}
@@ -126,9 +126,11 @@ pub struct Config {
	/// This is the only directory where continuwuity will save its data,
	/// including media. Note: this was previously "/var/lib/matrix-conduit".
	///
	/// YOU NEED TO EDIT THIS.
	/// YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
	/// `systemd` service. The service file sets it to `/var/lib/conduwuit`
	/// using an environment variable and also grants write access.
	///
	/// example: "/var/lib/continuwuity"
	/// example: "/var/lib/conduwuit"
	pub database_path: PathBuf,

	/// continuwuity supports online database backups using RocksDB's Backup

@@ -712,12 +714,21 @@ pub struct Config {
	#[serde(default)]
	pub well_known: WellKnownConfig,

	#[serde(default)]
	pub allow_jaeger: bool,
	/// Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
	/// Jaeger exporter. Traces will be sent via OTLP to a collector (such as
	/// Jaeger) that supports the OpenTelemetry Protocol.
	///
	/// Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
	/// environment variable (defaults to http://localhost:4318).
	#[serde(default, alias = "allow_jaeger")]
	pub allow_otlp: bool,

	/// Filter for OTLP tracing spans. This controls which spans are exported
	/// to the OTLP collector.
	///
	/// default: "info"
	#[serde(default = "default_jaeger_filter")]
	pub jaeger_filter: String,
	#[serde(default = "default_otlp_filter", alias = "jaeger_filter")]
	pub otlp_filter: String,

	/// If the 'perf_measurements' compile-time feature is enabled, enables
	/// collecting folded stack trace profile of tracing spans using

@@ -2365,7 +2376,7 @@ fn default_tracing_flame_filter() -> String {
		.to_owned()
}

fn default_jaeger_filter() -> String {
fn default_otlp_filter() -> String {
	cfg!(debug_assertions)
		.then_some("trace,h2=off")
		.unwrap_or("info")
@@ -66,6 +66,7 @@ serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
tracing.workspace = true
ctor.workspace = true

[lints]
workspace = true
@@ -3,6 +3,8 @@
extern crate conduwuit_core as conduwuit;
extern crate rust_rocksdb as rocksdb;

use ctor::{ctor, dtor};

conduwuit::mod_ctor! {}
conduwuit::mod_dtor! {}
conduwuit::rustc_flags_capture! {}
@@ -13,13 +13,13 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream {
	let ret = quote! {
		pub static RUSTC_FLAGS: [&str; #flag_len] = [#( #flag ),*];

		#[conduwuit_core::ctor]
		#[ctor]
		fn _set_rustc_flags() {
			conduwuit_core::info::rustc::FLAGS.lock().insert(#crate_name, &RUSTC_FLAGS);
		}

		// static strings have to be yanked on module unload
		#[conduwuit_core::dtor]
		#[dtor]
		fn _unset_rustc_flags() {
			conduwuit_core::info::rustc::FLAGS.lock().remove(#crate_name);
		}
@@ -32,12 +32,12 @@ a cool hard fork of Conduit, a Matrix homeserver written in Rust"""
section = "net"
priority = "optional"
conf-files = ["/etc/conduwuit/conduwuit.toml"]
maintainer-scripts = "../../debian/"
systemd-units = { unit-name = "conduwuit", start = false }
maintainer-scripts = "../../pkg/debian/"
systemd-units = { unit-name = "conduwuit", start = false, unit-scripts = "../../pkg/" }
assets = [
	["../../debian/README.md", "usr/share/doc/conduwuit/README.Debian", "644"],
	["../../pkg/debian/README.md", "usr/share/doc/conduwuit/README.Debian", "644"],
	["../../README.md", "usr/share/doc/conduwuit/", "644"],
	["../../target/release/conduwuit", "usr/sbin/conduwuit", "755"],
	["../../target/release/conduwuit", "usr/bin/conduwuit", "755"],
	["../../conduwuit-example.toml", "etc/conduwuit/conduwuit.toml", "640"],
]
@@ -126,7 +126,8 @@ perf_measurements = [
	"dep:tracing-flame",
	"dep:tracing-opentelemetry",
	"dep:opentelemetry_sdk",
	"dep:opentelemetry-jaeger",
	"dep:opentelemetry-otlp",
	"dep:opentelemetry-jaeger-propagator",
	"conduwuit-core/perf_measurements",
	"conduwuit-core/sentry_telemetry",
]
@@ -202,11 +203,14 @@ clap.workspace = true
console-subscriber.optional = true
console-subscriber.workspace = true
const-str.workspace = true
ctor.workspace = true
log.workspace = true
opentelemetry-jaeger.optional = true
opentelemetry-jaeger.workspace = true
opentelemetry.optional = true
opentelemetry.workspace = true
opentelemetry-otlp.optional = true
opentelemetry-otlp.workspace = true
opentelemetry-jaeger-propagator.optional = true
opentelemetry-jaeger-propagator.workspace = true
opentelemetry_sdk.optional = true
opentelemetry_sdk.workspace = true
sentry-tower.optional = true
@@ -226,6 +230,7 @@ tracing-subscriber.workspace = true
tracing.workspace = true
tracing-journald = { workspace = true, optional = true }


[target.'cfg(all(not(target_env = "msvc"), target_os = "linux"))'.dependencies]
hardened_malloc-rs.workspace = true
hardened_malloc-rs.optional = true
@@ -7,6 +7,8 @@ use conduwuit_core::{
	log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span},
	result::UnwrapOrErr,
};
#[cfg(feature = "perf_measurements")]
use opentelemetry::trace::TracerProvider;
use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload};

#[cfg(feature = "perf_measurements")]
@@ -87,30 +89,35 @@ pub(crate) fn init(
		(None, None)
	};

	let jaeger_filter = EnvFilter::try_new(&config.jaeger_filter)
		.map_err(|e| err!(Config("jaeger_filter", "{e}.")))?;
	let otlp_filter = EnvFilter::try_new(&config.otlp_filter)
		.map_err(|e| err!(Config("otlp_filter", "{e}.")))?;

	let jaeger_layer = config.allow_jaeger.then(|| {
	let otlp_layer = config.allow_otlp.then(|| {
		opentelemetry::global::set_text_map_propagator(
			opentelemetry_jaeger::Propagator::new(),
			opentelemetry_jaeger_propagator::Propagator::new(),
		);

		let tracer = opentelemetry_jaeger::new_agent_pipeline()
			.with_auto_split_batch(true)
			.with_service_name(conduwuit_core::name())
			.install_batch(opentelemetry_sdk::runtime::Tokio)
			.expect("jaeger agent pipeline");
		let exporter = opentelemetry_otlp::SpanExporter::builder()
			.with_http()
			.build()
			.expect("Failed to create OTLP exporter");

		let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
			.with_batch_exporter(exporter)
			.build();

		let tracer = provider.tracer(conduwuit_core::name());

		let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);

		let (jaeger_reload_filter, jaeger_reload_handle) =
			reload::Layer::new(jaeger_filter.clone());
		reload_handles.add("jaeger", Box::new(jaeger_reload_handle));
		let (otlp_reload_filter, otlp_reload_handle) =
			reload::Layer::new(otlp_filter.clone());
		reload_handles.add("otlp", Box::new(otlp_reload_handle));

		Some(telemetry.with_filter(jaeger_reload_filter))
		Some(telemetry.with_filter(otlp_reload_filter))
	});

	let subscriber = subscriber.with(flame_layer).with(jaeger_layer);
	let subscriber = subscriber.with(flame_layer).with(otlp_layer);
	(subscriber, flame_guard)
};
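For trying this new export path locally, any OTLP-capable collector will do. One assumption-laden sketch uses Jaeger's all-in-one image, which in recent releases accepts OTLP over HTTP on port 4318 (older images may need COLLECTOR_OTLP_ENABLED=true):

    # Hypothetical local collector for the spans exported above; 16686 is the Jaeger UI.
    docker run --rm -p 16686:16686 -p 4318:4318 jaegertracing/all-in-one:latest
    # Then start the server with allow_otlp enabled and, if the collector is remote:
    export OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318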
@@ -13,6 +13,7 @@ mod sentry;
mod server;
mod signal;

use ctor::{ctor, dtor};
use server::Server;

rustc_flags_capture! {}
@@ -125,6 +125,7 @@ tokio.workspace = true
tower.workspace = true
tower-http.workspace = true
tracing.workspace = true
ctor.workspace = true

[target.'cfg(all(unix, target_os = "linux"))'.dependencies]
sd-notify.workspace = true
@@ -12,6 +12,7 @@ use std::{panic::AssertUnwindSafe, pin::Pin, sync::Arc};

use conduwuit::{Error, Result, Server};
use conduwuit_service::Services;
use ctor::{ctor, dtor};
use futures::{Future, FutureExt, TryFutureExt};

conduwuit::mod_ctor! {}
@@ -117,6 +117,7 @@ webpage.optional = true
blurhash.workspace = true
blurhash.optional = true
recaptcha-verify = { version = "0.1.5", default-features = false }
ctor.workspace = true

[lints]
workspace = true
@@ -33,6 +33,7 @@ pub mod users;
extern crate conduwuit_core as conduwuit;
extern crate conduwuit_database as database;

use ctor::{ctor, dtor};
pub(crate) use service::{Args, Dep, Service};

pub use crate::services::Services;
@@ -452,7 +452,7 @@ async fn get_statediff(&self, shortstatehash: ShortStateHash) -> Result<StateDif
		.ok()
		.take_if(|parent| *parent != 0);

	debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride");
	debug_assert!(value.len().is_multiple_of(STRIDE), "value not aligned to stride");
	let _num_values = value.len() / STRIDE;

	let mut add_mode = true;