Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git (synced 2025-09-09 19:13:03 +02:00)
Compare commits: v0.5.0-rc4...main (424 commits)
404 changed files with 22307 additions and 14770 deletions
.cargo/config.toml (new file, 2 lines)
@@ -0,0 +1,2 @@
[alias]
xtask = "run --package xtask --"
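With this alias in place, the workspace's xtask package is invoked through Cargo. A minimal usage sketch (the `check` subcommand here is only an assumed example, not taken from this diff):

    cargo xtask check    # expands to: cargo run --package xtask -- check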
.dockerignore (2 lines changed)
@@ -1,9 +1,9 @@
# Local build and dev artifacts
target
tests
target/

# Docker files
Dockerfile*
docker/

# IDE files
.vscode
@@ -11,10 +11,11 @@ Dockerfile*
*.iml

# Git folder
.git
# .git
.gitea
.gitlab
.github
.forgejo

# Dot files
.env
.editorconfig (8 lines changed)
@@ -22,3 +22,11 @@ indent_size = 2
[*.rs]
indent_style = tab
max_line_length = 98

[*.yml]
indent_size = 2
indent_style = space

[*.json]
indent_size = 4
indent_style = space
.envrc (2 lines changed)
@@ -2,6 +2,8 @@

dotenv_if_exists

if [ -f /etc/os-release ] && grep -q '^ID=nixos' /etc/os-release; then
  use flake ".#${DIRENV_DEVSHELL:-default}"
fi

PATH_add bin
.forgejo/actions/detect-runner-os/action.yml (new file, 58 lines)
@@ -0,0 +1,58 @@
name: detect-runner-os
description: |
  Detect the actual OS name and version of the runner.
  Provides separate outputs for name, version, and a combined slug.

outputs:
  name:
    description: 'OS name (e.g. Ubuntu, Debian)'
    value: ${{ steps.detect.outputs.name }}
  version:
    description: 'OS version (e.g. 22.04, 11)'
    value: ${{ steps.detect.outputs.version }}
  slug:
    description: 'Combined OS slug (e.g. Ubuntu-22.04)'
    value: ${{ steps.detect.outputs.slug }}
  node_major:
    description: 'Major version of Node.js if available (e.g. 22)'
    value: ${{ steps.detect.outputs.node_major }}
  node_version:
    description: 'Full Node.js version if available (e.g. 22.19.0)'
    value: ${{ steps.detect.outputs.node_version }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: detect
      shell: bash
      run: |
        # Detect OS version (try lsb_release first, fall back to /etc/os-release)
        OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)

        # Detect OS name and capitalise (try lsb_release first, fall back to /etc/os-release)
        OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"' | sed 's/\b\(.\)/\u\1/g')

        # Create combined slug
        OS_SLUG="${OS_NAME}-${OS_VERSION}"

        # Detect Node.js version if available
        if command -v node >/dev/null 2>&1; then
          NODE_VERSION=$(node --version | sed 's/v//')
          NODE_MAJOR=$(echo $NODE_VERSION | cut -d. -f1)
          echo "node_version=${NODE_VERSION}" >> $GITHUB_OUTPUT
          echo "node_major=${NODE_MAJOR}" >> $GITHUB_OUTPUT
          echo "🔍 Detected Node.js: v${NODE_VERSION}"
        else
          echo "node_version=" >> $GITHUB_OUTPUT
          echo "node_major=" >> $GITHUB_OUTPUT
          echo "🔍 Node.js not found"
        fi

        # Set OS outputs
        echo "name=${OS_NAME}" >> $GITHUB_OUTPUT
        echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT
        echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT

        # Log detection results
        echo "🔍 Detected Runner OS: ${OS_NAME} ${OS_VERSION}"
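The composite action only wraps standard probing commands, so the same detection can be reproduced by hand on a runner when debugging cache keys. A minimal sketch using the same probes as the action:

    OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)
    OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"')
    echo "${OS_NAME}-${OS_VERSION}"    # e.g. Ubuntu-22.04, the slug used in cache keys below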
.forgejo/actions/rust-toolchain/action.yml (new file, 63 lines)
@@ -0,0 +1,63 @@
name: rust-toolchain
description: |
  Install a Rust toolchain using rustup.
  See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification
  for more information about toolchains.
inputs:
  toolchain:
    description: |
      Rust toolchain name.
      See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification
    required: false
  target:
    description: Target triple to install for this toolchain
    required: false
  components:
    description: Space-separated list of components to be additionally installed for a new toolchain
    required: false
outputs:
  rustc_version:
    description: The rustc version installed
    value: ${{ steps.rustc-version.outputs.version }}
  rustup_version:
    description: The rustup version installed
    value: ${{ steps.rustup-version.outputs.version }}

runs:
  using: composite
  steps:
    - name: Check if rustup is already installed
      shell: bash
      id: rustup-version
      run: |
        echo "version=$(rustup --version)" >> $GITHUB_OUTPUT
    - name: Cache rustup toolchains
      if: steps.rustup-version.outputs.version == ''
      uses: actions/cache@v3
      with:
        path: |
          ~/.rustup
          !~/.rustup/tmp
          !~/.rustup/downloads
        # Requires repo to be cloned if toolchain is not specified
        key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
    - name: Install Rust toolchain
      if: steps.rustup-version.outputs.version == ''
      shell: bash
      run: |
        if ! command -v rustup &> /dev/null ; then
          curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y
          echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH
        fi
    - shell: bash
      run: |
        set -x
        ${{ inputs.toolchain && format('rustup override set {0}', inputs.toolchain) }}
        ${{ inputs.target && format('rustup target add {0}', inputs.target) }}
        ${{ inputs.components && format('rustup component add {0}', inputs.components) }}
        cargo --version
        rustc --version
    - id: rustc-version
      shell: bash
      run: |
        echo "version=$(rustc --version)" >> $GITHUB_OUTPUT
.forgejo/actions/sccache/action.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
name: sccache
description: |
  Install sccache for caching builds in GitHub Actions.


runs:
  using: composite
  steps:
    - name: Install sccache
      uses: https://git.tomfos.tr/tom/sccache-action@v1
    - name: Configure sccache
      uses: https://github.com/actions/github-script@v7
      with:
        script: |
          core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
          core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
    - shell: bash
      run: |
        echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
        echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
        echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
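Because the wrapper only takes effect through RUSTC_WRAPPER, a quick way to confirm that later build steps are actually hitting the cache is to query sccache's statistics after a build (standard sccache CLI, not part of this diff):

    sccache --show-stats    # reports compile requests, cache hits, and misses for the running server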
.forgejo/actions/setup-llvm-with-apt/action.yml (new file, 167 lines)
@@ -0,0 +1,167 @@
name: setup-llvm-with-apt
description: |
  Set up LLVM toolchain with APT package management and smart caching.
  Supports cross-compilation architectures and additional package installation.

  Creates symlinks in /usr/bin: clang, clang++, lld, llvm-ar, llvm-ranlib

inputs:
  dpkg-arch:
    description: 'Debian architecture for cross-compilation (e.g. arm64)'
    required: false
    default: ''
  extra-packages:
    description: 'Additional APT packages to install (space-separated)'
    required: false
    default: ''
  llvm-version:
    description: 'LLVM version to install'
    required: false
    default: '20'

outputs:
  llvm-version:
    description: 'Installed LLVM version'
    value: ${{ steps.configure.outputs.version }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: runner-os
      uses: ./.forgejo/actions/detect-runner-os

    - name: Configure cross-compilation architecture
      if: inputs.dpkg-arch != ''
      shell: bash
      run: |
        echo "🏗️ Adding ${{ inputs.dpkg-arch }} architecture"
        sudo dpkg --add-architecture ${{ inputs.dpkg-arch }}

        # Restrict default sources to amd64
        sudo sed -i 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list
        sudo sed -i 's/^deb https/deb [arch=amd64] https/g' /etc/apt/sources.list

        # Add ports sources for foreign architecture
        sudo tee /etc/apt/sources.list.d/${{ inputs.dpkg-arch }}.list > /dev/null <<EOF
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
        EOF

        echo "✅ Architecture ${{ inputs.dpkg-arch }} configured"

    - name: Start LLVM cache group
      shell: bash
      run: echo "::group::📦 Restoring LLVM cache"

    - name: Check for LLVM cache
      id: cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          /usr/bin/clang-*
          /usr/bin/clang++-*
          /usr/bin/lld-*
          /usr/bin/llvm-*
          /usr/lib/llvm-*/
          /usr/lib/x86_64-linux-gnu/libLLVM*.so*
          /usr/lib/x86_64-linux-gnu/libclang*.so*
          /etc/apt/sources.list.d/archive_uri-*
          /etc/apt/trusted.gpg.d/apt.llvm.org.asc
        key: llvm-${{ steps.runner-os.outputs.slug }}-v${{ inputs.llvm-version }}-v3-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}

    - name: End LLVM cache group
      shell: bash
      run: echo "::endgroup::"

    - name: Check and install LLVM if needed
      id: llvm-setup
      shell: bash
      run: |
        echo "🔍 Checking for LLVM ${{ inputs.llvm-version }}..."

        # Check both binaries and libraries exist
        if [ -f "/usr/bin/clang-${{ inputs.llvm-version }}" ] && \
           [ -f "/usr/bin/clang++-${{ inputs.llvm-version }}" ] && \
           [ -f "/usr/bin/lld-${{ inputs.llvm-version }}" ] && \
           ([ -f "/usr/lib/x86_64-linux-gnu/libLLVM.so.${{ inputs.llvm-version }}.1" ] || \
            [ -f "/usr/lib/x86_64-linux-gnu/libLLVM-${{ inputs.llvm-version }}.so.1" ] || \
            [ -f "/usr/lib/llvm-${{ inputs.llvm-version }}/lib/libLLVM.so" ]); then
          echo "✅ LLVM ${{ inputs.llvm-version }} found and verified"
          echo "needs-install=false" >> $GITHUB_OUTPUT
        else
          echo "📦 LLVM ${{ inputs.llvm-version }} not found or incomplete - installing..."

          echo "::group::🔧 Installing LLVM ${{ inputs.llvm-version }}"
          wget -O - https://apt.llvm.org/llvm.sh | bash -s -- ${{ inputs.llvm-version }}
          echo "::endgroup::"

          if [ ! -f "/usr/bin/clang-${{ inputs.llvm-version }}" ]; then
            echo "❌ Failed to install LLVM ${{ inputs.llvm-version }}"
            exit 1
          fi

          echo "✅ Installed LLVM ${{ inputs.llvm-version }}"
          echo "needs-install=true" >> $GITHUB_OUTPUT
        fi

    - name: Prepare for additional packages
      if: inputs.extra-packages != ''
      shell: bash
      run: |
        # Update APT if LLVM was cached (installer script already does apt-get update)
        if [[ "${{ steps.llvm-setup.outputs.needs-install }}" != "true" ]]; then
          echo "::group::📦 Running apt-get update (LLVM cached, extra packages needed)"
          sudo apt-get update
          echo "::endgroup::"
        fi
        echo "::group::📦 Installing additional packages"

    - name: Install additional packages
      if: inputs.extra-packages != ''
      uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
      with:
        packages: ${{ inputs.extra-packages }}
        version: 1.0

    - name: End package installation group
      if: inputs.extra-packages != ''
      shell: bash
      run: echo "::endgroup::"

    - name: Configure LLVM environment
      id: configure
      shell: bash
      run: |
        echo "::group::🔧 Configuring LLVM ${{ inputs.llvm-version }} environment"

        # Create symlinks
        sudo ln -sf "/usr/bin/clang-${{ inputs.llvm-version }}" /usr/bin/clang
        sudo ln -sf "/usr/bin/clang++-${{ inputs.llvm-version }}" /usr/bin/clang++
        sudo ln -sf "/usr/bin/lld-${{ inputs.llvm-version }}" /usr/bin/lld
        sudo ln -sf "/usr/bin/llvm-ar-${{ inputs.llvm-version }}" /usr/bin/llvm-ar
        sudo ln -sf "/usr/bin/llvm-ranlib-${{ inputs.llvm-version }}" /usr/bin/llvm-ranlib
        echo " ✓ Created symlinks"

        # Setup library paths
        LLVM_LIB_PATH="/usr/lib/llvm-${{ inputs.llvm-version }}/lib"
        if [ -d "$LLVM_LIB_PATH" ]; then
          echo "LD_LIBRARY_PATH=${LLVM_LIB_PATH}:${LD_LIBRARY_PATH:-}" >> $GITHUB_ENV
          echo "LIBCLANG_PATH=${LLVM_LIB_PATH}" >> $GITHUB_ENV

          echo "$LLVM_LIB_PATH" | sudo tee "/etc/ld.so.conf.d/llvm-${{ inputs.llvm-version }}.conf" > /dev/null
          sudo ldconfig
          echo " ✓ Configured library paths"
        else
          # Fallback to standard library location
          if [ -d "/usr/lib/x86_64-linux-gnu" ]; then
            echo "LIBCLANG_PATH=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV
            echo " ✓ Using fallback library path"
          fi
        fi

        # Set output
        echo "version=${{ inputs.llvm-version }}" >> $GITHUB_OUTPUT
        echo "::endgroup::"
        echo "✅ LLVM ready: $(clang --version | head -1)"
.forgejo/actions/setup-rust/action.yml (new file, 226 lines)
@@ -0,0 +1,226 @@
name: setup-rust
description: |
  Set up Rust toolchain with sccache for compilation caching.
  Respects rust-toolchain.toml by default or accepts explicit version override.

inputs:
  cache-key-suffix:
    description: 'Optional suffix for cache keys (e.g. platform identifier)'
    required: false
    default: ''
  rust-components:
    description: 'Additional Rust components to install (space-separated)'
    required: false
    default: ''
  rust-target:
    description: 'Rust target triple (e.g. x86_64-unknown-linux-gnu)'
    required: false
    default: ''
  rust-version:
    description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
    required: false
    default: '1.87.0'
  sccache-cache-limit:
    description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
    required: false
    default: '2G'
  github-token:
    description: 'GitHub token for downloading sccache from GitHub releases'
    required: false
    default: ''

outputs:
  rust-version:
    description: 'Installed Rust version'
    value: ${{ steps.rust-setup.outputs.version }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: runner-os
      uses: ./.forgejo/actions/detect-runner-os

    - name: Configure Cargo environment
      shell: bash
      run: |
        # Use workspace-relative paths for better control and consistency
        echo "CARGO_HOME=${{ github.workspace }}/.cargo" >> $GITHUB_ENV
        echo "CARGO_TARGET_DIR=${{ github.workspace }}/target" >> $GITHUB_ENV
        echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> $GITHUB_ENV
        echo "RUSTUP_HOME=${{ github.workspace }}/.rustup" >> $GITHUB_ENV

        # Limit binstall resolution timeout to avoid GitHub rate limit delays
        echo "BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10" >> $GITHUB_ENV

        # Ensure directories exist for first run
        mkdir -p "${{ github.workspace }}/.cargo"
        mkdir -p "${{ github.workspace }}/.sccache"
        mkdir -p "${{ github.workspace }}/target"
        mkdir -p "${{ github.workspace }}/.rustup"

    - name: Start cache restore group
      shell: bash
      run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"

    - name: Cache Cargo registry and git
      id: registry-cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          .cargo/registry/index
          .cargo/registry/cache
          .cargo/git/db
        # Registry cache saved per workflow, restored from any workflow's cache
        # Each workflow maintains its own registry that accumulates its needed crates
        key: cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ github.workflow }}
        restore-keys: |
          cargo-registry-${{ steps.runner-os.outputs.slug }}-

    - name: Cache toolchain binaries
      id: toolchain-cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          .cargo/bin
          .rustup/toolchains
          .rustup/update-hashes
        # Shared toolchain cache across all Rust versions
        key: toolchain-${{ steps.runner-os.outputs.slug }}


    - name: Setup sccache
      uses: https://git.tomfos.tr/tom/sccache-action@v1

    - name: Cache build artifacts
      id: build-cache
      uses: https://github.com/actions/cache@v4
      with:
        path: |
          target/**/deps
          !target/**/deps/*.rlib
          target/**/build
          target/**/.fingerprint
          target/**/incremental
          target/**/*.d
          /timelord/
        # Build artifacts - cache per code change, restore from deps when code changes
        key: >-
          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
        restore-keys: |
          build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-

    - name: End cache restore group
      shell: bash
      run: echo "::endgroup::"

    - name: Setup Rust toolchain
      shell: bash
      run: |
        # Install rustup if not already cached
        if ! command -v rustup &> /dev/null; then
          echo "::group::📦 Installing rustup"
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
          source "$CARGO_HOME/env"
          echo "::endgroup::"
        else
          echo "✅ rustup already available"
        fi

        # Setup the appropriate Rust version
        if [[ -n "${{ inputs.rust-version }}" ]]; then
          echo "::group::📦 Setting up Rust ${{ inputs.rust-version }}"
          # Set override first to prevent rust-toolchain.toml from auto-installing
          rustup override set ${{ inputs.rust-version }} 2>/dev/null || true

          # Check if we need to install/update the toolchain
          if rustup toolchain list | grep -q "^${{ inputs.rust-version }}-"; then
            rustup update ${{ inputs.rust-version }}
          else
            rustup toolchain install ${{ inputs.rust-version }} --profile minimal -c cargo,clippy,rustfmt
          fi
        else
          echo "::group::📦 Setting up Rust from rust-toolchain.toml"
          rustup show
        fi
        echo "::endgroup::"

    - name: Configure PATH and install tools
      shell: bash
      env:
        GITHUB_TOKEN: ${{ inputs.github-token }}
      run: |
        # Add .cargo/bin to PATH permanently for all subsequent steps
        echo "${{ github.workspace }}/.cargo/bin" >> $GITHUB_PATH

        # For this step only, we need to add it to PATH since GITHUB_PATH takes effect in the next step
        export PATH="${{ github.workspace }}/.cargo/bin:$PATH"

        # Install cargo-binstall for fast binary installations
        if command -v cargo-binstall &> /dev/null; then
          echo "✅ cargo-binstall already available"
        else
          echo "::group::📦 Installing cargo-binstall"
          curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
          echo "::endgroup::"
        fi

        if command -v prek &> /dev/null; then
          echo "✅ prek already available"
        else
          echo "::group::📦 Installing prek"
          # prek isn't regularly published to crates.io, so we use git source
          cargo-binstall -y --no-symlinks --git https://github.com/j178/prek prek
          echo "::endgroup::"
        fi

        if command -v timelord &> /dev/null; then
          echo "✅ timelord already available"
        else
          echo "::group::📦 Installing timelord"
          cargo-binstall -y --no-symlinks timelord-cli
          echo "::endgroup::"
        fi

    - name: Configure sccache environment
      shell: bash
      run: |
        echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
        echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV

        # Configure incremental compilation GC
        # If we restored from old cache (partial hit), clean up aggressively
        if [[ "${{ steps.build-cache.outputs.cache-hit }}" != "true" ]]; then
          echo "♻️ Partial cache hit - enabling cache cleanup"
          echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
        fi

    - name: Install Rust components
      if: inputs.rust-components != ''
      shell: bash
      run: |
        echo "📦 Installing components: ${{ inputs.rust-components }}"
        rustup component add ${{ inputs.rust-components }}

    - name: Install Rust target
      if: inputs.rust-target != ''
      shell: bash
      run: |
        echo "📦 Installing target: ${{ inputs.rust-target }}"
        rustup target add ${{ inputs.rust-target }}

    - name: Output version and summary
      id: rust-setup
      shell: bash
      run: |
        RUST_VERSION=$(rustc --version | cut -d' ' -f2)
        echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT

        echo "📋 Setup complete:"
        echo " Rust: $(rustc --version)"
        echo " Cargo: $(cargo --version)"
        echo " prek: $(prek --version 2>/dev/null || echo 'installed')"
        echo " timelord: $(timelord --version 2>/dev/null || echo 'installed')"
.forgejo/actions/timelord/action.yml (new file, 46 lines)
@@ -0,0 +1,46 @@
name: timelord
description: |
  Use timelord to set file timestamps
inputs:
  key:
    description: |
      The key to use for caching the timelord data.
      This should be unique to the repository and the runner.
    required: true
    default: timelord-v0
  path:
    description: |
      The path to the directory to be timestamped.
      This should be the root of the repository.
    required: true
    default: .

runs:
  using: composite
  steps:
    - name: Cache timelord-cli installation
      id: cache-timelord-bin
      uses: actions/cache@v3
      with:
        path: ~/.cargo/bin/timelord
        key: timelord-cli-v3.0.1
    - name: Install timelord-cli
      uses: https://github.com/cargo-bins/cargo-binstall@main
      if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
    - run: cargo binstall timelord-cli@3.0.1
      shell: bash
      if: steps.cache-timelord-bin.outputs.cache-hit != 'true'

    - name: Load timelord files
      uses: actions/cache/restore@v3
      with:
        path: /timelord/
        key: ${{ inputs.key }}
    - name: Run timelord to set timestamps
      shell: bash
      run: timelord sync --source-dir ${{ inputs.path }} --cache-dir /timelord/
    - name: Save timelord
      uses: actions/cache/save@v3
      with:
        path: /timelord/
        key: ${{ inputs.key }}
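The action is a thin wrapper around the timelord CLI, so the equivalent can be run locally with the same pinned version used here:

    cargo binstall timelord-cli@3.0.1
    timelord sync --source-dir . --cache-dir /timelord/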
.forgejo/regsync/regsync.yml (new file, 55 lines)
@@ -0,0 +1,55 @@
version: 1

x-source: &source forgejo.ellis.link/continuwuation/continuwuity

x-tags:
  releases: &tags-releases
    tags:
      allow:
        - "latest"
        - "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
        - "v[0-9]+\\.[0-9]+"
        - "v[0-9]+"
  main: &tags-main
    tags:
      allow:
        - "latest"
        - "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
        - "v[0-9]+\\.[0-9]+"
        - "v[0-9]+"
        - "main"
  commits: &tags-commits
    tags:
      allow:
        - "latest"
        - "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
        - "v[0-9]+\\.[0-9]+"
        - "v[0-9]+"
        - "main"
        - "sha-[a-f0-9]+"
  all: &tags-all
    tags:
      allow:
        - ".*"

# Registry credentials
creds:
  - registry: forgejo.ellis.link
    user: "{{env \"BUILTIN_REGISTRY_USER\"}}"
    pass: "{{env \"BUILTIN_REGISTRY_PASSWORD\"}}"
  - registry: registry.gitlab.com
    user: "{{env \"GITLAB_USERNAME\"}}"
    pass: "{{env \"GITLAB_TOKEN\"}}"

# Global defaults
defaults:
  parallel: 3
  interval: 2h
  digestTags: true

# Sync configuration - each registry gets different image sets
sync:
  - source: *source
    target: registry.gitlab.com/continuwuity/continuwuity
    type: repository
    <<: *tags-main
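This file is consumed by the regsync CLI from regclient; the mirror workflow below runs it in check mode before mirroring, and the same commands can be run locally to preview what would be copied:

    regsync check -c .forgejo/regsync/regsync.yml -v info    # list images that need syncing
    regsync once  -c .forgejo/regsync/regsync.yml -v info    # perform a single sync pass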
.forgejo/workflows/documentation.yml (new file, 87 lines)
@@ -0,0 +1,87 @@
name: Documentation

on:
  pull_request:
  push:
    branches:
      - main
    tags:
      - "v*"
  workflow_dispatch:

concurrency:
  group: "pages-${{ github.ref }}"
  cancel-in-progress: true

jobs:
  docs:
    name: Build and Deploy Documentation
    runs-on: ubuntu-latest
    if: secrets.CLOUDFLARE_API_TOKEN != ''

    steps:
      - name: Sync repository
        uses: https://github.com/actions/checkout@v4
        with:
          persist-credentials: false
          fetch-depth: 0

      - name: Setup mdBook
        uses: https://github.com/peaceiris/actions-mdbook@v2
        with:
          mdbook-version: "latest"

      - name: Build mdbook
        run: mdbook build

      - name: Prepare static files for deployment
        run: |
          mkdir -p ./public/.well-known/matrix
          mkdir -p ./public/.well-known/continuwuity
          mkdir -p ./public/schema
          # Copy the Matrix .well-known files
          cp ./docs/static/server ./public/.well-known/matrix/server
          cp ./docs/static/client ./public/.well-known/matrix/client
          cp ./docs/static/client ./public/.well-known/matrix/support
          cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements
          cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json
          # Copy the custom headers file
          cp ./docs/static/_headers ./public/_headers
          echo "Copied .well-known files and _headers to ./public"

      - name: Detect runner environment
        id: runner-env
        uses: ./.forgejo/actions/detect-runner-os

      - name: Setup Node.js
        if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
        uses: https://github.com/actions/setup-node@v4
        with:
          node-version: 22

      - name: Cache npm dependencies
        uses: actions/cache@v3
        with:
          path: ~/.npm
          key: ${{ steps.runner-env.outputs.slug }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ steps.runner-env.outputs.slug }}-node-

      - name: Install dependencies
        run: npm install --save-dev wrangler@latest

      - name: Deploy to Cloudflare Pages (Production)
        if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
        uses: https://github.com/cloudflare/wrangler-action@v3
        with:
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          command: pages deploy ./public --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"

      - name: Deploy to Cloudflare Pages (Preview)
        if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
        uses: https://github.com/cloudflare/wrangler-action@v3
        with:
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          command: pages deploy ./public --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
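The deployed artefact is just the mdBook output plus the copied .well-known files, so the site can be reproduced locally before pushing (a sketch, assuming the book's output directory is configured as ./public, which the copy steps above imply; mdbook serve is the standard mdBook preview command):

    mdbook build            # render the book
    mdbook serve --open     # local preview with live reload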
.forgejo/workflows/element.yml (new file, 124 lines)
@@ -0,0 +1,124 @@
name: Deploy Element Web

on:
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch:

concurrency:
  group: "element-${{ github.ref }}"
  cancel-in-progress: true

jobs:
  build-and-deploy:
    name: 🏗️ Build and Deploy
    runs-on: ubuntu-latest

    steps:
      - name: 📦 Setup Node.js
        uses: https://github.com/actions/setup-node@v4
        with:
          node-version: "22"

      - name: 🔨 Clone, setup, and build Element Web
        run: |
          echo "Cloning Element Web..."
          git clone https://github.com/maunium/element-web
          cd element-web
          git checkout develop
          git pull

          echo "Cloning matrix-js-sdk..."
          git clone https://github.com/matrix-org/matrix-js-sdk.git

          echo "Installing Yarn..."
          npm install -g yarn

          echo "Installing dependencies..."
          yarn install

          echo "Preparing build environment..."
          mkdir -p .home

          echo "Cleaning up specific node_modules paths..."
          rm -rf node_modules/@types/eslint-scope/ matrix-*-sdk/node_modules/@types/eslint-scope || echo "Cleanup paths not found, continuing."

          echo "Getting matrix-js-sdk commit hash..."
          cd matrix-js-sdk
          jsver=$(git rev-parse HEAD)
          jsver=${jsver:0:12}
          cd ..
          echo "matrix-js-sdk version hash: $jsver"

          echo "Getting element-web commit hash..."
          ver=$(git rev-parse HEAD)
          ver=${ver:0:12}
          echo "element-web version hash: $ver"

          chmod +x ./build-sh

          export VERSION="$ver-js-$jsver"
          echo "Building Element Web version: $VERSION"
          ./build-sh

          echo "Checking for build output..."
          ls -la webapp/

      - name: ⚙️ Create config.json
        run: |
          cat <<EOF > ./element-web/webapp/config.json
          {
            "default_server_name": "continuwuity.org",
            "default_server_config": {
              "m.homeserver": {
                "base_url": "https://matrix.continuwuity.org"
              }
            },
            "default_country_code": "GB",
            "default_theme": "dark",
            "mobile_guide_toast": false,
            "show_labs_settings": true,
            "room_directory": [
              "continuwuity.org",
              "matrixrooms.info"
            ],
            "settings_defaults": {
              "UIFeature.urlPreviews": true,
              "UIFeature.feedback": false,
              "UIFeature.voip": false,
              "UIFeature.shareQrCode": false,
              "UIFeature.shareSocial": false,
              "UIFeature.locationSharing": false,
              "enableSyntaxHighlightLanguageDetection": true
            },
            "features": {
              "feature_pinning": true,
              "feature_custom_themes": true
            }
          }
          EOF
          echo "Created ./element-web/webapp/config.json"
          cat ./element-web/webapp/config.json

      - name: 📤 Upload Artifact
        uses: https://code.forgejo.org/actions/upload-artifact@v3
        with:
          name: element-web
          path: ./element-web/webapp/
          retention-days: 14

      - name: 🛠️ Install Wrangler
        run: npm install --save-dev wrangler@latest

      - name: 🚀 Deploy to Cloudflare Pages
        if: vars.CLOUDFLARE_PROJECT_NAME != ''
        id: deploy
        uses: https://github.com/cloudflare/wrangler-action@v3
        with:
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          command: >-
            pages deploy ./element-web/webapp
            --branch="${{ github.ref == 'refs/heads/main' && 'main' || github.head_ref || github.ref_name }}"
            --commit-dirty=true
            --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
.forgejo/workflows/mirror-images.yml (new file, 47 lines)
@@ -0,0 +1,47 @@
name: Mirror Container Images

on:
  schedule:
    # Run every 2 hours
    - cron: "0 */2 * * *"
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Dry run (check only, no actual mirroring)'
        required: false
        default: false
        type: boolean

concurrency:
  group: "mirror-images"
  cancel-in-progress: true

jobs:
  mirror-images:
    runs-on: ubuntu-latest
    env:
      BUILTIN_REGISTRY_USER: ${{ vars.BUILTIN_REGISTRY_USER }}
      BUILTIN_REGISTRY_PASSWORD: ${{ secrets.BUILTIN_REGISTRY_PASSWORD }}
      GITLAB_USERNAME: ${{ vars.GITLAB_USERNAME }}
      GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Install regctl
        uses: https://forgejo.ellis.link/continuwuation/regclient-actions/regctl-installer@main
        with:
          binary: regsync

      - name: Check what images need mirroring
        run: |
          echo "Checking images that need mirroring..."
          regsync check -c .forgejo/regsync/regsync.yml -v info

      - name: Mirror images
        if: ${{ !inputs.dry_run }}
        run: |
          echo "Starting image mirroring..."
          regsync once -c .forgejo/regsync/regsync.yml -v info
.forgejo/workflows/prek-checks.yml (new file, 83 lines)
@@ -0,0 +1,83 @@
name: Checks / Prek

on:
  pull_request:
  push:
    branches:
      - main
  workflow_dispatch:

permissions:
  contents: read

jobs:
  fast-checks:
    name: Pre-commit & Formatting
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Setup Rust nightly
        uses: ./.forgejo/actions/setup-rust
        with:
          rust-version: nightly
          github-token: ${{ secrets.GH_PUBLIC_RO }}

      - name: Run prek
        run: |
          prek run \
            --all-files \
            --hook-stage manual \
            --show-diff-on-failure \
            --color=always \
            -v

      - name: Check Rust formatting
        run: |
          cargo +nightly fmt --all -- --check && \
            echo "✅ Formatting check passed" || \
            exit 1

  clippy-and-tests:
    name: Clippy and Cargo Tests
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Setup LLVM
        uses: ./.forgejo/actions/setup-llvm-with-apt
        with:
          extra-packages: liburing-dev liburing2

      - name: Setup Rust with caching
        uses: ./.forgejo/actions/setup-rust
        with:
          github-token: ${{ secrets.GH_PUBLIC_RO }}

      - name: Run Clippy lints
        run: |
          cargo clippy \
            --workspace \
            --features full \
            --locked \
            --no-deps \
            --profile test \
            -- \
            -D warnings

      - name: Run Cargo tests
        run: |
          cargo test \
            --workspace \
            --features full \
            --locked \
            --profile test \
            --all-targets \
            --no-fail-fast
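Both jobs only invoke standard cargo and prek commands, so the same checks can be run locally before pushing; a minimal sketch mirroring the workflow:

    prek run --all-files --hook-stage manual --show-diff-on-failure
    cargo +nightly fmt --all -- --check
    cargo clippy --workspace --features full --locked --no-deps --profile test -- -D warnings
    cargo test --workspace --features full --locked --profile test --all-targets --no-fail-fast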
.forgejo/workflows/release-image.yml (new file, 333 lines)
|
@ -0,0 +1,333 @@
|
|||
name: Release Docker Image
|
||||
concurrency:
|
||||
group: "release-image-${{ github.ref }}"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- "*.md"
|
||||
- "**/*.md"
|
||||
- ".gitlab-ci.yml"
|
||||
- ".gitignore"
|
||||
- "renovate.json"
|
||||
- "pkg/**"
|
||||
- "docs/**"
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths-ignore:
|
||||
- "*.md"
|
||||
- "**/*.md"
|
||||
- ".gitlab-ci.yml"
|
||||
- ".gitignore"
|
||||
- "renovate.json"
|
||||
- "pkg/**"
|
||||
- "docs/**"
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
BUILTIN_REGISTRY: forgejo.ellis.link
|
||||
BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}"
|
||||
|
||||
jobs:
|
||||
define-variables:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
outputs:
|
||||
images: ${{ steps.var.outputs.images }}
|
||||
images_list: ${{ steps.var.outputs.images_list }}
|
||||
build_matrix: ${{ steps.var.outputs.build_matrix }}
|
||||
|
||||
steps:
|
||||
- name: Setting variables
|
||||
uses: https://github.com/actions/github-script@v7
|
||||
id: var
|
||||
with:
|
||||
script: |
|
||||
const githubRepo = '${{ github.repository }}'.toLowerCase()
|
||||
const repoId = githubRepo.split('/')[1]
|
||||
|
||||
core.setOutput('github_repository', githubRepo)
|
||||
const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo
|
||||
let images = []
|
||||
if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
|
||||
images.push(builtinImage)
|
||||
} else {
|
||||
// Fallback to official registry for forks/PRs without credentials
|
||||
images.push('forgejo.ellis.link/continuwuation/continuwuity')
|
||||
}
|
||||
core.setOutput('images', images.join("\n"))
|
||||
core.setOutput('images_list', images.join(","))
|
||||
const platforms = ['linux/amd64', 'linux/arm64']
|
||||
core.setOutput('build_matrix', JSON.stringify({
|
||||
platform: platforms,
|
||||
target_cpu: ['base'],
|
||||
include: platforms.map(platform => { return {
|
||||
platform,
|
||||
slug: platform.replace('/', '-')
|
||||
}})
|
||||
}))
|
||||
|
||||
build-image:
|
||||
runs-on: dind
|
||||
needs: define-variables
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
attestations: write
|
||||
id-token: write
|
||||
strategy:
|
||||
matrix:
|
||||
{
|
||||
"target_cpu": ["base"],
|
||||
"profile": ["release"],
|
||||
"include":
|
||||
[
|
||||
{ "platform": "linux/amd64", "slug": "linux-amd64" },
|
||||
{ "platform": "linux/arm64", "slug": "linux-arm64" },
|
||||
],
|
||||
"platform": ["linux/amd64", "linux/arm64"],
|
||||
}
|
||||
|
||||
steps:
|
||||
- name: Echo strategy
|
||||
run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}'
|
||||
- name: Echo matrix
|
||||
run: echo '${{ toJSON(matrix) }}'
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Install rust
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
id: rust-toolchain
|
||||
uses: ./.forgejo/actions/rust-toolchain
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
|
||||
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
|
||||
endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
|
||||
- name: Set up QEMU
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
uses: docker/setup-qemu-action@v3
|
||||
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
|
||||
- name: Login to builtin registry
|
||||
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.BUILTIN_REGISTRY }}
|
||||
username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
|
||||
password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
||||
|
||||
# This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
|
||||
- name: Extract metadata (labels, annotations) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{needs.define-variables.outputs.images}}
|
||||
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
|
||||
env:
|
||||
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
|
||||
|
||||
# This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
|
||||
# It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
|
||||
# It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
|
||||
# It will not push images generated from a pull request
|
||||
- name: Get short git commit SHA
|
||||
id: sha
|
||||
run: |
|
||||
calculatedSha=$(git rev-parse --short ${{ github.sha }})
|
||||
echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
|
||||
echo "Short SHA: $calculatedSha"
|
||||
- name: Get Git commit timestamps
|
||||
run: |
|
||||
timestamp=$(git log -1 --pretty=%ct)
|
||||
echo "TIMESTAMP=$timestamp" >> $GITHUB_ENV
|
||||
echo "Commit timestamp: $timestamp"
|
||||
|
||||
- uses: ./.forgejo/actions/timelord
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
with:
|
||||
key: timelord-v0
|
||||
path: .
|
||||
|
||||
- name: Cache Rust registry
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
.cargo/git
|
||||
.cargo/git/checkouts
|
||||
.cargo/registry
|
||||
.cargo/registry/src
|
||||
key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
|
||||
- name: Cache cargo target
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
id: cache-cargo-target
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
|
||||
key: cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
|
||||
- name: Cache apt cache
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
id: cache-apt
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
var-cache-apt-${{ matrix.slug }}
|
||||
key: var-cache-apt-${{ matrix.slug }}
|
||||
- name: Cache apt lib
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
id: cache-apt-lib
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
var-lib-apt-${{ matrix.slug }}
|
||||
key: var-lib-apt-${{ matrix.slug }}
|
||||
- name: inject cache into docker
|
||||
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
|
||||
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
|
||||
with:
|
||||
cache-map: |
|
||||
{
|
||||
".cargo/registry": "/usr/local/cargo/registry",
|
||||
".cargo/git/db": "/usr/local/cargo/git/db",
|
||||
"cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}": {
|
||||
"target": "/app/target",
|
||||
"id": "cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}"
|
||||
},
|
||||
"var-cache-apt-${{ matrix.slug }}": "/var/cache/apt",
|
||||
"var-lib-apt-${{ matrix.slug }}": "/var/lib/apt"
|
||||
}
|
||||
skip-extraction: ${{ steps.cache.outputs.cache-hit }}
|
||||
|
||||
- name: Build and push Docker image by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: "docker/Dockerfile"
|
||||
build-args: |
|
||||
GIT_COMMIT_HASH=${{ github.sha }}
|
||||
GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
|
||||
GIT_REMOTE_URL=${{github.event.repository.html_url }}
|
||||
GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
|
||||
platforms: ${{ matrix.platform }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
annotations: ${{ steps.meta.outputs.annotations }}
|
||||
cache-from: type=gha
|
||||
# cache-to: type=gha,mode=max
|
||||
sbom: true
|
          outputs: |
            ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', needs.define-variables.outputs.images_list) || format('type=image,"name={0}",push=false', needs.define-variables.outputs.images_list) }}
            type=local,dest=/tmp/binaries
        env:
          SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}

      # For publishing multi-platform manifests
      - name: Export digest
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      # Binary extracted via local output for all builds
      - name: Rename extracted binary
        run: mv /tmp/binaries/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}

      - name: Upload binary artifact
        uses: forgejo/upload-artifact@v4
        with:
          name: conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
          path: /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
          if-no-files-found: error

      - name: Upload digest
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: forgejo/upload-artifact@v4
        with:
          name: digests-${{ matrix.slug }}
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 5

  merge:
    runs-on: dind
    needs: [define-variables, build-image]
    steps:
      - name: Download digests
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: forgejo/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true
      # Uses the `docker/login-action` action to log in to the container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Login to builtin registry
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: docker/login-action@v3
        with:
          registry: ${{ env.BUILTIN_REGISTRY }}
          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        uses: docker/setup-buildx-action@v3
        with:
          # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
          driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
          endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}

      - name: Extract metadata (tags) for Docker
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        id: meta
        uses: docker/metadata-action@v5
        with:
          tags: |
            type=semver,pattern={{version}},prefix=v
            type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v
            type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v
            type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
            type=ref,event=pr
            type=sha,format=long
            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
          images: ${{needs.define-variables.outputs.images}}
          # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index

      - name: Create manifest list and push
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        working-directory: /tmp/digests
        env:
          IMAGES: ${{needs.define-variables.outputs.images}}
        shell: bash
        run: |
          IFS=$'\n'
          IMAGES_LIST=($IMAGES)
          ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS)
          TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS)
          for REPO in "${IMAGES_LIST[@]}"; do
            docker buildx imagetools create \
              $(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \
              $(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \
              $(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done)
          done

      - name: Inspect image
        if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
        env:
          IMAGES: ${{needs.define-variables.outputs.images}}
        shell: bash
        run: |
          IMAGES_LIST=($IMAGES)
          for REPO in "${IMAGES_LIST[@]}"; do
            docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }}
          done
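Note: the manifest-assembly loop above issues one `docker buildx imagetools create` call per target repository, passing every tag and annotation emitted by the metadata step plus one digest reference per downloaded artifact. A minimal sketch of what a single expanded invocation might look like, with a hypothetical repository, tags, and digests (not taken from a real run of this workflow):

  # Sketch only: repository, tags, and digests below are placeholders
  docker buildx imagetools create \
    --tag registry.example.org/owner/conduwuit:sha-0123456789abcdef \
    --tag registry.example.org/owner/conduwuit:main \
    --annotation "index:org.opencontainers.image.revision=0123456789abcdef" \
    registry.example.org/owner/conduwuit@sha256:1111111111111111111111111111111111111111111111111111111111111111 \
    registry.example.org/owner/conduwuit@sha256:2222222222222222222222222222222222222222222222222222222222222222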
.forgejo/workflows/renovate.yml (new file, 111 lines)
@@ -0,0 +1,111 @@
name: Maintenance / Renovate

on:
  schedule:
    # Run at 5am UTC daily to avoid late-night dev
    - cron: '0 5 * * *'

  workflow_dispatch:
    inputs:
      dryRun:
        description: 'Dry run mode'
        required: false
        default: null
        type: choice
        options:
          - null
          - 'extract'
          - 'lookup'
          - 'full'
      logLevel:
        description: 'Log level'
        required: false
        default: 'info'
        type: choice
        options:
          - 'info'
          - 'warning'
          - 'critical'

  push:
    branches:
      - main
    paths:
      # Re-run when config changes
      - '.forgejo/workflows/renovate.yml'
      - 'renovate.json'

jobs:
  renovate:
    name: Renovate
    runs-on: ubuntu-latest
    container:
      image: ghcr.io/renovatebot/renovate:41
      options: --tmpfs /tmp:exec
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          show-progress: false

      - name: print node heap
        run: /usr/local/renovate/node -e 'console.log(`node heap limit = ${require("v8").getHeapStatistics().heap_size_limit / (1024 * 1024)} Mb`)'

      - name: Restore renovate repo cache
        uses: https://github.com/actions/cache@v4
        with:
          path: |
            /tmp/renovate/cache/renovate/repository
          key: repo-cache-${{ github.run_id }}
          restore-keys: |
            repo-cache-

      - name: Restore renovate package cache
        uses: https://github.com/actions/cache@v4
        with:
          path: |
            /tmp/renovate/cache/renovate/renovate-cache-sqlite
          key: package-cache-${{ github.run_id }}
          restore-keys: |
            package-cache-

      - name: Self-hosted Renovate
        uses: https://github.com/renovatebot/github-action@v43.0.11
        env:
          LOG_LEVEL: ${{ inputs.logLevel || 'info' }}
          RENOVATE_DRY_RUN: ${{ inputs.dryRun || 'false' }}

          RENOVATE_PLATFORM: forgejo
          RENOVATE_ENDPOINT: ${{ github.server_url }}
          RENOVATE_AUTODISCOVER: 'false'
          RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'

          RENOVATE_GIT_TIMEOUT: 60000

          RENOVATE_REQUIRE_CONFIG: 'required'
          RENOVATE_ONBOARDING: 'false'

          RENOVATE_PR_COMMITS_PER_RUN_LIMIT: 3

          RENOVATE_GITHUB_TOKEN_WARN: 'false'
          RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}
          GITHUB_COM_TOKEN: ${{ secrets.GH_PUBLIC_RO }}

          RENOVATE_REPOSITORY_CACHE: 'enabled'
          RENOVATE_X_SQLITE_PACKAGE_CACHE: true

      - name: Save renovate repo cache
        if: always() && env.RENOVATE_DRY_RUN != 'full'
        uses: https://github.com/actions/cache@v4
        with:
          path: |
            /tmp/renovate/cache/renovate/repository
          key: repo-cache-${{ github.run_id }}

      - name: Save renovate package cache
        if: always() && env.RENOVATE_DRY_RUN != 'full'
        uses: https://github.com/actions/cache@v4
        with:
          path: |
            /tmp/renovate/cache/renovate/renovate-cache-sqlite
          key: package-cache-${{ github.run_id }}
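The job above configures Renovate entirely through environment variables, so the same behaviour can be reproduced outside CI when debugging. A rough local equivalent, assuming the same container image and placeholder values for the endpoint, token, and repository (all hypothetical), would be:

  docker run --rm --tmpfs /tmp:exec \
    -e RENOVATE_PLATFORM=forgejo \
    -e RENOVATE_ENDPOINT=https://forgejo.example.org \
    -e RENOVATE_TOKEN=<forgejo-access-token> \
    -e RENOVATE_AUTODISCOVER=false \
    -e RENOVATE_REPOSITORIES='["owner/repo"]' \
    -e RENOVATE_DRY_RUN=full \
    -e LOG_LEVEL=debug \
    ghcr.io/renovatebot/renovate:41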
@@ -5,3 +5,5 @@ f419c64aca300a338096b4e0db4c73ace54f23d0
# use chain_width 60
162948313c212193965dece50b816ef0903172ba
5998a0d883d31b866f7c8c46433a8857eae51a89
# trailing whitespace and newlines
46c193e74b2ce86c48ce802333a0aabce37fd6e9
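These hashes extend a list of formatting-only commits to hide from blame output. Assuming the hunk belongs to a standard ignore-revs file (the file name is not shown in this excerpt), it is typically wired up locally like this, with the path and the blamed file as placeholder examples:

  git config blame.ignoreRevsFile .git-blame-ignore-revs
  git blame --ignore-revs-file .git-blame-ignore-revs src/main.rs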
.github/FUNDING.yml (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
github: [JadedBlueEyes, nexy7574]
custom:
  - https://ko-fi.com/nexy7574
  - https://ko-fi.com/JadedBlueEyes
.github/workflows/ci.yml (vendored, deleted, 717 lines)
@@ -1,717 +0,0 @@
name: CI and Artifacts
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
paths-ignore:
|
||||
- '.gitlab-ci.yml'
|
||||
- '.gitignore'
|
||||
- 'renovate.json'
|
||||
- 'debian/**'
|
||||
- 'docker/**'
|
||||
branches:
|
||||
- main
|
||||
tags:
|
||||
- '*'
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.head_ref || github.ref_name }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
# Required to make some things output color
|
||||
TERM: ansi
|
||||
# Publishing to my nix binary cache
|
||||
ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
|
||||
# conduwuit.cachix.org
|
||||
CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
# Just in case incremental is still being set to true, speeds up CI
|
||||
CARGO_INCREMENTAL: 0
|
||||
# Custom nix binary cache if fork is being used
|
||||
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
|
||||
ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }}
|
||||
# Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
|
||||
NIX_CONFIG: |
|
||||
show-trace = true
|
||||
extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org
|
||||
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=
|
||||
experimental-features = nix-command flakes
|
||||
extra-experimental-features = nix-command flakes
|
||||
accept-flake-config = true
|
||||
WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
|
||||
GH_REF_NAME: ${{ github.ref_name }}
|
||||
WEBSERVER_DIR_NAME: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
tests:
|
||||
name: Test
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Setup SSH web publish
|
||||
env:
|
||||
web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}
|
||||
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
|
||||
run: |
|
||||
mkdir -p -v ~/.ssh
|
||||
|
||||
echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts
|
||||
echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519
|
||||
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
|
||||
cat >>~/.ssh/config <<END
|
||||
Host website
|
||||
HostName ${{ secrets.WEB_UPLOAD_SSH_HOSTNAME }}
|
||||
User ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
|
||||
IdentityFile ~/.ssh/id_ed25519
|
||||
StrictHostKeyChecking yes
|
||||
AddKeysToAgent no
|
||||
ForwardX11 no
|
||||
BatchMode yes
|
||||
END
|
||||
|
||||
echo "Checking connection"
|
||||
ssh -q website "echo test" || ssh -q website "echo test"
|
||||
|
||||
echo "Creating commit rev directory on web server"
|
||||
ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/"
|
||||
ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/"
|
||||
|
||||
echo "SSH_WEBSITE=1" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Sync repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Tag comparison check
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }}
|
||||
run: |
|
||||
# Tag mismatch with latest repo tag check to prevent potential downgrades
|
||||
LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)
|
||||
|
||||
if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then
|
||||
echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.'
|
||||
echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Prepare build environment
|
||||
run: |
|
||||
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
|
||||
direnv allow
|
||||
nix develop .#all-features --command true
|
||||
|
||||
- name: Cache CI dependencies
|
||||
run: |
|
||||
bin/nix-build-and-cache ci
|
||||
bin/nix-build-and-cache just '.#devShells.x86_64-linux.default'
|
||||
bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features'
|
||||
bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic'
|
||||
|
||||
# use rust-cache
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
# we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting
|
||||
# releases and tags
|
||||
#if: ${{ !startsWith(github.ref, 'refs/tags/') }}
|
||||
with:
|
||||
cache-all-crates: "true"
|
||||
cache-on-failure: "true"
|
||||
cache-targets: "true"
|
||||
|
||||
- name: Run CI tests
|
||||
env:
|
||||
CARGO_PROFILE: "test"
|
||||
run: |
|
||||
direnv exec . engage > >(tee -a test_output.log)
|
||||
|
||||
- name: Run Complement tests
|
||||
env:
|
||||
CARGO_PROFILE: "test"
|
||||
run: |
|
||||
# the nix devshell sets $COMPLEMENT_SRC, so "/dev/null" is no-op
|
||||
direnv exec . bin/complement "/dev/null" complement_test_logs.jsonl complement_test_results.jsonl > >(tee -a test_output.log)
|
||||
cp -v -f result complement_oci_image.tar.gz
|
||||
|
||||
- name: Upload Complement OCI image
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: complement_oci_image.tar.gz
|
||||
path: complement_oci_image.tar.gz
|
||||
if-no-files-found: error
|
||||
compression-level: 0
|
||||
|
||||
- name: Upload Complement logs
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: complement_test_logs.jsonl
|
||||
path: complement_test_logs.jsonl
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Upload Complement results
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: complement_test_results.jsonl
|
||||
path: complement_test_results.jsonl
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Diff Complement results with checked-in repo results
|
||||
run: |
|
||||
diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log)
|
||||
|
||||
- name: Update Job Summary
|
||||
env:
|
||||
GH_JOB_STATUS: ${{ job.status }}
|
||||
if: success() || failure()
|
||||
run: |
|
||||
if [ ${GH_JOB_STATUS} == 'success' ]; then
|
||||
echo '# ✅ CI completed suwuccessfully' >> $GITHUB_STEP_SUMMARY
|
||||
else
|
||||
echo '# ❌ CI failed (last 100 lines of output)' >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY
|
||||
echo '```diff' >> $GITHUB_STEP_SUMMARY
|
||||
tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
||||
|
||||
build:
|
||||
name: Build
|
||||
runs-on: self-hosted
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target: aarch64-linux-musl
|
||||
- target: x86_64-linux-musl
|
||||
steps:
|
||||
- name: Sync repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup SSH web publish
|
||||
env:
|
||||
web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}
|
||||
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
|
||||
run: |
|
||||
mkdir -p -v ~/.ssh
|
||||
|
||||
echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts
|
||||
echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519
|
||||
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
|
||||
cat >>~/.ssh/config <<END
|
||||
Host website
|
||||
HostName ${{ secrets.WEB_UPLOAD_SSH_HOSTNAME }}
|
||||
User ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
|
||||
IdentityFile ~/.ssh/id_ed25519
|
||||
StrictHostKeyChecking yes
|
||||
AddKeysToAgent no
|
||||
ForwardX11 no
|
||||
BatchMode yes
|
||||
END
|
||||
|
||||
echo "Checking connection"
|
||||
ssh -q website "echo test" || ssh -q website "echo test"
|
||||
|
||||
echo "SSH_WEBSITE=1" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Prepare build environment
|
||||
run: |
|
||||
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
|
||||
direnv allow
|
||||
nix develop .#all-features --command true --impure
|
||||
|
||||
# use rust-cache
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
# we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting
|
||||
# releases and tags
|
||||
#if: ${{ !startsWith(github.ref, 'refs/tags/') }}
|
||||
with:
|
||||
cache-all-crates: "true"
|
||||
cache-on-failure: "true"
|
||||
cache-targets: "true"
|
||||
|
||||
- name: Build static ${{ matrix.target }}-all-features
|
||||
run: |
|
||||
if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]]
|
||||
then
|
||||
CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl"
|
||||
elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]]
|
||||
then
|
||||
CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl"
|
||||
fi
|
||||
|
||||
SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
|
||||
|
||||
bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features
|
||||
|
||||
mkdir -v -p target/release/
|
||||
mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/
|
||||
cp -v -f result/bin/conduwuit target/release/conduwuit
|
||||
cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit
|
||||
direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb
|
||||
mv -v target/release/conduwuit static-${{ matrix.target }}
|
||||
mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb
|
||||
|
||||
- name: Build static x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||
run: |
|
||||
CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl"
|
||||
SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
|
||||
|
||||
bin/nix-build-and-cache just .#static-x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||
|
||||
mkdir -v -p target/release/
|
||||
mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/
|
||||
cp -v -f result/bin/conduwuit target/release/conduwuit
|
||||
cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit
|
||||
direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb
|
||||
mv -v target/release/conduwuit static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||
mv -v target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb x86_64-linux-musl-x86_64-haswell-optimised.deb
|
||||
|
||||
# quick smoke test of the x86_64 static release binary
|
||||
- name: Quick smoke test the x86_64 static release binary
|
||||
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||
run: |
|
||||
# GH actions default runners are x86_64 only
|
||||
if file result/bin/conduwuit | grep x86-64; then
|
||||
result/bin/conduwuit --version
|
||||
result/bin/conduwuit --help
|
||||
result/bin/conduwuit -Oserver_name="'$(date -u +%s).local'" -Odatabase_path="'/tmp/$(date -u +%s)'" --execute "server admin-notice awawawawawawawawawawa" --execute "server memory-usage" --execute "server shutdown"
|
||||
fi
|
||||
|
||||
- name: Build static debug ${{ matrix.target }}-all-features
|
||||
run: |
|
||||
if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]]
|
||||
then
|
||||
CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl"
|
||||
elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]]
|
||||
then
|
||||
CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl"
|
||||
fi
|
||||
|
||||
SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
|
||||
|
||||
bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features-debug
|
||||
|
||||
# > warning: dev profile is not supported and will be a hard error in the future. cargo-deb is for making releases, and it doesn't make sense to use it with dev profiles.
|
||||
# so we need to coerce cargo-deb into thinking this is a release binary
|
||||
mkdir -v -p target/release/
|
||||
mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/
|
||||
cp -v -f result/bin/conduwuit target/release/conduwuit
|
||||
cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit
|
||||
direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}-debug.deb
|
||||
mv -v target/release/conduwuit static-${{ matrix.target }}-debug
|
||||
mv -v target/release/${{ matrix.target }}-debug.deb ${{ matrix.target }}-debug.deb
|
||||
|
||||
# quick smoke test of the x86_64 static debug binary
|
||||
- name: Run x86_64 static debug binary
|
||||
run: |
|
||||
# GH actions default runners are x86_64 only
|
||||
if file result/bin/conduwuit | grep x86-64; then
|
||||
result/bin/conduwuit --version
|
||||
fi
|
||||
|
||||
# check validity of produced deb package, invalid debs will error on these commands
|
||||
- name: Validate produced deb package
|
||||
run: |
|
||||
# List contents
|
||||
dpkg-deb --contents ${{ matrix.target }}.deb
|
||||
dpkg-deb --contents ${{ matrix.target }}-debug.deb
|
||||
# List info
|
||||
dpkg-deb --info ${{ matrix.target }}.deb
|
||||
dpkg-deb --info ${{ matrix.target }}-debug.deb
|
||||
|
||||
- name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||
with:
|
||||
name: static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||
path: static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Upload static-${{ matrix.target }}-all-features to GitHub
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: static-${{ matrix.target }}
|
||||
path: static-${{ matrix.target }}
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Upload static deb ${{ matrix.target }}-all-features to GitHub
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: deb-${{ matrix.target }}
|
||||
path: ${{ matrix.target }}.deb
|
||||
if-no-files-found: error
|
||||
compression-level: 0
|
||||
|
||||
- name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver
|
||||
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||
run: |
|
||||
if [ ! -z $SSH_WEBSITE ]; then
|
||||
chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||
scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||
fi
|
||||
|
||||
- name: Upload static-${{ matrix.target }}-all-features to webserver
|
||||
run: |
|
||||
if [ ! -z $SSH_WEBSITE ]; then
|
||||
chmod +x static-${{ matrix.target }}
|
||||
scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}
|
||||
fi
|
||||
|
||||
- name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver
|
||||
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||
run: |
|
||||
if [ ! -z $SSH_WEBSITE ]; then
|
||||
scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/x86_64-linux-musl-x86_64-haswell-optimised.deb
|
||||
fi
|
||||
|
||||
- name: Upload static deb ${{ matrix.target }}-all-features to webserver
|
||||
run: |
|
||||
if [ ! -z $SSH_WEBSITE ]; then
|
||||
scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}.deb
|
||||
fi
|
||||
|
||||
- name: Upload static-${{ matrix.target }}-debug-all-features to GitHub
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: static-${{ matrix.target }}-debug
|
||||
path: static-${{ matrix.target }}-debug
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Upload static deb ${{ matrix.target }}-debug-all-features to GitHub
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: deb-${{ matrix.target }}-debug
|
||||
path: ${{ matrix.target }}-debug.deb
|
||||
if-no-files-found: error
|
||||
compression-level: 0
|
||||
|
||||
- name: Upload static-${{ matrix.target }}-debug-all-features to webserver
|
||||
run: |
|
||||
if [ ! -z $SSH_WEBSITE ]; then
|
||||
scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}-debug
|
||||
fi
|
||||
|
||||
- name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver
|
||||
run: |
|
||||
if [ ! -z $SSH_WEBSITE ]; then
|
||||
scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}-debug.deb
|
||||
fi
|
||||
|
||||
- name: Build OCI image ${{ matrix.target }}-all-features
|
||||
run: |
|
||||
bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features
|
||||
|
||||
cp -v -f result oci-image-${{ matrix.target }}.tar.gz
|
||||
|
||||
- name: Build OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||
run: |
|
||||
bin/nix-build-and-cache just .#oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||
|
||||
cp -v -f result oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz
|
||||
|
||||
- name: Build debug OCI image ${{ matrix.target }}-all-features
|
||||
run: |
|
||||
bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features-debug
|
||||
|
||||
cp -v -f result oci-image-${{ matrix.target }}-debug.tar.gz
|
||||
|
||||
- name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub
|
||||
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||
path: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz
|
||||
if-no-files-found: error
|
||||
compression-level: 0
|
||||
- name: Upload OCI image ${{ matrix.target }}-all-features to GitHub
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: oci-image-${{ matrix.target }}
|
||||
path: oci-image-${{ matrix.target }}.tar.gz
|
||||
if-no-files-found: error
|
||||
compression-level: 0
|
||||
|
||||
- name: Upload OCI image ${{ matrix.target }}-debug-all-features to GitHub
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: oci-image-${{ matrix.target }}-debug
|
||||
path: oci-image-${{ matrix.target }}-debug.tar.gz
|
||||
if-no-files-found: error
|
||||
compression-level: 0
|
||||
|
||||
- name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz to webserver
|
||||
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||
run: |
|
||||
if [ ! -z $SSH_WEBSITE ]; then
|
||||
scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz
|
||||
fi
|
||||
|
||||
- name: Upload OCI image ${{ matrix.target }}-all-features to webserver
|
||||
run: |
|
||||
if [ ! -z $SSH_WEBSITE ]; then
|
||||
scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}.tar.gz
|
||||
fi
|
||||
|
||||
- name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver
|
||||
run: |
|
||||
if [ ! -z $SSH_WEBSITE ]; then
|
||||
scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz
|
||||
fi
|
||||
|
||||
variables:
|
||||
outputs:
|
||||
github_repository: ${{ steps.var.outputs.github_repository }}
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Setting global variables
|
||||
uses: actions/github-script@v7
|
||||
id: var
|
||||
with:
|
||||
script: |
|
||||
core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase())
|
||||
docker:
|
||||
name: Docker publish
|
||||
runs-on: self-hosted
|
||||
needs: [build, variables, tests]
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]'
|
||||
env:
|
||||
DOCKER_HUB_REPO: docker.io/${{ needs.variables.outputs.github_repository }}
|
||||
GHCR_REPO: ghcr.io/${{ needs.variables.outputs.github_repository }}
|
||||
GLCR_REPO: registry.gitlab.com/conduwuit/conduwuit
|
||||
UNIQUE_TAG: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
|
||||
BRANCH_TAG: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
|
||||
|
||||
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
|
||||
GHCR_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}"
|
||||
steps:
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Login to Docker Hub
|
||||
if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ vars.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Login to GitLab Container Registry
|
||||
if: ${{ (vars.GITLAB_USERNAME != '') && (env.GITLAB_TOKEN != '') }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: registry.gitlab.com
|
||||
username: ${{ vars.GITLAB_USERNAME }}
|
||||
password: ${{ secrets.GITLAB_TOKEN }}
|
||||
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: "oci*"
|
||||
|
||||
- name: Move OCI images into position
|
||||
run: |
|
||||
mv -v oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised/*.tar.gz oci-image-amd64-haswell-optimised.tar.gz
|
||||
mv -v oci-image-x86_64-linux-musl/*.tar.gz oci-image-amd64.tar.gz
|
||||
mv -v oci-image-aarch64-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz
|
||||
mv -v oci-image-x86_64-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz
|
||||
mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz
|
||||
|
||||
- name: Load and push amd64 haswell image
|
||||
run: |
|
||||
docker load -i oci-image-amd64-haswell-optimised.tar.gz
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||
fi
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
docker push ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
fi
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
docker push ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
fi
|
||||
|
||||
- name: Load and push amd64 image
|
||||
run: |
|
||||
docker load -i oci-image-amd64.tar.gz
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
|
||||
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
|
||||
fi
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64
|
||||
docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64
|
||||
fi
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64
|
||||
docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64
|
||||
fi
|
||||
|
||||
- name: Load and push arm64 image
|
||||
run: |
|
||||
docker load -i oci-image-arm64v8.tar.gz
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8
|
||||
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8
|
||||
fi
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8
|
||||
docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8
|
||||
fi
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8
|
||||
docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8
|
||||
fi
|
||||
|
||||
- name: Load and push amd64 debug image
|
||||
run: |
|
||||
docker load -i oci-image-amd64-debug.tar.gz
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
fi
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
fi
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
fi
|
||||
|
||||
- name: Load and push arm64 debug image
|
||||
run: |
|
||||
docker load -i oci-image-arm64v8-debug.tar.gz
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||
fi
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||
docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||
fi
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||
docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||
fi
|
||||
|
||||
- name: Create Docker haswell manifests
|
||||
run: |
|
||||
# Dockerhub Container Registry
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||
docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||
fi
|
||||
# GitHub Container Registry
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
fi
|
||||
# GitLab Container Registry
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
fi
|
||||
|
||||
- name: Create Docker combined manifests
|
||||
run: |
|
||||
# Dockerhub Container Registry
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
|
||||
docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
|
||||
fi
|
||||
# GitHub Container Registry
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
docker manifest create ${GHCR_REPO}:${UNIQUE_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64
|
||||
docker manifest create ${GHCR_REPO}:${BRANCH_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64
|
||||
fi
|
||||
# GitLab Container Registry
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
docker manifest create ${GLCR_REPO}:${UNIQUE_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64
|
||||
docker manifest create ${GLCR_REPO}:${BRANCH_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64
|
||||
fi
|
||||
|
||||
- name: Create Docker combined debug manifests
|
||||
run: |
|
||||
# Dockerhub Container Registry
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
fi
|
||||
# GitHub Container Registry
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
fi
|
||||
# GitLab Container Registry
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||
fi
|
||||
|
||||
- name: Push manifests to Docker registries
|
||||
run: |
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}
|
||||
docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}
|
||||
docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug
|
||||
docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug
|
||||
docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||
docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell
|
||||
fi
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}
|
||||
docker manifest push ${GHCR_REPO}:${BRANCH_TAG}
|
||||
docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-debug
|
||||
docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-debug
|
||||
docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-haswell
|
||||
fi
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}
|
||||
docker manifest push ${GLCR_REPO}:${BRANCH_TAG}
|
||||
docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-debug
|
||||
docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-debug
|
||||
docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||
docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-haswell
|
||||
fi
|
||||
|
||||
- name: Add Image Links to Job Summary
|
||||
run: |
|
||||
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||
echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
||||
if [ $GHCR_ENABLED = "true" ]; then
|
||||
echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
||||
if [ ! -z $GITLAB_TOKEN ]; then
|
||||
echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
.github/workflows/docker-hub-description.yml (vendored, deleted, 41 lines)
@@ -1,41 +0,0 @@
name: Update Docker Hub Description
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- README.md
|
||||
- .github/workflows/docker-hub-description.yml
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
dockerHubDescription:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' && (vars.DOCKER_USERNAME != '') }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setting variables
|
||||
uses: actions/github-script@v7
|
||||
id: var
|
||||
with:
|
||||
script: |
|
||||
const githubRepo = '${{ github.repository }}'.toLowerCase()
|
||||
const repoId = githubRepo.split('/')[1]
|
||||
|
||||
core.setOutput('github_repository', githubRepo)
|
||||
const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId
|
||||
core.setOutput('docker_repo', dockerRepo)
|
||||
|
||||
- name: Docker Hub Description
|
||||
uses: peter-evans/dockerhub-description@v4
|
||||
with:
|
||||
username: ${{ vars.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
repository: ${{ steps.var.outputs.docker_repo }}
|
||||
short-description: ${{ github.event.repository.description }}
|
||||
enable-url-completion: true
|
.github/workflows/documentation.yml (vendored, deleted, 104 lines)
@@ -1,104 +0,0 @@
name: Documentation and GitHub Pages
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Required to make some things output color
|
||||
TERM: ansi
|
||||
# Publishing to my nix binary cache
|
||||
ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
|
||||
# conduwuit.cachix.org
|
||||
CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
# Custom nix binary cache if fork is being used
|
||||
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
|
||||
ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }}
|
||||
# Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
|
||||
NIX_CONFIG: |
|
||||
show-trace = true
|
||||
extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org
|
||||
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=
|
||||
experimental-features = nix-command flakes
|
||||
extra-experimental-features = nix-command flakes
|
||||
accept-flake-config = true
|
||||
|
||||
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
|
||||
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: false
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
docs:
|
||||
name: Documentation and GitHub Pages
|
||||
runs-on: self-hosted
|
||||
|
||||
permissions:
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
|
||||
steps:
|
||||
- name: Sync repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup GitHub Pages
|
||||
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request')
|
||||
uses: actions/configure-pages@v5
|
||||
|
||||
- name: Prepare build environment
|
||||
run: |
|
||||
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
|
||||
direnv allow
|
||||
nix develop --command true
|
||||
|
||||
- name: Cache CI dependencies
|
||||
run: |
|
||||
bin/nix-build-and-cache ci
|
||||
|
||||
- name: Run lychee and markdownlint
|
||||
run: |
|
||||
direnv exec . engage just lints lychee
|
||||
direnv exec . engage just lints markdownlint
|
||||
|
||||
- name: Build documentation (book)
|
||||
run: |
|
||||
bin/nix-build-and-cache just .#book
|
||||
|
||||
cp -r --dereference result public
|
||||
chmod u+w -R public
|
||||
|
||||
- name: Upload generated documentation (book) as normal artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: public
|
||||
path: public
|
||||
if-no-files-found: error
|
||||
# don't compress again
|
||||
compression-level: 0
|
||||
|
||||
- name: Upload generated documentation (book) as GitHub Pages artifact
|
||||
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request')
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: public
|
||||
|
||||
- name: Deploy to GitHub Pages
|
||||
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request')
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
.github/workflows/release.yml (vendored, deleted, 118 lines)
@@ -1,118 +0,0 @@
name: Upload Release Assets
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'Tag to release'
|
||||
required: true
|
||||
type: string
|
||||
action_id:
|
||||
description: 'Action ID of the CI run'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
env:
|
||||
GH_EVENT_NAME: ${{ github.event_name }}
|
||||
GH_EVENT_INPUTS_ACTION_ID: ${{ github.event.inputs.action_id }}
|
||||
GH_EVENT_INPUTS_TAG: ${{ github.event.inputs.tag }}
|
||||
GH_REPOSITORY: ${{ github.repository }}
|
||||
GH_SHA: ${{ github.sha }}
|
||||
GH_TAG: ${{ github.event.release.tag_name }}
|
||||
|
||||
steps:
|
||||
- name: get latest ci id
|
||||
id: get_ci_id
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
if [ "${GH_EVENT_NAME}" == "workflow_dispatch" ]; then
|
||||
id="${GH_EVENT_INPUTS_ACTION_ID}"
|
||||
tag="${GH_EVENT_INPUTS_TAG}"
|
||||
else
|
||||
# get all runs of the ci workflow
|
||||
json=$(gh api "repos/${GH_REPOSITORY}/actions/workflows/ci.yml/runs")
|
||||
|
||||
# find first run that is github sha and status is completed
|
||||
id=$(echo "$json" | jq ".workflow_runs[] | select(.head_sha == \"${GH_SHA}\" and .status == \"completed\") | .id" | head -n 1)
|
||||
|
||||
if [ ! "$id" ]; then
|
||||
echo "No completed runs found"
|
||||
echo "ci_id=0" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
tag="${GH_TAG}"
|
||||
fi
|
||||
|
||||
echo "ci_id=$id" >> "$GITHUB_OUTPUT"
|
||||
echo "tag=$tag" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: get latest ci artifacts
|
||||
if: steps.get_ci_id.outputs.ci_id != 0
|
||||
uses: actions/download-artifact@v4
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
with:
|
||||
merge-multiple: true
|
||||
run-id: ${{ steps.get_ci_id.outputs.ci_id }}
|
||||
github-token: ${{ github.token }}
|
||||
|
||||
- run: |
|
||||
ls
|
||||
|
||||
- name: upload release assets
|
||||
if: steps.get_ci_id.outputs.ci_id != 0
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
TAG: ${{ steps.get_ci_id.outputs.tag }}
|
||||
run: |
|
||||
for file in $(find . -type f); do
|
||||
case "$file" in
|
||||
*json*) echo "Skipping $file...";;
|
||||
*) echo "Uploading $file..."; gh release upload $TAG "$file" --clobber --repo="${GH_REPOSITORY}" || echo "Something went wrong, skipping.";;
|
||||
esac
|
||||
done
|
||||
|
||||
- name: upload release assets to website
|
||||
if: steps.get_ci_id.outputs.ci_id != 0
|
||||
env:
|
||||
TAG: ${{ steps.get_ci_id.outputs.tag }}
|
||||
run: |
|
||||
mkdir -p -v ~/.ssh
|
||||
|
||||
echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts
|
||||
echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519
|
||||
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
|
||||
cat >>~/.ssh/config <<END
|
||||
Host website
|
||||
HostName ${{ secrets.WEB_UPLOAD_SSH_HOSTNAME }}
|
||||
User ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
|
||||
IdentityFile ~/.ssh/id_ed25519
|
||||
StrictHostKeyChecking yes
|
||||
AddKeysToAgent no
|
||||
ForwardX11 no
|
||||
BatchMode yes
|
||||
END
|
||||
|
||||
echo "Creating tag directory on web server"
|
||||
ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/releases/$TAG/"
|
||||
ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/releases/$TAG/"
|
||||
|
||||
for file in $(find . -type f); do
|
||||
case "$file" in
|
||||
*json*) echo "Skipping $file...";;
|
||||
*) echo "Uploading $file to website"; scp $file website:/var/www/girlboss.ceo/~strawberry/conduwuit/releases/$TAG/$file;;
|
||||
esac
|
||||
done
|
.gitlab-ci.yml (deleted, 152 lines)
@@ -1,152 +0,0 @@
stages:
|
||||
- ci
|
||||
- artifacts
|
||||
- publish
|
||||
|
||||
variables:
|
||||
# Makes some things print in color
|
||||
TERM: ansi
|
||||
# Faster cache and artifact compression / decompression
|
||||
FF_USE_FASTZIP: true
|
||||
# Print progress reports for cache and artifact transfers
|
||||
TRANSFER_METER_FREQUENCY: 5s
|
||||
NIX_CONFIG: |
|
||||
show-trace = true
|
||||
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://conduwuit.cachix.org
|
||||
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
|
||||
experimental-features = nix-command flakes
|
||||
extra-experimental-features = nix-command flakes
|
||||
accept-flake-config = true
|
||||
|
||||
# Avoid duplicate pipelines
|
||||
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
|
||||
workflow:
|
||||
rules:
|
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
|
||||
when: never
|
||||
- if: $CI
|
||||
|
||||
before_script:
|
||||
# Enable nix-command and flakes
|
||||
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null; then echo "extra-experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
|
||||
# Accept flake config from "untrusted" users
|
||||
- if command -v nix > /dev/null; then echo "accept-flake-config = true" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Add conduwuit binary cache
|
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduwuit" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE=" >> /etc/nix/nix.conf; fi
|
||||
|
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduit" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk=" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Add alternate binary cache
|
||||
- if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Add crane binary cache
|
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Add nix-community binary cache
|
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
|
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
|
||||
|
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://aseipp-nix-cache.freetls.fastly.net" >> /etc/nix/nix.conf; fi
|
||||
|
||||
# Install direnv and nix-direnv
|
||||
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
|
||||
|
||||
# Allow .envrc
|
||||
- if command -v nix > /dev/null; then direnv allow; fi
|
||||
|
||||
# Set CARGO_HOME to a cacheable path
|
||||
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
|
||||
|
||||
ci:
|
||||
stage: ci
|
||||
image: nixos/nix:2.24.9
|
||||
script:
|
||||
# Cache CI dependencies
|
||||
- ./bin/nix-build-and-cache ci
|
||||
|
||||
- direnv exec . engage
|
||||
cache:
|
||||
key: nix
|
||||
paths:
|
||||
- target
|
||||
- .gitlab-ci.d
|
||||
rules:
|
||||
# CI on upstream runners (only available for maintainers)
|
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true"
|
||||
# Manual CI on unprotected branches that are not MRs
|
||||
- if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false"
|
||||
when: manual
|
||||
# Manual CI on forks
|
||||
- if: $IS_UPSTREAM_CI != "true"
|
||||
when: manual
|
||||
- if: $CI
|
||||
interruptible: true
|
||||
|
||||
artifacts:
|
||||
stage: artifacts
|
||||
image: nixos/nix:2.24.9
|
||||
script:
|
||||
- ./bin/nix-build-and-cache just .#static-x86_64-linux-musl
|
||||
- cp result/bin/conduit x86_64-linux-musl
|
||||
|
||||
- mkdir -p target/release
|
||||
- cp result/bin/conduit target/release
|
||||
- direnv exec . cargo deb --no-build --no-strip
|
||||
- mv target/debian/*.deb x86_64-linux-musl.deb
|
||||
|
||||
# Since the OCI image package is based on the binary package, this has the
|
||||
# fun side effect of uploading the normal binary too. Conduit users who are
|
||||
# deploying with Nix can leverage this fact by adding our binary cache to
|
||||
# their systems.
|
||||
#
|
||||
# Note that although we have an `oci-image-x86_64-linux-musl`
|
||||
# output, we don't build it because it would be largely redundant to this
|
||||
# one since it's all containerized anyway.
|
||||
- ./bin/nix-build-and-cache just .#oci-image
|
||||
- cp result oci-image-amd64.tar.gz
|
||||
|
||||
- ./bin/nix-build-and-cache just .#static-aarch64-linux-musl
|
||||
- cp result/bin/conduit aarch64-linux-musl
|
||||
|
||||
- ./bin/nix-build-and-cache just .#oci-image-aarch64-linux-musl
|
||||
- cp result oci-image-arm64v8.tar.gz
|
||||
|
||||
- ./bin/nix-build-and-cache just .#book
|
||||
# We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746
|
||||
- cp -r --dereference result public
|
||||
artifacts:
|
||||
paths:
|
||||
- x86_64-linux-musl
|
||||
- aarch64-linux-musl
|
||||
- x86_64-linux-musl.deb
|
||||
- oci-image-amd64.tar.gz
|
||||
- oci-image-arm64v8.tar.gz
|
||||
- public
|
||||
rules:
|
||||
# CI required for all MRs
|
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
# Optional CI on forks
|
||||
- if: $IS_UPSTREAM_CI != "true"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- if: $CI
|
||||
interruptible: true
|
||||
|
||||
pages:
|
||||
stage: publish
|
||||
dependencies:
|
||||
- artifacts
|
||||
only:
|
||||
- next
|
||||
script:
|
||||
- "true"
|
||||
artifacts:
|
||||
paths:
|
||||
- public
|
@@ -1,8 +0,0 @@

<!-- Please describe your changes here -->

-----------------------------------------------------------------------------

- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test`
- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license
@@ -1,3 +0,0 @@
# Docs: Map markdown to html files
- source: /docs/(.+)\.md/
  public: '\1.html'
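For illustration, this removed rule used to rewrite Markdown source paths to their rendered pages, so a request for a hypothetical /docs/configuration.md would have been served as configuration.html.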
.mailmap (new file, 16 lines)
@@ -0,0 +1,16 @@
AlexPewMaster <git@alex.unbox.at> <68469103+AlexPewMaster@users.noreply.github.com>
Daniel Wiesenberg <weasy@hotmail.de> <weasy666@gmail.com>
Devin Ragotzy <devin.ragotzy@gmail.com> <d6ragotzy@wmich.edu>
Devin Ragotzy <devin.ragotzy@gmail.com> <dragotzy7460@mail.kvcc.edu>
Jonas Platte <jplatte+git@posteo.de> <jplatte+gitlab@posteo.de>
Jonas Zohren <git-pbkyr@jzohren.de> <gitlab-jfowl-0ux98@sh14.de>
Jonathan de Jong <jonathan@automatia.nl> <jonathandejong02@gmail.com>
June Clementine Strawberry <june@3.dog> <june@girlboss.ceo>
June Clementine Strawberry <june@3.dog> <strawberry@pupbrain.dev>
June Clementine Strawberry <june@3.dog> <strawberry@puppygock.gay>
Olivia Lee <olivia@computer.surgery> <benjamin@computer.surgery>
Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
Timo Kösters <timo@koesters.xyz>
x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>
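With these entries in place, git folds the alternate addresses into a single canonical identity wherever mailmap is honoured. A quick way to check the effect locally (commands only, output depends on the checkout):

  git shortlog -sne --all
  git check-mailmap 'June Clementine Strawberry <strawberry@puppygock.gay>'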
.pre-commit-config.yaml (new file, 47 lines)
@@ -0,0 +1,47 @@
default_install_hook_types:
  - pre-commit
  - commit-msg
default_stages:
  - pre-commit
  - manual

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: fix-byte-order-marker
      - id: check-case-conflict
      - id: check-symlinks
      - id: destroyed-symlinks
      - id: check-yaml
      - id: check-json
      - id: check-toml
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: mixed-line-ending
      - id: check-merge-conflict
      - id: check-added-large-files

  - repo: https://github.com/crate-ci/typos
    rev: v1.26.0
    hooks:
      - id: typos
      - id: typos
        name: commit-msg-typos
        stages: [commit-msg]

  - repo: https://github.com/crate-ci/committed
    rev: v1.1.7
    hooks:
      - id: committed

  - repo: local
    hooks:
      - id: cargo-fmt
        name: cargo fmt
        entry: cargo +nightly fmt --
        language: system
        types: [rust]
        pass_filenames: false
        stages:
          - pre-commit
23
.typos.toml
Normal file
|
@ -0,0 +1,23 @@
|
|||
[files]
|
||||
extend-exclude = ["*.csr", "*.lock", "pnpm-lock.yaml"]
|
||||
|
||||
[default]
|
||||
|
||||
extend-ignore-re = [
|
||||
"(?Rm)^.*(#|//|<!--)\\s*spellchecker:disable-line(\\s*-->)$", # Ignore a line by making it trail with a `spellchecker:disable-line` comment
|
||||
"^[0-9a-f]{7,}$", # Commit hashes
|
||||
|
||||
# some heuristics for base64 strings
|
||||
"[A-Za-z0-9+=]{72,}",
|
||||
"([A-Za-z0-9+=]|\\\\\\s\\*){72,}",
|
||||
"[0-9+][A-Za-z0-9+]{30,}[a-z0-9+]",
|
||||
"\\$[A-Z0-9+][A-Za-z0-9+]{6,}[a-z0-9+]",
|
||||
"\\b[a-z0-9+/=][A-Za-z0-9+/=]{7,}[a-z0-9+/=][A-Z]\\b",
|
||||
]
|
||||
|
||||
[default.extend-words]
|
||||
"allocatedp" = "allocatedp"
|
||||
"conduwuit" = "conduwuit"
|
||||
"continuwuity" = "continuwuity"
|
||||
"continuwity" = "continuwuity"
|
||||
"execuse" = "execuse"
|
12
.vscode/settings.json
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"cSpell.words": [
|
||||
"Forgejo",
|
||||
"appservice",
|
||||
"appservices",
|
||||
"conduwuit",
|
||||
"continuwuity",
|
||||
"homeserver",
|
||||
"homeservers"
|
||||
],
|
||||
"rust-analyzer.cargo.features": ["full"]
|
||||
}
|
|
@ -1,4 +1,3 @@
|
|||
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
@ -60,8 +59,7 @@ representative at an online or offline event.
|
|||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement over email at
|
||||
<strawberry@puppygock.gay> or over Matrix at @strawberry:puppygock.gay.
|
||||
reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) or by email at <tom@tcpip.uk>, <jade@continuwuity.org>, and <nex@continuwuity.org>.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
|
|
218
CONTRIBUTING.md
|
@ -1,149 +1,173 @@
|
|||
# Contributing guide
|
||||
|
||||
This page is for about contributing to conduwuit. The
|
||||
[development](./development.md) page may be of interest for you as well.
|
||||
This page is about contributing to Continuwuity. The
|
||||
[development](./development.md) and [code style guide](./development/code_style.md) pages may be of interest to you as well.
|
||||
|
||||
If you would like to work on an [issue][issues] that is not assigned, preferably
|
||||
ask in the Matrix room first at [#conduwuit:puppygock.gay][conduwuit-matrix],
|
||||
ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix],
|
||||
and comment on it.
|
||||
|
||||
### Linting and Formatting
|
||||
### Code Style
|
||||
|
||||
It is mandatory all your changes satisfy the lints (clippy, rustc, rustdoc, etc)
|
||||
and your code is formatted via the **nightly** `cargo fmt`. A lot of the
|
||||
`rustfmt.toml` features depend on nightly toolchain. It would be ideal if they
|
||||
weren't nightly-exclusive features, but they currently still are. CI's rustfmt
|
||||
uses nightly.
|
||||
Please review and follow the [code style guide](./development/code_style.md) for formatting, linting, naming conventions, and other code standards.
|
||||
|
||||
If you need to allow a lint, please make sure it's either obvious as to why
|
||||
(e.g. clippy saying redundant clone but it's actually required) or it has a
|
||||
comment saying why. Do not write inefficient code for the sake of satisfying
|
||||
lints. If a lint is wrong and provides a more inefficient solution or
|
||||
suggestion, allow the lint and mention that in a comment.
|
||||
### Pre-commit Checks
|
||||
|
||||
### Running CI tests locally
|
||||
Continuwuity uses pre-commit hooks to enforce various coding standards and catch common issues before they're committed. These checks include:
|
||||
|
||||
conduwuit's CI for tests, linting, formatting, audit, etc use
|
||||
[`engage`][engage]. engage can be installed from nixpkgs or `cargo install
|
||||
engage`. conduwuit's Nix flake devshell has the nixpkgs engage with `direnv`.
|
||||
Use `engage --help` for more usage details.
|
||||
- Code formatting and linting
|
||||
- Typo detection (both in code and commit messages)
|
||||
- Checking for large files
|
||||
- Ensuring proper line endings and no trailing whitespace
|
||||
- Validating YAML, JSON, and TOML files
|
||||
- Checking for merge conflicts
|
||||
|
||||
To test, format, lint, etc that CI would do, install engage, allow the `.envrc`
|
||||
file using `direnv allow`, and run `engage`.
|
||||
You can run these checks locally by installing [prefligit](https://github.com/j178/prefligit):
|
||||
|
||||
All of the tasks are defined at the [engage.toml][engage.toml] file. You can
|
||||
view all of them neatly by running `engage list`
|
||||
|
||||
If you would like to run only a specific engage task group, use `just`:
|
||||
```bash
|
||||
# Requires UV: https://docs.astral.sh/uv/getting-started/installation/
|
||||
# Mac/linux: curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
# Windows: powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
|
||||
|
||||
- `engage just <group>`
|
||||
- Example: `engage just lints`
|
||||
# Install prefligit using cargo-binstall
|
||||
cargo binstall prefligit
|
||||
|
||||
If you would like to run a specific engage task in a specific group, use `just
|
||||
<GROUP> [TASK]`: `engage just lints cargo-fmt`
|
||||
# Install git hooks to run checks automatically
|
||||
prefligit install
|
||||
|
||||
The following binaries are used in [`engage.toml`][engage.toml]:
|
||||
# Run all checks
|
||||
prefligit --all-files
|
||||
```
|
||||
|
||||
- [`engage`][engage]
|
||||
- `nix`
|
||||
- [`direnv`][direnv]
|
||||
- `rustc`
|
||||
- `cargo`
|
||||
- `cargo-fmt`
|
||||
- `rustdoc`
|
||||
- `cargo-clippy`
|
||||
- [`cargo-audit`][cargo-audit]
|
||||
- [`cargo-deb`][cargo-deb]
|
||||
- [`lychee`][lychee]
|
||||
- [`markdownlint-cli`][markdownlint-cli]
|
||||
- `dpkg`
|
||||
Alternatively, you can use [pre-commit](https://pre-commit.com/):
|
||||
```bash
|
||||
# Requires python
|
||||
|
||||
# Install pre-commit
|
||||
pip install pre-commit
|
||||
|
||||
# Install the hooks
|
||||
pre-commit install
|
||||
|
||||
# Run all checks manually
|
||||
pre-commit run --all-files
|
||||
```
|
||||
|
||||
These same checks are run in CI via the prefligit-checks workflow to ensure consistency. These must pass before the PR is merged.
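
If you only need to re-run a single check while iterating, pre-commit can also run hooks individually by the ids defined in `.pre-commit-config.yaml` (the `typos` id below is taken from the config shown earlier):

```bash
# Run just the typo check against the whole tree
pre-commit run typos --all-files

# Or only against the files currently staged for commit
pre-commit run typos
```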
|
||||
|
||||
### Running tests locally
|
||||
|
||||
Tests, compilation, and linting can be run with standard Cargo commands:
|
||||
|
||||
```bash
|
||||
# Run tests
|
||||
cargo test
|
||||
|
||||
# Check compilation
|
||||
cargo check --workspace --features full
|
||||
|
||||
# Run lints
|
||||
cargo clippy --workspace --features full
|
||||
# Auto-fix: cargo clippy --workspace --features full --fix --allow-staged;
|
||||
|
||||
# Format code (must use nightly)
|
||||
cargo +nightly fmt
|
||||
```
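
Standard Cargo test filtering also works when you only want part of the suite. The crate name below is just an illustration taken from the workspace members in `Cargo.toml`; depending on the crate, you may also need `--features full` as above:

```bash
# Run only tests whose names contain a given substring (replace the filter)
cargo test connection

# Run the tests of a single workspace crate
cargo test -p conduwuit_core
```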
|
||||
|
||||
### Matrix tests
|
||||
|
||||
CI runs [Complement][complement], but currently does not fail if results from
|
||||
the checked-in results differ with the new results. If your changes are done to
|
||||
fix Matrix tests, note that in your pull request. If more Complement tests start
|
||||
failing from your changes, please review the logs (they are uploaded as
|
||||
artifacts) and determine if they're intended or not.
|
||||
Continuwuity uses [Complement][complement] for Matrix protocol compliance testing. Complement tests are run manually by developers, and documentation on how to run these tests locally is currently being developed.
|
||||
|
||||
If you'd like to run Complement locally using Nix, see the
|
||||
[testing](development/testing.md) page.
|
||||
If your changes fix Matrix tests, please note that in your pull request. If more Complement tests start failing from your changes, please review the logs and determine whether the new failures are expected.
|
||||
|
||||
[Sytest][sytest] support will come soon.
|
||||
[Sytest][sytest] is currently unsupported.
|
||||
|
||||
### Writing documentation
|
||||
|
||||
conduwuit's website uses [`mdbook`][mdbook] and deployed via CI using GitHub
|
||||
Pages in the [`documentation.yml`][documentation.yml] workflow file with Nix's
|
||||
mdbook in the devshell. All documentation is in the `docs/` directory at the top
|
||||
level. The compiled mdbook website is also uploaded as an artifact.
|
||||
Continuwuity's website uses [`mdbook`][mdbook] and is deployed via CI using Cloudflare Pages
|
||||
in the [`documentation.yml`][documentation.yml] workflow file. All documentation is in the `docs/`
|
||||
directory at the top level.
|
||||
|
||||
To build the documentation using Nix, run: `bin/nix-build-and-cache just .#book`
|
||||
To build the documentation locally:
|
||||
|
||||
The output of the mdbook generation is in `result/`. mdbooks can be opened in
|
||||
your browser from the individual HTML files without any web server needed.
|
||||
1. Install mdbook if you don't have it already:
|
||||
```bash
|
||||
cargo install mdbook # or cargo binstall, or another method
|
||||
```
|
||||
|
||||
### Inclusivity and Diversity
|
||||
2. Build the documentation:
|
||||
```bash
|
||||
mdbook build
|
||||
```
|
||||
|
||||
All **MUST** code and write with inclusivity and diversity in mind. See the
|
||||
[following page by Google on writing inclusive code and
|
||||
documentation](https://developers.google.com/style/inclusive-documentation).
|
||||
The output of the mdbook generation is in `public/`. You can open the HTML files directly in your browser without needing a web server.
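
If you'd rather have a live preview while editing, mdbook's standard development server (a stock mdbook subcommand, nothing repository-specific) rebuilds and reloads on change:

```bash
# Serve the book locally with live reload (defaults to http://localhost:3000)
mdbook serve
```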
|
||||
|
||||
This **EXPLICITLY** forbids usage of terms like "blacklist"/"whitelist" and
|
||||
"master"/"slave", [forbids gender-specific words and
|
||||
phrases](https://developers.google.com/style/pronouns#gender-neutral-pronouns),
|
||||
forbids ableist language like "sanity-check", "cripple", or "insane", and
|
||||
forbids culture-specific language (e.g. US-only holidays or cultures).
|
||||
|
||||
No exceptions are allowed. Dependencies that may use these terms are allowed but
|
||||
[do not replicate the name in your functions or
|
||||
variables](https://developers.google.com/style/inclusive-documentation#write-around).
|
||||
### Commit Messages
|
||||
|
||||
In addition to language, write and code with the user experience in mind. This
|
||||
is software that intends to be used by everyone, so make it easy and comfortable
|
||||
for everyone to use. 🏳️⚧️
|
||||
Continuwuity follows the [Conventional Commits](https://www.conventionalcommits.org/) specification for commit messages. This provides a standardized format that makes the commit history more readable and enables automated tools to generate changelogs.
|
||||
|
||||
### Variable, comment, function, etc standards
|
||||
The basic structure is:
|
||||
|
||||
Rust's default style and standards with regards to [function names, variable
|
||||
names, comments](https://rust-lang.github.io/api-guidelines/naming.html), etc
|
||||
applies here.
|
||||
```
|
||||
<type>[(optional scope)]: <description>
|
||||
|
||||
[optional body]
|
||||
|
||||
[optional footer(s)]
|
||||
```
|
||||
|
||||
The allowed types for commits are:
|
||||
- `fix`: Bug fixes
|
||||
- `feat`: New features
|
||||
- `docs`: Documentation changes
|
||||
- `style`: Changes that don't affect the meaning of the code (formatting, etc.)
|
||||
- `refactor`: Code changes that neither fix bugs nor add features
|
||||
- `perf`: Performance improvements
|
||||
- `test`: Adding or fixing tests
|
||||
- `build`: Changes to the build system or dependencies
|
||||
- `ci`: Changes to CI configuration
|
||||
- `chore`: Other changes that don't modify source or test files
|
||||
|
||||
Examples:
|
||||
```
|
||||
feat: add user authentication
|
||||
fix(database): resolve connection pooling issue
|
||||
docs: update installation instructions
|
||||
```
|
||||
|
||||
The project uses the `committed` hook to validate commit messages in pre-commit. This ensures all commits follow the conventional format.
|
||||
|
||||
### Creating pull requests
|
||||
|
||||
Please try to keep contributions to the GitHub. While the mirrors of conduwuit
|
||||
allow for pull/merge requests, there is no guarantee I will see them in a timely
|
||||
Please try to keep contributions to the Forgejo instance. While the mirrors of continuwuity
|
||||
allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely
|
||||
manner. Additionally, please mark WIP or incomplete PRs as drafts.
|
||||
This prevents me from having to ping once in a while to double check the status
|
||||
This prevents us from having to ping once in a while to double check the status
|
||||
of it, especially when CI completes successfully and everything
|
||||
*looks* done.
|
||||
|
||||
If you open a pull request on one of the mirrors, it is your responsibility to
|
||||
inform me about its existence. In the future I may try to solve this with more
|
||||
repo bots in the conduwuit Matrix room. There is no mailing list or email-patch
|
||||
support on the sr.ht mirror, but if you'd like to email me a git patch you can
|
||||
do so at `strawberry@puppygock.gay`.
|
||||
Before submitting a pull request, please ensure:
|
||||
1. Your code passes all CI checks (formatting, linting, typo detection, etc.)
|
||||
2. Your code follows the [code style guide](./development/code_style.md)
|
||||
3. Your commit messages follow the conventional commits format
|
||||
4. Tests are added for new functionality
|
||||
5. Documentation is updated if needed
|
||||
|
||||
Direct all PRs/MRs to the `main` branch.
|
||||
|
||||
By sending a pull request or patch, you are agreeing that your changes are
|
||||
allowed to be licensed under the Apache-2.0 licence and all of your conduct is
|
||||
in line with the Contributor's Covenant, and conduwuit's Code of Conduct.
|
||||
in line with the Contributor Covenant and Continuwuity's Code of Conduct.
|
||||
|
||||
Contribution by users who violate either of these code of conducts will not have
|
||||
Contributions from users who violate either of these codes of conduct may not have
|
||||
their contributions accepted. This includes users who have been banned from
|
||||
conduwuit Matrix rooms for Code of Conduct violations.
|
||||
continuwuity Matrix rooms for Code of Conduct violations.
|
||||
|
||||
[issues]: https://github.com/girlbossceo/conduwuit/issues
|
||||
[conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay
|
||||
[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues
|
||||
[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org
|
||||
[complement]: https://github.com/matrix-org/complement/
|
||||
[engage.toml]: https://github.com/girlbossceo/conduwuit/blob/main/engage.toml
|
||||
[engage]: https://charles.page.computer.surgery/engage/
|
||||
[sytest]: https://github.com/matrix-org/sytest/
|
||||
[cargo-deb]: https://github.com/kornelski/cargo-deb
|
||||
[lychee]: https://github.com/lycheeverse/lychee
|
||||
[markdownlint-cli]: https://github.com/igorshubovych/markdownlint-cli
|
||||
[cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit
|
||||
[direnv]: https://direnv.net/
|
||||
[mdbook]: https://rust-lang.github.io/mdBook/
|
||||
[documentation.yml]: https://github.com/girlbossceo/conduwuit/blob/main/.github/workflows/documentation.yml
|
||||
[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml
|
||||
|
|
2332
Cargo.lock
generated
File diff suppressed because it is too large
155
Cargo.toml
|
@ -2,7 +2,7 @@
|
|||
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = ["src/*"]
|
||||
members = ["src/*", "xtask/*"]
|
||||
default-members = ["src/*"]
|
||||
|
||||
[workspace.package]
|
||||
|
@ -14,14 +14,14 @@ authors = [
|
|||
categories = ["network-programming"]
|
||||
description = "a very cool Matrix chat homeserver written in Rust"
|
||||
edition = "2024"
|
||||
homepage = "https://conduwuit.puppyirl.gay/"
|
||||
homepage = "https://continuwuity.org/"
|
||||
keywords = ["chat", "matrix", "networking", "server", "uwu"]
|
||||
license = "Apache-2.0"
|
||||
# See also `rust-toolchain.toml`
|
||||
readme = "README.md"
|
||||
repository = "https://github.com/girlbossceo/conduwuit"
|
||||
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
||||
rust-version = "1.86.0"
|
||||
version = "0.5.0"
|
||||
version = "0.5.0-rc.7"
|
||||
|
||||
[workspace.metadata.crane]
|
||||
name = "conduwuit"
|
||||
|
@ -48,15 +48,15 @@ features = ["ffi", "std", "union"]
|
|||
version = "0.6.2"
|
||||
|
||||
[workspace.dependencies.ctor]
|
||||
version = "0.2.9"
|
||||
version = "0.5.0"
|
||||
|
||||
[workspace.dependencies.cargo_toml]
|
||||
version = "0.21"
|
||||
version = "0.22"
|
||||
default-features = false
|
||||
features = ["features"]
|
||||
|
||||
[workspace.dependencies.toml]
|
||||
version = "0.8.14"
|
||||
version = "0.9.5"
|
||||
default-features = false
|
||||
features = ["parse"]
|
||||
|
||||
|
@ -213,6 +213,8 @@ default-features = false
|
|||
version = "0.3.19"
|
||||
default-features = false
|
||||
features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"]
|
||||
[workspace.dependencies.tracing-journald]
|
||||
version = "0.3.1"
|
||||
[workspace.dependencies.tracing-core]
|
||||
version = "0.1.33"
|
||||
default-features = false
|
||||
|
@ -298,7 +300,7 @@ version = "1.15.0"
|
|||
default-features = false
|
||||
features = ["serde"]
|
||||
|
||||
# Used for reading the configuration from conduwuit.toml & environment variables
|
||||
# Used for reading the configuration from continuwuity.toml & environment variables
|
||||
[workspace.dependencies.figment]
|
||||
version = "0.10.19"
|
||||
default-features = false
|
||||
|
@ -348,9 +350,9 @@ version = "0.1.2"
|
|||
|
||||
# Used for matrix spec type definitions and helpers
|
||||
[workspace.dependencies.ruma]
|
||||
git = "https://github.com/girlbossceo/ruwuma"
|
||||
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
||||
#branch = "conduwuit-changes"
|
||||
rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4"
|
||||
rev = "8fb268fa2771dfc3a1c8075ef1246e7c9a0a53fd"
|
||||
features = [
|
||||
"compat",
|
||||
"rand",
|
||||
|
@ -388,8 +390,8 @@ features = [
|
|||
]
|
||||
|
||||
[workspace.dependencies.rust-rocksdb]
|
||||
git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1"
|
||||
rev = "1c267e0bf0cc7b7702e9a329deccd89de79ef4c3"
|
||||
git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1"
|
||||
rev = "99b0319416b64830dd6f8943e1f65e15aeef18bc"
|
||||
default-features = false
|
||||
features = [
|
||||
"multi-threaded-cf",
|
||||
|
@ -409,25 +411,28 @@ default-features = false
|
|||
|
||||
# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
|
||||
[workspace.dependencies.opentelemetry]
|
||||
version = "0.21.0"
|
||||
version = "0.30.0"
|
||||
|
||||
[workspace.dependencies.tracing-flame]
|
||||
version = "0.2.0"
|
||||
|
||||
[workspace.dependencies.tracing-opentelemetry]
|
||||
version = "0.22.0"
|
||||
version = "0.31.0"
|
||||
|
||||
[workspace.dependencies.opentelemetry_sdk]
|
||||
version = "0.21.2"
|
||||
version = "0.30.0"
|
||||
features = ["rt-tokio"]
|
||||
|
||||
[workspace.dependencies.opentelemetry-jaeger]
|
||||
version = "0.20.0"
|
||||
features = ["rt-tokio"]
|
||||
[workspace.dependencies.opentelemetry-otlp]
|
||||
version = "0.30.0"
|
||||
features = ["http", "trace", "logs", "metrics"]
|
||||
|
||||
[workspace.dependencies.opentelemetry-jaeger-propagator]
|
||||
version = "0.30.0"
|
||||
|
||||
# optional sentry metrics for crash/panic reporting
|
||||
[workspace.dependencies.sentry]
|
||||
version = "0.37.0"
|
||||
version = "0.42.0"
|
||||
default-features = false
|
||||
features = [
|
||||
"backtrace",
|
||||
|
@ -443,13 +448,13 @@ features = [
|
|||
]
|
||||
|
||||
[workspace.dependencies.sentry-tracing]
|
||||
version = "0.37.0"
|
||||
version = "0.42.0"
|
||||
[workspace.dependencies.sentry-tower]
|
||||
version = "0.37.0"
|
||||
version = "0.42.0"
|
||||
|
||||
# jemalloc usage
|
||||
[workspace.dependencies.tikv-jemalloc-sys]
|
||||
git = "https://github.com/girlbossceo/jemallocator"
|
||||
git = "https://forgejo.ellis.link/continuwuation/jemallocator"
|
||||
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
||||
default-features = false
|
||||
features = [
|
||||
|
@ -457,7 +462,7 @@ features = [
|
|||
"unprefixed_malloc_on_supported_platforms",
|
||||
]
|
||||
[workspace.dependencies.tikv-jemallocator]
|
||||
git = "https://github.com/girlbossceo/jemallocator"
|
||||
git = "https://forgejo.ellis.link/continuwuation/jemallocator"
|
||||
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
||||
default-features = false
|
||||
features = [
|
||||
|
@ -465,7 +470,7 @@ features = [
|
|||
"unprefixed_malloc_on_supported_platforms",
|
||||
]
|
||||
[workspace.dependencies.tikv-jemalloc-ctl]
|
||||
git = "https://github.com/girlbossceo/jemallocator"
|
||||
git = "https://forgejo.ellis.link/continuwuation/jemallocator"
|
||||
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
||||
default-features = false
|
||||
features = ["use_std"]
|
||||
|
@ -474,7 +479,7 @@ features = ["use_std"]
|
|||
version = "0.4"
|
||||
|
||||
[workspace.dependencies.nix]
|
||||
version = "0.29.0"
|
||||
version = "0.30.1"
|
||||
default-features = false
|
||||
features = ["resource"]
|
||||
|
||||
|
@ -496,7 +501,7 @@ version = "0.4.3"
|
|||
default-features = false
|
||||
|
||||
[workspace.dependencies.termimad]
|
||||
version = "0.31.2"
|
||||
version = "0.34.0"
|
||||
default-features = false
|
||||
|
||||
[workspace.dependencies.checked_ops]
|
||||
|
@ -513,6 +518,14 @@ version = "1.0"
|
|||
[workspace.dependencies.proc-macro2]
|
||||
version = "1.0"
|
||||
|
||||
[workspace.dependencies.parking_lot]
|
||||
version = "0.12.4"
|
||||
features = ["hardware-lock-elision", "deadlock_detection"] # TODO: Check if deadlock_detection has a perf impact, if it does only enable with debug_assertions
|
||||
|
||||
# Use this when extending with_lock::WithLock to parking_lot
|
||||
[workspace.dependencies.lock_api]
|
||||
version = "0.4.13"
|
||||
|
||||
[workspace.dependencies.bytesize]
|
||||
version = "2.0"
|
||||
|
||||
|
@ -526,66 +539,70 @@ version = "0.2"
|
|||
version = "0.2"
|
||||
|
||||
[workspace.dependencies.minicbor]
|
||||
version = "0.26.3"
|
||||
version = "2.1.1"
|
||||
features = ["std"]
|
||||
|
||||
[workspace.dependencies.minicbor-serde]
|
||||
version = "0.4.1"
|
||||
version = "0.6.0"
|
||||
features = ["std"]
|
||||
|
||||
[workspace.dependencies.maplit]
|
||||
version = "1.0.2"
|
||||
|
||||
[workspace.dependencies.ldap3]
|
||||
version = "0.11.5"
|
||||
default-features = false
|
||||
features = ["sync", "tls-rustls"]
|
||||
|
||||
#
|
||||
# Patches
|
||||
#
|
||||
|
||||
# backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing.
|
||||
# we can switch back to upstream if #2956 is merged and backported in the upstream repo.
|
||||
# https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c
|
||||
# https://forgejo.ellis.link/continuwuation/tracing/commit/b348dca742af641c47bc390261f60711c2af573c
|
||||
[patch.crates-io.tracing-subscriber]
|
||||
git = "https://github.com/girlbossceo/tracing"
|
||||
git = "https://forgejo.ellis.link/continuwuation/tracing"
|
||||
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||
[patch.crates-io.tracing]
|
||||
git = "https://github.com/girlbossceo/tracing"
|
||||
git = "https://forgejo.ellis.link/continuwuation/tracing"
|
||||
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||
[patch.crates-io.tracing-core]
|
||||
git = "https://github.com/girlbossceo/tracing"
|
||||
git = "https://forgejo.ellis.link/continuwuation/tracing"
|
||||
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||
[patch.crates-io.tracing-log]
|
||||
git = "https://github.com/girlbossceo/tracing"
|
||||
git = "https://forgejo.ellis.link/continuwuation/tracing"
|
||||
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||
|
||||
# adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50
|
||||
# adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b
|
||||
# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0002-add-tab-completion-callback.patch
|
||||
# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0001-add-event-for-ctrl.patch
|
||||
[patch.crates-io.rustyline-async]
|
||||
git = "https://github.com/girlbossceo/rustyline-async"
|
||||
rev = "deaeb0694e2083f53d363b648da06e10fc13900c"
|
||||
git = "https://forgejo.ellis.link/continuwuation/rustyline-async"
|
||||
rev = "e9f01cf8c6605483cb80b3b0309b400940493d7f"
|
||||
|
||||
# adds LIFO queue scheduling; this should be updated with PR progress.
|
||||
[patch.crates-io.event-listener]
|
||||
git = "https://github.com/girlbossceo/event-listener"
|
||||
git = "https://forgejo.ellis.link/continuwuation/event-listener"
|
||||
rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d"
|
||||
[patch.crates-io.async-channel]
|
||||
git = "https://github.com/girlbossceo/async-channel"
|
||||
git = "https://forgejo.ellis.link/continuwuation/async-channel"
|
||||
rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280"
|
||||
|
||||
# adds affinity masks for selecting more than one core at a time
|
||||
[patch.crates-io.core_affinity]
|
||||
git = "https://github.com/girlbossceo/core_affinity_rs"
|
||||
git = "https://forgejo.ellis.link/continuwuation/core_affinity_rs"
|
||||
rev = "9c8e51510c35077df888ee72a36b4b05637147da"
|
||||
|
||||
# reverts hyperium#148 conflicting with our delicate federation resolver hooks
|
||||
[patch.crates-io.hyper-util]
|
||||
git = "https://github.com/girlbossceo/hyper-util"
|
||||
git = "https://forgejo.ellis.link/continuwuation/hyper-util"
|
||||
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"
|
||||
|
||||
# allows no-aaaa option in resolv.conf
|
||||
# bumps rust edition and toolchain to 1.86.0 and 2024
|
||||
# use sat_add on line number errors
|
||||
# Allows no-aaaa option in resolv.conf
|
||||
# Use 1-indexed line numbers when displaying parse error messages
|
||||
[patch.crates-io.resolv-conf]
|
||||
git = "https://github.com/girlbossceo/resolv-conf"
|
||||
rev = "200e958941d522a70c5877e3d846f55b5586c68d"
|
||||
git = "https://forgejo.ellis.link/continuwuation/resolv-conf"
|
||||
rev = "56251316cc4127bcbf36e68ce5e2093f4d33e227"
|
||||
|
||||
#
|
||||
# Our crates
|
||||
|
@ -626,6 +643,22 @@ package = "conduwuit_macros"
|
|||
path = "src/macros"
|
||||
default-features = false
|
||||
|
||||
[workspace.dependencies.conduwuit-web]
|
||||
package = "conduwuit_web"
|
||||
path = "src/web"
|
||||
default-features = false
|
||||
|
||||
|
||||
[workspace.dependencies.conduwuit-build-metadata]
|
||||
package = "conduwuit_build_metadata"
|
||||
path = "src/build_metadata"
|
||||
default-features = false
|
||||
|
||||
|
||||
[workspace.dependencies.conduwuit]
|
||||
package = "conduwuit"
|
||||
path = "src/main"
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
# Release profiles
|
||||
|
@ -734,25 +767,6 @@ incremental = true
|
|||
|
||||
[profile.dev.package.conduwuit_core]
|
||||
inherits = "dev"
|
||||
incremental = false
|
||||
#rustflags = [
|
||||
# '--cfg', 'conduwuit_mods',
|
||||
# '-Ztime-passes',
|
||||
# '-Zmir-opt-level=0',
|
||||
# '-Ztls-model=initial-exec',
|
||||
# '-Cprefer-dynamic=true',
|
||||
# '-Zstaticlib-prefer-dynamic=true',
|
||||
# '-Zstaticlib-allow-rdylib-deps=true',
|
||||
# '-Zpacked-bundled-libs=false',
|
||||
# '-Zplt=true',
|
||||
# '-Clink-arg=-Wl,--as-needed',
|
||||
# '-Clink-arg=-Wl,--allow-shlib-undefined',
|
||||
# '-Clink-arg=-Wl,-z,lazy',
|
||||
# '-Clink-arg=-Wl,-z,unique',
|
||||
# '-Clink-arg=-Wl,-z,nodlopen',
|
||||
# '-Clink-arg=-Wl,-z,nodelete',
|
||||
#]
|
||||
|
||||
[profile.dev.package.conduwuit]
|
||||
inherits = "dev"
|
||||
#rustflags = [
|
||||
|
@ -774,7 +788,6 @@ inherits = "dev"
|
|||
[profile.dev.package.'*']
|
||||
inherits = "dev"
|
||||
debug = 'limited'
|
||||
incremental = false
|
||||
codegen-units = 1
|
||||
opt-level = 'z'
|
||||
#rustflags = [
|
||||
|
@ -796,7 +809,6 @@ inherits = "dev"
|
|||
strip = false
|
||||
opt-level = 0
|
||||
codegen-units = 16
|
||||
incremental = false
|
||||
|
||||
[profile.test.package.'*']
|
||||
inherits = "dev"
|
||||
|
@ -804,7 +816,6 @@ debug = 0
|
|||
strip = false
|
||||
opt-level = 0
|
||||
codegen-units = 16
|
||||
incremental = false
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
|
@ -845,7 +856,7 @@ unused-qualifications = "warn"
|
|||
#unused-results = "warn" # TODO
|
||||
|
||||
## some sadness
|
||||
elided_named_lifetimes = "allow" # TODO!
|
||||
mismatched_lifetime_syntaxes = "allow" # TODO!
|
||||
let_underscore_drop = "allow"
|
||||
missing_docs = "allow"
|
||||
# cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g.
|
||||
|
@ -981,3 +992,9 @@ let_underscore_future = { level = "allow", priority = 1 }
|
|||
|
||||
# rust doesn't understand conduwuit's custom log macros
|
||||
literal_string_with_formatting_args = { level = "allow", priority = 1 }
|
||||
|
||||
|
||||
needless_raw_string_hashes = "allow"
|
||||
|
||||
# TODO: Enable this lint & fix all instances
|
||||
collapsible_if = "allow"
|
||||
|
|
233
README.md
|
@ -1,178 +1,123 @@
|
|||
# conduwuit
|
||||
|
||||
[](https://matrix.to/#/#conduwuit:puppygock.gay) [](https://matrix.to/#/#conduwuit-space:puppygock.gay)
|
||||
|
||||
[](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml)
|
||||
|
||||
    
|
||||
|
||||
|
||||
|
||||
(Docker image size badges linking to the Docker Hub `latest` and `main` tags)
|
||||
|
||||
|
||||
# continuwuity
|
||||
|
||||
<!-- ANCHOR: catchphrase -->
|
||||
|
||||
### a very cool [Matrix](https://matrix.org/) chat homeserver written in Rust
|
||||
## A community-driven [Matrix](https://matrix.org/) homeserver in Rust
|
||||
|
||||
[](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) [](https://matrix.to/#/#space:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org)
|
||||
|
||||
|
||||
|
||||
<!-- ANCHOR_END: catchphrase -->
|
||||
|
||||
Visit the [conduwuit documentation](https://conduwuit.puppyirl.gay/) for more
|
||||
information and how to deploy/setup conduwuit.
|
||||
[continuwuity] is a Matrix homeserver written in Rust.
|
||||
It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver.
|
||||
|
||||
<!-- ANCHOR: body -->
|
||||
|
||||
#### What is Matrix?
|
||||
[](https://forgejo.ellis.link/continuwuation/continuwuity) [](https://forgejo.ellis.link/continuwuation/continuwuity/stars) [](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open)
|
||||
|
||||
[](https://github.com/continuwuity/continuwuity) [](https://github.com/continuwuity/continuwuity/stargazers)
|
||||
|
||||
[](https://gitlab.com/continuwuity/continuwuity) [](https://gitlab.com/continuwuity/continuwuity/-/starrers)
|
||||
|
||||
[](https://codeberg.org/continuwuity/continuwuity) [](https://codeberg.org/continuwuity/continuwuity/stars)
|
||||
|
||||
### Why does this exist?
|
||||
|
||||
The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features.
|
||||
|
||||
We aim to provide a stable, well-maintained alternative for current conduwuit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver.
|
||||
|
||||
### Who are we?
|
||||
|
||||
We are a group of Matrix enthusiasts, developers and system administrators who have used conduwuit and believe in its potential. Our team includes both previous
|
||||
contributors to the original project and new developers who want to help maintain and improve this important piece of Matrix infrastructure.
|
||||
|
||||
We operate as an open community project, welcoming contributions from anyone interested in improving continuwuity.
|
||||
|
||||
### What is Matrix?
|
||||
|
||||
[Matrix](https://matrix.org) is an open, federated, and extensible network for
|
||||
decentralised communication. Users from any Matrix homeserver can chat with users from all
|
||||
decentralized communication. Users from any Matrix homeserver can chat with users from all
|
||||
other homeservers over federation. Matrix is designed to be extensible and built on top of.
|
||||
You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord.
|
||||
|
||||
#### What is the goal?
|
||||
### What are the project's goals?
|
||||
|
||||
A high-performance, efficient, low-cost, and featureful Matrix homeserver that's
|
||||
easy to set up and just works with minimal configuration needed.
|
||||
Continuwuity aims to:
|
||||
|
||||
#### Can I try it out?
|
||||
- Maintain a stable, reliable Matrix homeserver implementation in Rust
|
||||
- Improve compatibility and specification compliance with the Matrix protocol
|
||||
- Fix bugs and performance issues from the original conduwuit
|
||||
- Add missing features needed by homeserver administrators
|
||||
- Provide comprehensive documentation and easy deployment options
|
||||
- Create a sustainable development model for long-term maintenance
|
||||
- Keep a lightweight, efficient codebase that can run on modest hardware
|
||||
|
||||
An official conduwuit server ran by me is available at transfem.dev
|
||||
([element.transfem.dev](https://element.transfem.dev) /
|
||||
[cinny.transfem.dev](https://cinny.transfem.dev))
|
||||
### Can I try it out?
|
||||
|
||||
transfem.dev is a public homeserver that can be used, it is not a "test only
|
||||
homeserver". This means there are rules, so please read the rules:
|
||||
[https://transfem.dev/homeserver_rules.txt](https://transfem.dev/homeserver_rules.txt)
|
||||
Check out the [documentation](https://continuwuity.org) for installation instructions.
|
||||
|
||||
transfem.dev is also listed at
|
||||
[servers.joinmatrix.org](https://servers.joinmatrix.org/), which is a list of
|
||||
popular public Matrix homeservers, including some others that run conduwuit.
|
||||
There are currently no open registration Continuwuity instances available.
|
||||
|
||||
#### What is the current status?
|
||||
### What are we working on?
|
||||
|
||||
conduwuit is technically a hard fork of [Conduit](https://conduit.rs/), which is in beta.
|
||||
The beta status initially was inherited from Conduit, however the huge amount of
|
||||
codebase divergance, changes, fixes, and improvements have effectively made this
|
||||
beta status not entirely applicable to us anymore.
|
||||
We're working our way through all of the issues in the [Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues).
|
||||
|
||||
conduwuit is very stable based on our rapidly growing userbase, has lots of features that users
|
||||
expect, and very usable as a daily driver for small, medium, and upper-end medium sized homeservers.
|
||||
- [Packaging & availability in more places](https://forgejo.ellis.link/continuwuation/continuwuity/issues/747)
|
||||
- [Appservices bugs & features](https://forgejo.ellis.link/continuwuation/continuwuity/issues?q=&type=all&state=open&labels=178&milestone=0&assignee=0&poster=0)
|
||||
- [Improving compatibility and spec compliance](https://forgejo.ellis.link/continuwuation/continuwuity/issues?labels=119)
|
||||
- Automated testing
|
||||
- [Admin API](https://forgejo.ellis.link/continuwuation/continuwuity/issues/748)
|
||||
- [Policy-list controlled moderation](https://forgejo.ellis.link/continuwuation/continuwuity/issues/750)
|
||||
|
||||
A lot of critical stability and performance issues have been fixed, and a lot of
|
||||
necessary groundwork has finished; making this project way better than it was
|
||||
back in the start at ~early 2024.
|
||||
### Can I migrate my data from x?
|
||||
|
||||
#### Where is the differences page?
|
||||
|
||||
conduwuit historically had a "differences" page that listed each and every single
|
||||
different thing about conduwuit from Conduit, as a way to promote and advertise
|
||||
conduwuit by showing significant amounts of work done. While this was feasible to
|
||||
maintain back when the project was new in early-2024, this became impossible
|
||||
very quickly and has unfortunately became heavily outdated, missing tons of things, etc.
|
||||
|
||||
It's difficult to list out what we do differently, what are our notable features, etc
|
||||
when there's so many things and features and bug fixes and performance optimisations,
|
||||
the list goes on. We simply recommend folks to just try out conduwuit, or ask us
|
||||
what features you are looking for and if they're implemented in conduwuit.
|
||||
|
||||
#### How is conduwuit funded? Is conduwuit sustainable?
|
||||
|
||||
conduwuit has no external funding. This is made possible purely in my freetime with
|
||||
contributors, also in their free time, and only by user-curated donations.
|
||||
|
||||
conduwuit has existed since around November 2023, but [only became more publicly known
|
||||
in March/April 2024](https://matrix.org/blog/2024/04/26/this-week-in-matrix-2024-04-26/#conduwuit-website)
|
||||
and we have no plans in stopping or slowing down any time soon!
|
||||
|
||||
#### Can I migrate or switch from Conduit?
|
||||
|
||||
conduwuit had drop-in migration/replacement support for Conduit for about 12 months before
|
||||
bugs somewhere along the line broke it. Maintaining this has been difficult and
|
||||
the majority of Conduit users have already migrated, additionally debugging Conduit
|
||||
is not one of our interests, and so Conduit migration no longer works. We also
|
||||
feel that 12 months has been plenty of time for people to seamlessly migrate.
|
||||
|
||||
If you are a Conduit user looking to migrate, you will have to wipe and reset
|
||||
your database. We may fix seamless migration support at some point, but it's not an interest
|
||||
from us.
|
||||
|
||||
#### Can I migrate from Synapse or Dendrite?
|
||||
|
||||
Currently there is no known way to seamlessly migrate all user data from the old
|
||||
homeserver to conduwuit. However it is perfectly acceptable to replace the old
|
||||
homeserver software with conduwuit using the same server name and there will not
|
||||
be any issues with federation.
|
||||
|
||||
There is an interest in developing a built-in seamless user data migration
|
||||
method into conduwuit, however there is no concrete ETA or timeline for this.
|
||||
- Conduwuit: Yes
|
||||
- Conduit: No, database is now incompatible
|
||||
- Grapevine: No, database is now incompatible
|
||||
- Dendrite: No
|
||||
- Synapse: No
|
||||
|
||||
We haven't written up a guide on migrating from incompatible homeservers yet. Reach out to us if you need to do this!
|
||||
|
||||
<!-- ANCHOR_END: body -->
|
||||
|
||||
## Contribution
|
||||
|
||||
### Development flow
|
||||
|
||||
- Features / changes must be developed in a separate branch
|
||||
- For each change, create a descriptive PR
|
||||
- Your code will be reviewed by one or more of the continuwuity developers
|
||||
- The branch will be deployed live on multiple testers' Matrix servers to shake out bugs
|
||||
- Once all testers and reviewers have agreed, the PR will be merged to the main branch
|
||||
- The main branch will have nightly builds deployed to users on the cutting edge
|
||||
- Every week or two, a new release is cut.
|
||||
|
||||
The main branch is always green!
|
||||
|
||||
|
||||
### Policy on pulling from other forks
|
||||
|
||||
We welcome contributions from other forks of conduwuit, subject to our review process.
|
||||
When incorporating code from other forks:
|
||||
|
||||
- All external contributions must go through our standard PR process
|
||||
- Code must meet our quality standards and pass tests
|
||||
- Code changes will require testing on multiple test servers before merging
|
||||
- Attribution will be given to original authors and forks
|
||||
- We prioritize stability and compatibility when evaluating external contributions
|
||||
- Features that align with our project goals will be given priority consideration
|
||||
|
||||
<!-- ANCHOR: footer -->
|
||||
|
||||
#### Contact
|
||||
|
||||
[`#conduwuit:puppygock.gay`](https://matrix.to/#/#conduwuit:puppygock.gay)
|
||||
is the official project Matrix room. You can get support here, ask questions or
|
||||
concerns, get assistance setting up conduwuit, etc.
|
||||
|
||||
This room should stay relevant and focused on conduwuit. An offtopic general
|
||||
chatter room can be found in the room topic there as well.
|
||||
|
||||
Please keep the issue trackers focused on *actual* bug reports and enhancement requests.
|
||||
|
||||
General support is extremely difficult to be offered over an issue tracker, and
|
||||
simple questions should be asked directly in an interactive platform like our
|
||||
Matrix room above as they can turn into a relevant discussion and/or may not be
|
||||
simple to answer. If you're not sure, just ask in the Matrix room.
|
||||
|
||||
If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new)
|
||||
|
||||
If you need to contact the primary maintainer, my contact methods are on my website: https://girlboss.ceo
|
||||
|
||||
#### Donate
|
||||
|
||||
conduwuit development is purely made possible by myself and contributors. I do
|
||||
not get paid to work on this, and I work on it in my free time. Donations are
|
||||
heavily appreciated! 💜🥺
|
||||
|
||||
- Liberapay: <https://liberapay.com/girlbossceo>
|
||||
- GitHub Sponsors: <https://github.com/sponsors/girlbossceo>
|
||||
- Ko-fi: <https://ko-fi.com/puppygock>
|
||||
|
||||
I do not and will not accept cryptocurrency donations, including things related.
|
||||
|
||||
Note that donations will NOT guarantee you or give you any kind of tangible product,
|
||||
feature prioritisation, etc. By donating, you are agreeing that conduwuit is NOT
|
||||
going to provide you any goods or services as part of your donation, and this
|
||||
donation is purely a generous donation. We will not provide things like paid
|
||||
personal/direct support, feature request priority, merchandise, etc.
|
||||
|
||||
#### Logo
|
||||
|
||||
Original repo and Matrix room picture was from bran (<3). Current banner image
|
||||
and logo is directly from [this cohost
|
||||
post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for).
|
||||
|
||||
An SVG logo made by [@nktnet1](https://github.com/nktnet1) is available here: <https://github.com/girlbossceo/conduwuit/blob/main/docs/assets/>
|
||||
|
||||
#### Is it conduwuit or Conduwuit?
|
||||
|
||||
Both, but I prefer conduwuit.
|
||||
|
||||
#### Mirrors of conduwuit
|
||||
|
||||
If GitHub is unavailable in your country, or has poor connectivity, conduwuit's
|
||||
source code is mirrored onto the following additional platforms I maintain:
|
||||
|
||||
- GitHub: <https://github.com/girlbossceo/conduwuit>
|
||||
- GitLab: <https://gitlab.com/conduwuit/conduwuit>
|
||||
- git.girlcock.ceo: <https://git.girlcock.ceo/strawberry/conduwuit>
|
||||
- git.gay: <https://git.gay/june/conduwuit>
|
||||
- mau.dev: <https://mau.dev/june/conduwuit>
|
||||
- Codeberg: <https://codeberg.org/arf/conduwuit>
|
||||
- sourcehut: <https://git.sr.ht/~girlbossceo/conduwuit>
|
||||
Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) and [space](https://matrix.to/#/#space:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) to chat with us about the project!
|
||||
|
||||
<!-- ANCHOR_END: footer -->
|
||||
|
||||
|
||||
[continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity
|
||||
|
|
63
SECURITY.md
Normal file
|
@ -0,0 +1,63 @@
|
|||
# Security Policy for Continuwuity
|
||||
|
||||
This document outlines the security policy for Continuwuity. Our goal is to maintain a secure platform for all users, and we take security matters seriously.
|
||||
|
||||
## Supported Versions
|
||||
|
||||
We provide security updates for the following versions of Continuwuity:
|
||||
|
||||
| Version | Supported |
|
||||
| -------------- |:----------------:|
|
||||
| Latest release | ✅ |
|
||||
| Main branch | ✅ |
|
||||
| Older releases | ❌ |
|
||||
|
||||
We may backport fixes to the previous release at our discretion, but we don't guarantee this.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
### Responsible Disclosure
|
||||
|
||||
We appreciate the efforts of security researchers and the community in identifying and reporting vulnerabilities. To ensure that potential vulnerabilities are addressed properly, please follow these guidelines:
|
||||
|
||||
1. **Contact members of the team directly** over E2EE private message.
|
||||
- [@jade:ellis.link](https://matrix.to/#/@jade:ellis.link)
|
||||
- [@nex:nexy7574.co.uk](https://matrix.to/#/@nex:nexy7574.co.uk) <!-- ? -->
|
||||
2. **Email the security team** at [security@continuwuity.org](mailto:security@continuwuity.org). This is not E2EE, so don't include sensitive details.
|
||||
3. **Do not disclose the vulnerability publicly** until it has been addressed
|
||||
4. **Provide detailed information** about the vulnerability, including:
|
||||
- A clear description of the issue
|
||||
- Steps to reproduce
|
||||
- Potential impact
|
||||
- Any possible mitigations
|
||||
- Version(s) affected, including specific commits if possible
|
||||
|
||||
If you have any doubts about a potential security vulnerability, contact us via private channels first! We'd prefer that you bother us, instead of having a vulnerability disclosed without a fix.
|
||||
|
||||
### What to Expect
|
||||
|
||||
When you report a security vulnerability:
|
||||
|
||||
1. **Acknowledgment**: We will acknowledge receipt of your report.
|
||||
2. **Assessment**: We will assess the vulnerability and determine its impact on our users
|
||||
3. **Updates**: We will provide updates on our progress in addressing the vulnerability, and may request you help test mitigations
|
||||
4. **Resolution**: Once resolved, we will notify you and discuss coordinated disclosure
|
||||
5. **Credit**: We will recognize your contribution (unless you prefer to remain anonymous)
|
||||
|
||||
## Security Update Process
|
||||
|
||||
When security vulnerabilities are identified:
|
||||
|
||||
1. We will develop and test fixes in a private fork
|
||||
2. Security updates will be released as soon as possible
|
||||
3. Release notes will include information about the vulnerabilities, avoiding details that could facilitate exploitation where possible
|
||||
4. Critical security updates may be backported to the previous stable release
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [Matrix Security Disclosure Policy](https://matrix.org/security-disclosure-policy/)
|
||||
- [Continuwuity Documentation](https://continuwuity.org/introduction)
|
||||
|
||||
---
|
||||
|
||||
This security policy was last updated on May 25, 2025.
|
|
@ -1,77 +0,0 @@
|
|||
[Unit]
|
||||
Description=conduwuit Matrix homeserver
|
||||
Wants=network-online.target
|
||||
After=network-online.target
|
||||
Documentation=https://conduwuit.puppyirl.gay/
|
||||
RequiresMountsFor=/var/lib/private/conduwuit
|
||||
Alias=matrix-conduwuit.service
|
||||
|
||||
[Service]
|
||||
DynamicUser=yes
|
||||
Type=notify-reload
|
||||
ReloadSignal=SIGUSR1
|
||||
|
||||
TTYPath=/dev/tty25
|
||||
DeviceAllow=char-tty
|
||||
StandardInput=tty-force
|
||||
StandardOutput=tty
|
||||
StandardError=journal+console
|
||||
TTYReset=yes
|
||||
# uncomment to allow buffer to be cleared every restart
|
||||
TTYVTDisallocate=no
|
||||
|
||||
TTYColumns=120
|
||||
TTYRows=40
|
||||
|
||||
AmbientCapabilities=
|
||||
CapabilityBoundingSet=
|
||||
|
||||
DevicePolicy=closed
|
||||
LockPersonality=yes
|
||||
MemoryDenyWriteExecute=yes
|
||||
NoNewPrivileges=yes
|
||||
#ProcSubset=pid
|
||||
ProtectClock=yes
|
||||
ProtectControlGroups=yes
|
||||
ProtectHome=yes
|
||||
ProtectHostname=yes
|
||||
ProtectKernelLogs=yes
|
||||
ProtectKernelModules=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectProc=invisible
|
||||
ProtectSystem=strict
|
||||
PrivateDevices=yes
|
||||
PrivateMounts=yes
|
||||
PrivateTmp=yes
|
||||
PrivateUsers=yes
|
||||
PrivateIPC=yes
|
||||
RemoveIPC=yes
|
||||
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
|
||||
RestrictNamespaces=yes
|
||||
RestrictRealtime=yes
|
||||
RestrictSUIDSGID=yes
|
||||
SystemCallArchitectures=native
|
||||
SystemCallFilter=@system-service @resources
|
||||
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
|
||||
SystemCallErrorNumber=EPERM
|
||||
StateDirectory=conduwuit
|
||||
|
||||
RuntimeDirectory=conduwuit
|
||||
RuntimeDirectoryMode=0750
|
||||
|
||||
Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml"
|
||||
BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
|
||||
BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit
|
||||
|
||||
ExecStart=/usr/bin/conduwuit
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
TimeoutStopSec=4m
|
||||
TimeoutStartSec=4m
|
||||
|
||||
StartLimitInterval=1m
|
||||
StartLimitBurst=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
15
book.toml
|
@ -1,8 +1,8 @@
|
|||
[book]
|
||||
title = "conduwuit 🏳️⚧️ 💜 🦴"
|
||||
description = "conduwuit, which is a well-maintained fork of Conduit, is a simple, fast and reliable chat server for the Matrix protocol"
|
||||
title = "continuwuity"
|
||||
description = "continuwuity is a community continuation of the conduwuit Matrix homeserver, written in Rust."
|
||||
language = "en"
|
||||
authors = ["strawberry (June)"]
|
||||
authors = ["The continuwuity Community"]
|
||||
text-direction = "ltr"
|
||||
multilingual = false
|
||||
src = "docs"
|
||||
|
@ -16,12 +16,9 @@ extra-watch-dirs = ["debian", "docs"]
|
|||
edition = "2024"
|
||||
|
||||
[output.html]
|
||||
git-repository-url = "https://github.com/girlbossceo/conduwuit"
|
||||
edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}"
|
||||
git-repository-icon = "fa-github-square"
|
||||
|
||||
[output.html.redirect]
|
||||
"/differences.html" = "https://conduwuit.puppyirl.gay/#where-is-the-differences-page"
|
||||
edit-url-template = "https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/{path}"
|
||||
git-repository-url = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
||||
git-repository-icon = "fa-git-alt"
|
||||
|
||||
[output.html.search]
|
||||
limit-results = 15
|
||||
|
|
3
committed.toml
Normal file
|
@ -0,0 +1,3 @@
|
|||
style = "conventional"
|
||||
subject_length = 72
|
||||
allowed_types = ["ci", "build", "fix", "feat", "chore", "docs", "style", "refactor", "perf", "test"]
|
|
@ -1,4 +1,4 @@
|
|||
### conduwuit Configuration
|
||||
### continuwuity Configuration
|
||||
###
|
||||
### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE
|
||||
### OVERWRITTEN!
|
||||
|
@ -13,7 +13,7 @@
|
|||
### that say "YOU NEED TO EDIT THIS".
|
||||
###
|
||||
### For more information, see:
|
||||
### https://conduwuit.puppyirl.gay/configuration.html
|
||||
### https://continuwuity.org/configuration.html
|
||||
|
||||
[global]
|
||||
|
||||
|
@ -21,7 +21,7 @@
|
|||
# suffix for user and room IDs/aliases.
|
||||
#
|
||||
# See the docs for reverse proxying and delegation:
|
||||
# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy
|
||||
# https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy
|
||||
#
|
||||
# Also see the `[global.well_known]` config section at the very bottom.
|
||||
#
|
||||
|
@ -32,11 +32,11 @@
|
|||
# YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE
|
||||
# WIPE.
|
||||
#
|
||||
# example: "conduwuit.woof"
|
||||
# example: "continuwuity.org"
|
||||
#
|
||||
#server_name =
|
||||
|
||||
# The default address (IPv4 or IPv6) conduwuit will listen on.
|
||||
# The default address (IPv4 or IPv6) continuwuity will listen on.
|
||||
#
|
||||
# If you are using Docker or a container NAT networking setup, this must
|
||||
# be "0.0.0.0".
|
||||
|
@ -46,10 +46,10 @@
|
|||
#
|
||||
#address = ["127.0.0.1", "::1"]
|
||||
|
||||
# The port(s) conduwuit will listen on.
|
||||
# The port(s) continuwuity will listen on.
|
||||
#
|
||||
# For reverse proxying, see:
|
||||
# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy
|
||||
# https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy
|
||||
#
|
||||
# If you are using Docker, don't change this, you'll need to map an
|
||||
# external port to this.
|
||||
|
@ -58,16 +58,17 @@
|
|||
#
|
||||
#port = 8008
|
||||
|
||||
# The UNIX socket conduwuit will listen on.
|
||||
# The UNIX socket continuwuity will listen on.
|
||||
#
|
||||
# conduwuit cannot listen on both an IP address and a UNIX socket. If
|
||||
# continuwuity cannot listen on both an IP address and a UNIX socket. If
|
||||
# listening on a UNIX socket, you MUST remove/comment the `address` key.
|
||||
#
|
||||
# Remember to make sure that your reverse proxy has access to this socket
|
||||
# file, either by adding your reverse proxy to the 'conduwuit' group or
|
||||
# granting world R/W permissions with `unix_socket_perms` (666 minimum).
|
||||
# file, either by adding your reverse proxy to the appropriate user group
|
||||
# or granting world R/W permissions with `unix_socket_perms` (666
|
||||
# minimum).
|
||||
#
|
||||
# example: "/run/conduwuit/conduwuit.sock"
|
||||
# example: "/run/continuwuity/continuwuity.sock"
|
||||
#
|
||||
#unix_socket_path =
|
||||
|
||||
|
@ -75,23 +76,25 @@
|
|||
#
|
||||
#unix_socket_perms = 660
|
||||
|
||||
# This is the only directory where conduwuit will save its data, including
|
||||
# media. Note: this was previously "/var/lib/matrix-conduit".
|
||||
# This is the only directory where continuwuity will save its data,
|
||||
# including media. Note: this was previously "/var/lib/matrix-conduit".
|
||||
#
|
||||
# YOU NEED TO EDIT THIS.
|
||||
# YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
|
||||
# `systemd` service. The service file sets it to `/var/lib/conduwuit`
|
||||
# using an environment variable and also grants write access.
|
||||
#
|
||||
# example: "/var/lib/conduwuit"
|
||||
#
|
||||
#database_path =
|
||||
|
||||
# conduwuit supports online database backups using RocksDB's Backup engine
|
||||
# API. To use this, set a database backup path that conduwuit can write
|
||||
# to.
|
||||
# continuwuity supports online database backups using RocksDB's Backup
|
||||
# engine API. To use this, set a database backup path that continuwuity
|
||||
# can write to.
|
||||
#
|
||||
# For more information, see:
|
||||
# https://conduwuit.puppyirl.gay/maintenance.html#backups
|
||||
# https://continuwuity.org/maintenance.html#backups
|
||||
#
|
||||
# example: "/opt/conduwuit-db-backups"
|
||||
# example: "/opt/continuwuity-db-backups"
|
||||
#
|
||||
#database_backup_path =
|
||||
|
||||
|
@ -112,18 +115,14 @@
|
|||
#
|
||||
#new_user_displayname_suffix = "🏳️⚧️"
|
||||
|
||||
# If enabled, conduwuit will send a simple GET request periodically to
|
||||
# `https://pupbrain.dev/check-for-updates/stable` for any new
|
||||
# announcements made. Despite the name, this is not an update check
|
||||
# endpoint, it is simply an announcement check endpoint.
|
||||
# If enabled, continuwuity will send a simple GET request periodically to
|
||||
# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new
|
||||
# announcements or major updates. This is not an update check endpoint.
|
||||
#
|
||||
# This is disabled by default as this is rarely used except for security
|
||||
# updates or major updates.
|
||||
#
|
||||
#allow_check_for_updates = false
|
||||
#allow_announcements_check = true
|
||||
|
||||
# Set this to any float value to multiply conduwuit's in-memory LRU caches
|
||||
# with such as "auth_chain_cache_capacity".
|
||||
# Set this to any float value to multiply continuwuity's in-memory LRU
|
||||
# caches (such as "auth_chain_cache_capacity") by.
|
||||
#
|
||||
# May be useful if you have significant memory to spare to increase
|
||||
# performance.
|
||||
|
@ -135,7 +134,7 @@
|
|||
#
|
||||
#cache_capacity_modifier = 1.0
|
||||
|
||||
# Set this to any float value in megabytes for conduwuit to tell the
|
||||
# Set this to any float value in megabytes for continuwuity to tell the
|
||||
# database engine that this much memory is available for database read
|
||||
# caches.
|
||||
#
|
||||
|
@ -149,7 +148,7 @@
|
|||
#
|
||||
#db_cache_capacity_mb = varies by system
|
||||
|
||||
# Set this to any float value in megabytes for conduwuit to tell the
|
||||
# Set this to any float value in megabytes for continuwuity to tell the
|
||||
# database engine that this much memory is available for database write
|
||||
# caches.
|
||||
#
|
||||
|
@ -254,9 +253,9 @@
|
|||
# Enable using *only* TCP for querying your specified nameservers instead
|
||||
# of UDP.
|
||||
#
|
||||
# If you are running conduwuit in a container environment, this config
|
||||
# If you are running continuwuity in a container environment, this config
|
||||
# option may need to be enabled. For more details, see:
|
||||
# https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker
|
||||
# https://continuwuity.org/troubleshooting.html#potential-dns-issues-when-using-docker
|
||||
#
|
||||
#query_over_tcp_only = false
|
||||
|
||||
|
@ -328,12 +327,37 @@
|
|||
#
|
||||
#well_known_timeout = 10
|
||||
|
||||
# Federation client connection timeout (seconds). You should not set this
|
||||
# to high values, as dead homeservers can significantly slow down
|
||||
# federation, specifically key retrieval, which will take roughly the
|
||||
# amount of time you configure here given that a homeserver doesn't
|
||||
# respond. This will cause most clients to time out /keys/query, causing
|
||||
# E2EE and device verification to fail.
|
||||
#
|
||||
#federation_conn_timeout = 10
|
||||
|
||||
# Federation client request timeout (seconds). You most definitely want
|
||||
# this to be high to account for extremely large room joins, slow
|
||||
# homeservers, your own resources etc.
|
||||
#
|
||||
#federation_timeout = 300
|
||||
|
||||
# MSC4284 Policy server request timeout (seconds). Generally policy
|
||||
# servers should respond near instantly, however may slow down under
|
||||
# load. If a policy server doesn't respond in a short amount of time, the
|
||||
# room it is configured in may become unusable if this limit is set too
|
||||
# high. 10 seconds is a good default, however dropping this to 3-5 seconds
|
||||
# can be acceptable.
|
||||
#
|
||||
# Please be aware that policy requests are *NOT* currently re-tried, so if
|
||||
# a spam check request fails, the event will be assumed to be not spam,
|
||||
# which in some cases may result in spam being sent to or received from
|
||||
# the room that would typically be prevented.
|
||||
#
|
||||
# About policy servers: https://matrix.org/blog/2025/04/introducing-policy-servers/
|
||||
#
|
||||
#policy_server_request_timeout = 10
|
||||
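
Putting the timeout options above together, a hedged sketch that keeps the documented defaults except for a slightly tighter policy-server timeout (values are illustrative, not recommendations):

```toml
[global]
# Connection establishment to remote homeservers: keep this low so dead
# servers don't stall key retrieval (/keys/query) and E2EE verification.
federation_conn_timeout = 10
# Whole-request timeout: keep high to survive very large room joins.
federation_timeout = 300
# MSC4284 policy servers should answer near-instantly; 3-5s can be acceptable.
policy_server_request_timeout = 5
```
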
|
||||
# Federation client idle connection pool timeout (seconds).
|
||||
#
|
||||
#federation_idle_timeout = 25
|
||||
|
@ -401,6 +425,22 @@
|
|||
#
|
||||
#allow_registration = false
|
||||
|
||||
# If registration is enabled, and this setting is true, new users
|
||||
# registered after the first admin user will be automatically suspended
|
||||
# and will require an admin to run `!admin users unsuspend <user_id>`.
|
||||
#
|
||||
# Suspended users are still able to read messages, make profile updates,
|
||||
# leave rooms, and deactivate their account, however cannot send messages,
|
||||
# invites, or create/join or otherwise modify rooms.
|
||||
# They are effectively read-only.
|
||||
#
|
||||
# If you want to use this to screen people who register on your server,
|
||||
# you should add a room to `auto_join_rooms` that is public, and contains
|
||||
# information that new users can read (since they won't be able to DM
|
||||
# anyone, or send a message, and may be confused).
|
||||
#
|
||||
#suspend_on_register = false
|
||||
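
A possible screening setup combining `suspend_on_register` with a public onboarding room, as suggested above (the room alias is a placeholder):

```toml
[global]
allow_registration = true
# New accounts start suspended (read-only) until an admin runs
# `!admin users unsuspend <user_id>`.
suspend_on_register = true
# Public room with onboarding information so suspended users aren't left
# confused; replace the alias with a real room on your server.
auto_join_rooms = ["#welcome:example.com"]
```
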
|
||||
# Enabling this setting opens registration to anyone without restrictions.
|
||||
# This makes your server vulnerable to abuse
|
||||
#
|
||||
|
@ -422,12 +462,32 @@
|
|||
# tokens. Multiple tokens can be added if you separate them with
|
||||
# whitespace
|
||||
#
|
||||
# conduwuit must be able to access the file, and it must not be empty
|
||||
# continuwuity must be able to access the file, and it must not be empty
|
||||
#
|
||||
# example: "/etc/conduwuit/.reg_token"
|
||||
# example: "/etc/continuwuity/.reg_token"
|
||||
#
|
||||
#registration_token_file =
|
||||
|
||||
# The public site key for reCaptcha. If this is provided, reCaptcha
|
||||
# becomes required during registration. If both captcha *and*
|
||||
# registration token are enabled, both will be required during
|
||||
# registration.
|
||||
#
|
||||
# IMPORTANT: "Verify the origin of reCAPTCHA solutions" **MUST** BE
|
||||
# DISABLED IF YOU WANT THE CAPTCHA TO WORK IN 3RD PARTY CLIENTS, OR
|
||||
# CLIENTS HOSTED ON DOMAINS OTHER THAN YOUR OWN!
|
||||
#
|
||||
# Registration must be enabled (`allow_registration` must be true) for
|
||||
# this to have any effect.
|
||||
#
|
||||
#recaptcha_site_key =
|
||||
|
||||
# The private site key for reCaptcha.
|
||||
# If this is omitted, captcha registration will not work,
|
||||
# even if `recaptcha_site_key` is set.
|
||||
#
|
||||
#recaptcha_private_site_key =
|
||||
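
A sketch of captcha-gated registration as described above; both keys are placeholders for values issued by the reCAPTCHA console:

```toml
[global]
allow_registration = true
# Placeholder keys from a hypothetical reCAPTCHA site registration. Remember
# to disable "Verify the origin of reCAPTCHA solutions" in the reCAPTCHA
# admin console if third-party clients must be able to register.
recaptcha_site_key = "6LcEXAMPLE-site-key"
recaptcha_private_site_key = "6LcEXAMPLE-secret-key"
```
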
|
||||
# Controls whether encrypted rooms and events are allowed.
|
||||
#
|
||||
#allow_encryption = true
|
||||
|
@ -516,28 +576,34 @@
|
|||
#allow_room_creation = true
|
||||
|
||||
# Set to false to disable users from joining or creating room versions
|
||||
# that aren't officially supported by conduwuit.
|
||||
# that aren't officially supported by continuwuity.
|
||||
#
|
||||
# conduwuit officially supports room versions 6 - 11.
|
||||
# continuwuity officially supports room versions 6 - 11.
|
||||
#
|
||||
# conduwuit has slightly experimental (though works fine in practice)
|
||||
# continuwuity has slightly experimental (though works fine in practice)
|
||||
# support for versions 3 - 5.
|
||||
#
|
||||
#allow_unstable_room_versions = true
|
||||
|
||||
# Default room version conduwuit will create rooms with.
|
||||
# Default room version continuwuity will create rooms with.
|
||||
#
|
||||
# Per spec, room version 11 is the default.
|
||||
#
|
||||
#default_room_version = 11
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
# Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
|
||||
# Jaeger exporter. Traces will be sent via OTLP to a collector (such as
|
||||
# Jaeger) that supports the OpenTelemetry Protocol.
|
||||
#
|
||||
#allow_jaeger = false
|
||||
# Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
|
||||
# environment variable (defaults to http://localhost:4318).
|
||||
#
|
||||
#allow_otlp = false
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
# Filter for OTLP tracing spans. This controls which spans are exported
|
||||
# to the OTLP collector.
|
||||
#
|
||||
#jaeger_filter = "info"
|
||||
#otlp_filter = "info"
|
||||
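
A minimal sketch of the OTLP options above; the collector endpoint itself is set through the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable rather than in this file:

```toml
[global]
# Export tracing spans via OTLP to a collector such as Jaeger.
allow_otlp = true
# Only export spans at info level and above.
otlp_filter = "info"
```
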
|
||||
# If the 'perf_measurements' compile-time feature is enabled, enables
|
||||
# collecting folded stack trace profile of tracing spans using
|
||||
|
@ -591,7 +657,7 @@
|
|||
# Servers listed here will be used to gather public keys of other servers
|
||||
# (notary trusted key servers).
|
||||
#
|
||||
# Currently, conduwuit doesn't support inbound batched key requests, so
|
||||
# Currently, continuwuity doesn't support inbound batched key requests, so
|
||||
# this list should only contain other Synapse servers.
|
||||
#
|
||||
# example: ["matrix.org", "tchncs.de"]
|
||||
|
@ -632,7 +698,7 @@
|
|||
#
|
||||
#trusted_server_batch_size = 1024
|
||||
|
||||
# Max log level for conduwuit. Allows debug, info, warn, or error.
|
||||
# Max log level for continuwuity. Allows debug, info, warn, or error.
|
||||
#
|
||||
# See also:
|
||||
# https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
|
||||
|
@ -653,8 +719,9 @@
|
|||
#
|
||||
#log_span_events = "none"
|
||||
|
||||
# Configures whether CONDUWUIT_LOG EnvFilter matches values using regular
|
||||
# expressions. See the tracing_subscriber documentation on Directives.
|
||||
# Configures whether CONTINUWUITY_LOG EnvFilter matches values using
|
||||
# regular expressions. See the tracing_subscriber documentation on
|
||||
# Directives.
|
||||
#
|
||||
#log_filter_regex = true
|
||||
|
||||
|
@ -662,6 +729,21 @@
|
|||
#
|
||||
#log_thread_ids = false
|
||||
|
||||
# Enable journald logging on Unix platforms
|
||||
#
|
||||
# When enabled, log output will be sent to the systemd journal
|
||||
# This is only supported on Unix platforms
|
||||
#
|
||||
#log_to_journald = false
|
||||
|
||||
# The syslog identifier to use with journald logging
|
||||
#
|
||||
# Only used when journald logging is enabled
|
||||
#
|
||||
# Defaults to the binary name
|
||||
#
|
||||
#journald_identifier =
|
||||
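
A short sketch of the journald options above (the identifier value is a placeholder; it defaults to the binary name when unset):

```toml
[global]
# Send log output to the systemd journal (Unix platforms only).
log_to_journald = true
# Syslog identifier to tag journal entries with.
journald_identifier = "continuwuity"
```
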
|
||||
# OpenID token expiration/TTL in seconds.
|
||||
#
|
||||
# These are the OpenID tokens that are primarily used for Matrix account
|
||||
|
@ -722,7 +804,7 @@
|
|||
# This takes priority over "turn_secret" first, and falls back to
|
||||
# "turn_secret" if invalid or failed to open.
|
||||
#
|
||||
# example: "/etc/conduwuit/.turn_secret"
|
||||
# example: "/etc/continuwuity/.turn_secret"
|
||||
#
|
||||
#turn_secret_file =
|
||||
|
||||
|
@ -730,12 +812,12 @@
|
|||
#
|
||||
#turn_ttl = 86400
|
||||
|
||||
# List/vector of room IDs or room aliases that conduwuit will make newly
|
||||
# registered users join. The rooms specified must be rooms that you have
|
||||
# joined at least once on the server, and must be public.
|
||||
# List/vector of room IDs or room aliases that continuwuity will make
|
||||
# newly registered users join. The rooms specified must be rooms that you
|
||||
# have joined at least once on the server, and must be public.
|
||||
#
|
||||
# example: ["#conduwuit:puppygock.gay",
|
||||
# "!eoIzvAvVwY23LPDay8:puppygock.gay"]
|
||||
# example: ["#continuwuity:continuwuity.org",
|
||||
# "!main-1:continuwuity.org"]
|
||||
#
|
||||
#auto_join_rooms = []
|
||||
|
||||
|
@ -758,10 +840,10 @@
|
|||
#
|
||||
#auto_deactivate_banned_room_attempts = false
|
||||
|
||||
# RocksDB log level. This is not the same as conduwuit's log level. This
|
||||
# is the log level for the RocksDB engine/library which show up in your
|
||||
# database folder/path as `LOG` files. conduwuit will log RocksDB errors
|
||||
# as normal through tracing or panics if severe for safety.
|
||||
# RocksDB log level. This is not the same as continuwuity's log level.
|
||||
# This is the log level for the RocksDB engine/library which show up in
|
||||
# your database folder/path as `LOG` files. continuwuity will log RocksDB
|
||||
# errors as normal through tracing or panics if severe for safety.
|
||||
#
|
||||
#rocksdb_log_level = "error"
|
||||
|
||||
|
@ -781,7 +863,7 @@
|
|||
# Set this to true to use RocksDB config options that are tailored to HDDs
|
||||
# (slower device storage).
|
||||
#
|
||||
# It is worth noting that by default, conduwuit will use RocksDB with
|
||||
# It is worth noting that by default, continuwuity will use RocksDB with
|
||||
# Direct IO enabled. *Generally* speaking this improves performance as it
|
||||
# bypasses buffered I/O (system page cache). However there is a potential
|
||||
# chance that Direct IO may cause issues with database operations if your
|
||||
|
@ -789,7 +871,7 @@
|
|||
# possibly ZFS filesystem. RocksDB generally deals/corrects these issues
|
||||
# but it cannot account for all setups. If you experience any weird
|
||||
# RocksDB issues, try enabling this option as it turns off Direct IO and
|
||||
# feel free to report in the conduwuit Matrix room if this option fixes
|
||||
# feel free to report in the continuwuity Matrix room if this option fixes
|
||||
# your DB issues.
|
||||
#
|
||||
# For more information, see:
|
||||
|
@ -844,7 +926,7 @@
|
|||
# as they all differ. See their `kDefaultCompressionLevel`.
|
||||
#
|
||||
# Note when using the default value we may override it with a setting
|
||||
# tailored specifically conduwuit.
|
||||
# tailored specifically for continuwuity.
|
||||
#
|
||||
#rocksdb_compression_level = 32767
|
||||
|
||||
|
@ -860,7 +942,7 @@
|
|||
# algorithm.
|
||||
#
|
||||
# Note when using the default value we may override it with a setting
|
||||
# tailored specifically conduwuit.
|
||||
# tailored specifically for continuwuity.
|
||||
#
|
||||
#rocksdb_bottommost_compression_level = 32767
|
||||
|
||||
|
@ -900,13 +982,13 @@
|
|||
# 0 = AbsoluteConsistency
|
||||
# 1 = TolerateCorruptedTailRecords (default)
|
||||
# 2 = PointInTime (use me if trying to recover)
|
||||
# 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty)
|
||||
# 3 = SkipAnyCorruptedRecord (you now voided your Continuwuity warranty)
|
||||
#
|
||||
# For more information on these modes, see:
|
||||
# https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes
|
||||
#
|
||||
# For more details on recovering a corrupt database, see:
|
||||
# https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption
|
||||
# https://continuwuity.org/troubleshooting.html#database-corruption
|
||||
#
|
||||
#rocksdb_recovery_mode = 1
|
||||
|
||||
|
@ -946,7 +1028,7 @@
|
|||
# - Disabling repair mode and restarting the server is recommended after
|
||||
# running the repair.
|
||||
#
|
||||
# See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database.
|
||||
# See https://continuwuity.org/troubleshooting.html#database-corruption for more details on recovering a corrupt database.
|
||||
#
|
||||
#rocksdb_repair = false
|
||||
|
||||
|
@ -970,10 +1052,10 @@
|
|||
#
|
||||
#rocksdb_compaction_ioprio_idle = true
|
||||
|
||||
# Disables RocksDB compaction. You should never ever have to set this
|
||||
# option to true. If you for some reason find yourself needing to use this
|
||||
# option as part of troubleshooting or a bug, please reach out to us in
|
||||
# the conduwuit Matrix room with information and details.
|
||||
# Enables RocksDB compaction. You should never ever have to set this
|
||||
# option to false. If you for some reason find yourself needing to use
|
||||
# this option as part of troubleshooting or a bug, please reach out to us
|
||||
# in the continuwuity Matrix room with information and details.
|
||||
#
|
||||
# Disabling compaction will lead to a significantly bloated and
|
||||
# explosively large database, gradually poor performance, unnecessarily
|
||||
|
@ -999,7 +1081,7 @@
|
|||
# purposes such as recovering/recreating your admin room, or inviting
|
||||
# yourself back.
|
||||
#
|
||||
# See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room.
|
||||
# See https://continuwuity.org/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room.
|
||||
#
|
||||
# Once this password is unset, all sessions will be logged out for
|
||||
# security purposes.
|
||||
|
@ -1014,8 +1096,8 @@
|
|||
|
||||
# Allow local (your server only) presence updates/requests.
|
||||
#
|
||||
# Note that presence on conduwuit is very fast unlike Synapse's. If using
|
||||
# outgoing presence, this MUST be enabled.
|
||||
# Note that presence on continuwuity is very fast unlike Synapse's. If
|
||||
# using outgoing presence, this MUST be enabled.
|
||||
#
|
||||
#allow_local_presence = true
|
||||
|
||||
|
@ -1023,7 +1105,7 @@
|
|||
#
|
||||
# This option receives presence updates from other servers, but does not
|
||||
# send any unless `allow_outgoing_presence` is true. Note that presence on
|
||||
# conduwuit is very fast unlike Synapse's.
|
||||
# continuwuity is very fast unlike Synapse's.
|
||||
#
|
||||
#allow_incoming_presence = true
|
||||
|
||||
|
@ -1031,8 +1113,8 @@
|
|||
#
|
||||
# This option sends presence updates to other servers, but does not
|
||||
# receive any unless `allow_incoming_presence` is true. Note that presence
|
||||
# on conduwuit is very fast unlike Synapse's. If using outgoing presence,
|
||||
# you MUST enable `allow_local_presence` as well.
|
||||
# on continuwuity is very fast unlike Synapse's. If using outgoing
|
||||
# presence, you MUST enable `allow_local_presence` as well.
|
||||
#
|
||||
#allow_outgoing_presence = true
|
||||
|
||||
|
@ -1055,6 +1137,13 @@
|
|||
#
|
||||
#presence_timeout_remote_users = true
|
||||
|
||||
# Allow local read receipts.
|
||||
#
|
||||
# Disabling this will effectively also disable outgoing federated read
|
||||
# receipts.
|
||||
#
|
||||
#allow_local_read_receipts = true
|
||||
|
||||
# Allow receiving incoming read receipts from remote servers.
|
||||
#
|
||||
#allow_incoming_read_receipts = true
|
||||
|
@ -1063,6 +1152,13 @@
|
|||
#
|
||||
#allow_outgoing_read_receipts = true
|
||||
|
||||
# Allow local typing updates.
|
||||
#
|
||||
# Disabling this will effectively also disable outgoing federated typing
|
||||
# updates.
|
||||
#
|
||||
#allow_local_typing = true
|
||||
|
||||
# Allow outgoing typing updates to federation.
|
||||
#
|
||||
#allow_outgoing_typing = true
|
||||
|
@ -1085,8 +1181,8 @@
|
|||
#
|
||||
#typing_client_timeout_max_s = 45
|
||||
|
||||
# Set this to true for conduwuit to compress HTTP response bodies using
|
||||
# zstd. This option does nothing if conduwuit was not built with
|
||||
# Set this to true for continuwuity to compress HTTP response bodies using
|
||||
# zstd. This option does nothing if continuwuity was not built with
|
||||
# `zstd_compression` feature. Please be aware that enabling HTTP
|
||||
# compression may weaken TLS. Most users should not need to enable this.
|
||||
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH
|
||||
|
@ -1094,8 +1190,8 @@
|
|||
#
|
||||
#zstd_compression = false
|
||||
|
||||
# Set this to true for conduwuit to compress HTTP response bodies using
|
||||
# gzip. This option does nothing if conduwuit was not built with
|
||||
# Set this to true for continuwuity to compress HTTP response bodies using
|
||||
# gzip. This option does nothing if continuwuity was not built with
|
||||
# `gzip_compression` feature. Please be aware that enabling HTTP
|
||||
# compression may weaken TLS. Most users should not need to enable this.
|
||||
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before
|
||||
|
@ -1106,8 +1202,8 @@
|
|||
#
|
||||
#gzip_compression = false
|
||||
|
||||
# Set this to true for conduwuit to compress HTTP response bodies using
|
||||
# brotli. This option does nothing if conduwuit was not built with
|
||||
# Set this to true for continuwuity to compress HTTP response bodies using
|
||||
# brotli. This option does nothing if continuwuity was not built with
|
||||
# `brotli_compression` feature. Please be aware that enabling HTTP
|
||||
# compression may weaken TLS. Most users should not need to enable this.
|
||||
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH
|
||||
|
@ -1169,7 +1265,7 @@
|
|||
# Otherwise setting this to false reduces filesystem clutter and overhead
|
||||
# for managing these symlinks in the directory. This is now disabled by
|
||||
# default. You may still return to upstream Conduit but you have to run
|
||||
# conduwuit at least once with this set to true and allow the
|
||||
# continuwuity at least once with this set to true and allow the
|
||||
# media_startup_check to take place before shutting down to return to
|
||||
# Conduit.
|
||||
#
|
||||
|
@ -1186,26 +1282,40 @@
|
|||
#
|
||||
#prune_missing_media = false
|
||||
|
||||
# Vector list of regex patterns of server names that conduwuit will refuse
|
||||
# to download remote media from.
|
||||
#
|
||||
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
|
||||
#
|
||||
#prevent_media_downloads_from = []
|
||||
|
||||
# List of forbidden server names via regex patterns that we will block
|
||||
# incoming AND outgoing federation with, and block client room joins /
|
||||
# remote user invites.
|
||||
#
|
||||
# Note that your messages can still make it to forbidden servers through
|
||||
# backfilling. Events we receive from forbidden servers via backfill
|
||||
# from servers we *do* federate with will be stored in the database.
|
||||
#
|
||||
# This check is applied on the room ID, room alias, sender server name,
|
||||
# sender user's server name, inbound federation X-Matrix origin, and
|
||||
# outbound federation handler.
|
||||
#
|
||||
# Basically "global" ACLs.
|
||||
# You can set this to ["*"] to block all servers by default, and then
|
||||
# use `allowed_remote_server_names` to allow only specific servers.
|
||||
#
|
||||
# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"]
|
||||
#
|
||||
#forbidden_remote_server_names = []
|
||||
|
||||
# List of allowed server names via regex patterns that we will allow,
|
||||
# regardless of if they match `forbidden_remote_server_names`.
|
||||
#
|
||||
# This option has no effect if `forbidden_remote_server_names` is empty.
|
||||
#
|
||||
# example: ["goodserver\\.tld$", "goodphrase"]
|
||||
#
|
||||
#allowed_remote_server_names = []
|
||||
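
A sketch of the allowlist-only federation mode mentioned above; the allowed patterns are placeholders:

```toml
[global]
# Block every remote server by default ("global ACL"), then explicitly
# re-allow the ones you want to federate with. Patterns are regexes.
forbidden_remote_server_names = ["*"]
# Only consulted because `forbidden_remote_server_names` is non-empty.
allowed_remote_server_names = ["^matrix\\.org$", "^example\\.com$"]
```
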
|
||||
# Vector list of regex patterns of server names that continuwuity will
|
||||
# refuse to download remote media from.
|
||||
#
|
||||
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
|
||||
#
|
||||
#forbidden_remote_server_names = []
|
||||
#prevent_media_downloads_from = []
|
||||
|
||||
# List of forbidden server names via regex patterns that we will block all
|
||||
# outgoing federated room directory requests for. Useful for preventing
|
||||
|
@ -1215,8 +1325,31 @@
|
|||
#
|
||||
#forbidden_remote_room_directory_server_names = []
|
||||
|
||||
# Vector list of regex patterns of server names that continuwuity will not
|
||||
# send messages to the client from.
|
||||
#
|
||||
# Note that there is no way for clients to receive messages once a server
|
||||
# has become unignored without doing a full sync. This is a protocol
|
||||
# limitation with the current sync protocols. This means this is somewhat
|
||||
# of a nuclear option.
|
||||
#
|
||||
# example: ["reallybadserver\.tld$", "reallybadphrase",
|
||||
# "69dollarfortnitecards"]
|
||||
#
|
||||
#ignore_messages_from_server_names = []
|
||||
|
||||
# Send messages from users that the user has ignored to the client.
|
||||
#
|
||||
# There is no way for clients to receive messages sent while a user was
|
||||
# ignored without doing a full sync. This is a protocol limitation with
|
||||
# the current sync protocols. Disabling this option will move
|
||||
# responsibility of ignoring messages to the client, which can avoid this
|
||||
# limitation.
|
||||
#
|
||||
#send_messages_from_ignored_users_to_client = false
|
||||
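
A hedged sketch of the two ignore-related options above (the server pattern is a placeholder):

```toml
[global]
# Hide events from matching servers from clients entirely. Note that
# un-ignoring a server later requires clients to do a full resync.
ignore_messages_from_server_names = ["^spamserver\\.example$"]
# Still deliver events from users a local user has ignored, so clients can
# filter them locally and avoid the resync limitation described above.
send_messages_from_ignored_users_to_client = true
```
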
|
||||
# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
|
||||
# do not want conduwuit to send outbound requests to. Defaults to
|
||||
# do not want continuwuity to send outbound requests to. Defaults to
|
||||
# RFC1918, unroutable, loopback, multicast, and testnet addresses for
|
||||
# security.
|
||||
#
|
||||
|
@ -1366,26 +1499,26 @@
|
|||
|
||||
# Allow admins to enter commands in rooms other than "#admins" (admin
|
||||
# room) by prefixing your message with "\!admin" or "\\!admin" followed up
|
||||
# a normal conduwuit admin command. The reply will be publicly visible to
|
||||
# the room, originating from the sender.
|
||||
# a normal continuwuity admin command. The reply will be publicly visible
|
||||
# to the room, originating from the sender.
|
||||
#
|
||||
# example: \\!admin debug ping puppygock.gay
|
||||
#
|
||||
#admin_escape_commands = true
|
||||
|
||||
# Automatically activate the conduwuit admin room console / CLI on
|
||||
# startup. This option can also be enabled with `--console` conduwuit
|
||||
# Automatically activate the continuwuity admin room console / CLI on
|
||||
# startup. This option can also be enabled with `--console` continuwuity
|
||||
# argument.
|
||||
#
|
||||
#admin_console_automatic = false
|
||||
|
||||
# List of admin commands to execute on startup.
|
||||
#
|
||||
# This option can also be configured with the `--execute` conduwuit
|
||||
# This option can also be configured with the `--execute` continuwuity
|
||||
# argument and can take standard shell commands and environment variables
|
||||
#
|
||||
# For example: `./conduwuit --execute "server admin-notice conduwuit has
|
||||
# started up at $(date)"`
|
||||
# For example: `./continuwuity --execute "server admin-notice continuwuity
|
||||
# has started up at $(date)"`
|
||||
#
|
||||
# example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]
|
||||
#
|
||||
|
@ -1393,7 +1526,7 @@
|
|||
|
||||
# Ignore errors in startup commands.
|
||||
#
|
||||
# If false, conduwuit will error and fail to start if an admin execute
|
||||
# If false, continuwuity will error and fail to start if an admin execute
|
||||
# command (`--execute` / `admin_execute`) fails.
|
||||
#
|
||||
#admin_execute_errors_ignore = false
|
||||
|
@ -1414,23 +1547,22 @@
|
|||
# The default room tag to apply on the admin room.
|
||||
#
|
||||
# On some clients like Element, the room tag "m.server_notice" is a
|
||||
# special pinned room at the very bottom of your room list. The conduwuit
|
||||
# admin room can be pinned here so you always have an easy-to-access
|
||||
# shortcut dedicated to your admin room.
|
||||
# special pinned room at the very bottom of your room list. The
|
||||
# continuwuity admin room can be pinned here so you always have an
|
||||
# easy-to-access shortcut dedicated to your admin room.
|
||||
#
|
||||
#admin_room_tag = "m.server_notice"
|
||||
|
||||
# Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
|
||||
# This is NOT enabled by default. conduwuit's default Sentry reporting
|
||||
# endpoint domain is `o4506996327251968.ingest.us.sentry.io`.
|
||||
# This is NOT enabled by default.
|
||||
#
|
||||
#sentry = false
|
||||
|
||||
# Sentry reporting URL, if a custom one is desired.
|
||||
#
|
||||
#sentry_endpoint = "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536"
|
||||
#sentry_endpoint = ""
|
||||
|
||||
# Report your conduwuit server_name in Sentry.io crash reports and
|
||||
# Report your continuwuity server_name in Sentry.io crash reports and
|
||||
# metrics.
|
||||
#
|
||||
#sentry_send_server_name = false
|
||||
|
@ -1467,7 +1599,7 @@
|
|||
# Enable the tokio-console. This option is only relevant to developers.
|
||||
#
|
||||
# For more information, see:
|
||||
# https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console
|
||||
# https://continuwuity.org/development.html#debugging-with-tokio-console
|
||||
#
|
||||
#tokio_console = false
|
||||
|
||||
|
@ -1572,6 +1704,10 @@
|
|||
#
|
||||
#config_reload_signal = true
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
#
|
||||
#ldap = false
|
||||
|
||||
[global.tls]
|
||||
|
||||
# Path to a valid TLS certificate file.
|
||||
|
@ -1607,19 +1743,29 @@
|
|||
#
|
||||
#server =
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
# URL to a support page for the server, which will be served as part of
|
||||
# the MSC1929 server support endpoint at /.well-known/matrix/support.
|
||||
# Will be included alongside any contact information
|
||||
#
|
||||
#support_page =
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
# Role string for server support contacts, to be served as part of the
|
||||
# MSC1929 server support endpoint at /.well-known/matrix/support.
|
||||
#
|
||||
#support_role =
|
||||
#support_role = "m.role.admin"
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
# Email address for server support contacts, to be served as part of the
|
||||
# MSC1929 server support endpoint.
|
||||
# This will be used along with support_mxid if specified.
|
||||
#
|
||||
#support_email =
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
# Matrix ID for server support contacts, to be served as part of the
|
||||
# MSC1929 server support endpoint.
|
||||
# This will be used along with support_email if specified.
|
||||
#
|
||||
# If no email or mxid is specified, all of the server's admins will be
|
||||
# listed.
|
||||
#
|
||||
#support_mxid =
|
||||
|
||||
|
@ -1640,3 +1786,91 @@
|
|||
# is 33.55MB. Setting it to 0 disables blurhashing.
|
||||
#
|
||||
#blurhash_max_raw_size = 33554432
|
||||
|
||||
[global.ldap]
|
||||
|
||||
# Whether to enable LDAP login.
|
||||
#
|
||||
# example: "true"
|
||||
#
|
||||
#enable = false
|
||||
|
||||
# Whether to force LDAP authentication or authorize classical password
|
||||
# login.
|
||||
#
|
||||
# example: "true"
|
||||
#
|
||||
#ldap_only = false
|
||||
|
||||
# URI of the LDAP server.
|
||||
#
|
||||
# example: "ldap://ldap.example.com:389"
|
||||
#
|
||||
#uri = ""
|
||||
|
||||
# Root of the searches.
|
||||
#
|
||||
# example: "ou=users,dc=example,dc=org"
|
||||
#
|
||||
#base_dn = ""
|
||||
|
||||
# Bind DN if anonymous search is not enabled.
|
||||
#
|
||||
# You can use the variable `{username}` that will be replaced by the
|
||||
# entered username. In such case, the password used to bind will be the
|
||||
# one provided for the login and not the one given by
|
||||
# `bind_password_file`. Beware: automatically granting admin rights will
|
||||
# not work if you use this direct bind instead of a LDAP search.
|
||||
#
|
||||
# example: "cn=ldap-reader,dc=example,dc=org" or
|
||||
# "cn={username},ou=users,dc=example,dc=org"
|
||||
#
|
||||
#bind_dn = ""
|
||||
|
||||
# Path to a file on the system that contains the password for the
|
||||
# `bind_dn`.
|
||||
#
|
||||
# The server must be able to access the file, and it must not be empty.
|
||||
#
|
||||
#bind_password_file = ""
|
||||
|
||||
# Search filter to limit user searches.
|
||||
#
|
||||
# You can use the variable `{username}` that will be replaced by the
|
||||
# entered username for more complex filters.
|
||||
#
|
||||
# example: "(&(objectClass=person)(memberOf=matrix))"
|
||||
#
|
||||
#filter = "(objectClass=*)"
|
||||
|
||||
# Attribute to use to uniquely identify the user.
|
||||
#
|
||||
# example: "uid" or "cn"
|
||||
#
|
||||
#uid_attribute = "uid"
|
||||
|
||||
# Attribute containing the display name of the user.
|
||||
#
|
||||
# example: "givenName" or "sn"
|
||||
#
|
||||
#name_attribute = "givenName"
|
||||
|
||||
# Root of the searches for admin users.
|
||||
#
|
||||
# Defaults to `base_dn` if empty.
|
||||
#
|
||||
# example: "ou=admins,dc=example,dc=org"
|
||||
#
|
||||
#admin_base_dn = ""
|
||||
|
||||
# The LDAP search filter to find administrative users for continuwuity.
|
||||
#
|
||||
# If left blank, administrative state must be configured manually for each
|
||||
# user.
|
||||
#
|
||||
# You can use the variable `{username}` that will be replaced by the
|
||||
# entered username for more complex filters.
|
||||
#
|
||||
# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
|
||||
#
|
||||
#admin_filter = ""
|
||||
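
A minimal LDAP sketch tying the options above together; every URI, DN, and filter below is a placeholder for a hypothetical directory:

```toml
[global.ldap]
enable = true
# Keep classical password login working alongside LDAP.
ldap_only = false
uri = "ldap://ldap.example.com:389"
base_dn = "ou=users,dc=example,dc=org"
# Service account used for the user search; its password is read from a file.
bind_dn = "cn=ldap-reader,dc=example,dc=org"
bind_password_file = "/etc/continuwuity/ldap_password"
# Only allow people in the hypothetical "matrix" group to log in.
filter = "(&(objectClass=person)(memberOf=matrix))"
uid_attribute = "uid"
name_attribute = "givenName"
# Users matching this filter are treated as continuwuity admins.
admin_filter = "(objectClass=continuwuityAdmin)"
```
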
|
|
29
debian/README.md
vendored
|
@ -1,29 +0,0 @@
|
|||
# conduwuit for Debian
|
||||
|
||||
Information about downloading and deploying the Debian package. This may also be
|
||||
referenced for other `apt`-based distros such as Ubuntu.
|
||||
|
||||
### Installation
|
||||
|
||||
It is recommended to see the [generic deployment guide](../deploying/generic.md)
|
||||
for further information if needed as usage of the Debian package is generally
|
||||
related.
|
||||
|
||||
No `apt` repository is currently offered yet, it is in the works/development.
|
||||
|
||||
### Configuration
|
||||
|
||||
When installed, the example config is placed at `/etc/conduwuit/conduwuit.toml`
|
||||
as the default config. The config mentions things required to be changed before
|
||||
starting.
|
||||
|
||||
You can tweak more detailed settings by uncommenting and setting the config
|
||||
options in `/etc/conduwuit/conduwuit.toml`.
|
||||
|
||||
### Running
|
||||
|
||||
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop conduwuit. The binary is installed at `/usr/sbin/conduwuit`.
|
||||
|
||||
This package assumes by default that conduwuit will be placed behind a reverse proxy. The default config options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS, so you will need to set up TLS certificates and renewal for it to work properly if you intend to federate.
|
||||
|
||||
Consult various online documentation and guides on setting up a reverse proxy and TLS. Caddy is documented at the [generic deployment guide](../deploying/generic.md#setting-up-the-reverse-proxy) as it's the easiest and most user friendly.
|
44
debian/postinst
vendored
|
@ -1,44 +0,0 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# TODO: implement debconf support that is maintainable without duplicating the config
|
||||
#. /usr/share/debconf/confmodule
|
||||
|
||||
CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
|
||||
CONDUWUIT_CONFIG_PATH=/etc/conduwuit
|
||||
|
||||
case "$1" in
|
||||
configure)
|
||||
# Create the `conduwuit` user if it does not exist yet.
|
||||
if ! getent passwd conduwuit > /dev/null ; then
|
||||
echo 'Adding system user for the conduwuit Matrix homeserver' 1>&2
|
||||
adduser --system --group --quiet \
|
||||
--home "$CONDUWUIT_DATABASE_PATH" \
|
||||
--disabled-login \
|
||||
--shell "/usr/sbin/nologin" \
|
||||
conduwuit
|
||||
fi
|
||||
|
||||
# Create the database path if it does not exist yet and fix up ownership
|
||||
# and permissions for the config.
|
||||
mkdir -v -p "$CONDUWUIT_DATABASE_PATH"
|
||||
|
||||
# symlink the previous location for compatibility if it does not exist yet.
|
||||
if ! test -L "/var/lib/matrix-conduit" ; then
|
||||
ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
|
||||
fi
|
||||
|
||||
chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH"
|
||||
chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH"
|
||||
|
||||
chmod -v 740 "$CONDUWUIT_DATABASE_PATH"
|
||||
|
||||
echo ''
|
||||
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
|
||||
echo 'To start the server, run: systemctl start conduwuit.service'
|
||||
echo ''
|
||||
|
||||
;;
|
||||
esac
|
||||
|
||||
#DEBHELPER#
|
260
docker/Dockerfile
Normal file
|
@ -0,0 +1,260 @@
|
|||
ARG RUST_VERSION=1
|
||||
ARG DEBIAN_VERSION=bookworm
|
||||
|
||||
FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx
|
||||
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-${DEBIAN_VERSION} AS base
|
||||
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-${DEBIAN_VERSION} AS toolchain
|
||||
|
||||
# Prevent deletion of apt cache
|
||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean
|
||||
|
||||
# Match Rustc version as close as possible
|
||||
# rustc -vV
|
||||
ARG LLVM_VERSION=20
|
||||
# ENV RUSTUP_TOOLCHAIN=${RUST_VERSION}
|
||||
|
||||
# Install repo tools
|
||||
# Line one: compiler tools
|
||||
# Line two: curl, for downloading binaries
|
||||
# Line three: for xx-verify
|
||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update && apt-get install -y \
|
||||
pkg-config make jq \
|
||||
curl git software-properties-common \
|
||||
file
|
||||
|
||||
# LLVM packages
|
||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
curl https://apt.llvm.org/llvm.sh > llvm.sh && \
|
||||
chmod +x llvm.sh && \
|
||||
./llvm.sh ${LLVM_VERSION} && \
|
||||
rm llvm.sh
|
||||
|
||||
# Create symlinks for LLVM tools
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
# clang
|
||||
ln -s /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang
|
||||
ln -s "/usr/bin/clang++-${LLVM_VERSION}" "/usr/bin/clang++"
|
||||
# lld
|
||||
ln -s /usr/bin/ld64.lld-${LLVM_VERSION} /usr/bin/ld64.lld
|
||||
ln -s /usr/bin/ld.lld-${LLVM_VERSION} /usr/bin/ld.lld
|
||||
ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/lld
|
||||
ln -s /usr/bin/lld-link-${LLVM_VERSION} /usr/bin/lld-link
|
||||
ln -s /usr/bin/wasm-ld-${LLVM_VERSION} /usr/bin/wasm-ld
|
||||
EOF
|
||||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.13.0
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
ENV LDDTREE_VERSION=0.3.7
|
||||
|
||||
# Install unpackaged tools
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
|
||||
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
|
||||
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
|
||||
EOF
|
||||
|
||||
# Set up xx (cross-compilation scripts)
|
||||
COPY --from=xx / /
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
# Install libraries linked by the binary
|
||||
# xx-* are xx-specific meta-packages
|
||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
xx-apt-get install -y \
|
||||
xx-c-essentials xx-cxx-essentials pkg-config \
|
||||
liburing-dev
|
||||
|
||||
# Set up Rust toolchain
|
||||
WORKDIR /app
|
||||
COPY ./rust-toolchain.toml .
|
||||
RUN rustc --version \
|
||||
&& xx-cargo --setup-target-triple
|
||||
|
||||
# Build binary
|
||||
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
|
||||
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
|
||||
|
||||
# Configure pkg-config
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
|
||||
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
|
||||
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
|
||||
fi
|
||||
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
|
||||
EOF
|
||||
|
||||
# Configure cc to use clang version
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
echo "CC=clang" >> /etc/environment
|
||||
echo "CXX=clang++" >> /etc/environment
|
||||
EOF
|
||||
|
||||
# Cross-language LTO
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
echo "CFLAGS=-flto" >> /etc/environment
|
||||
echo "CXXFLAGS=-flto" >> /etc/environment
|
||||
# Linker is set to target-compatible clang by xx
|
||||
echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment
|
||||
EOF
|
||||
|
||||
# Apply CPU-specific optimizations if TARGET_CPU is provided
|
||||
ARG TARGET_CPU
|
||||
|
||||
RUN <<EOF
|
||||
set -o allexport
|
||||
set -o xtrace
|
||||
. /etc/environment
|
||||
if [ -n "${TARGET_CPU}" ]; then
|
||||
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
|
||||
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
|
||||
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
|
||||
fi
|
||||
EOF
|
||||
|
||||
# Prepare output directories
|
||||
RUN mkdir /out
|
||||
|
||||
FROM toolchain AS builder
|
||||
|
||||
|
||||
# Get source
|
||||
COPY . .
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
# Verify environment configuration
|
||||
RUN xx-cargo --print-target-triple
|
||||
|
||||
# Conduwuit version info
|
||||
ARG GIT_COMMIT_HASH
|
||||
ARG GIT_COMMIT_HASH_SHORT
|
||||
ARG GIT_REMOTE_URL
|
||||
ARG GIT_REMOTE_COMMIT_URL
|
||||
ARG CONDUWUIT_VERSION_EXTRA
|
||||
ARG CONTINUWUITY_VERSION_EXTRA
|
||||
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
|
||||
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
|
||||
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
|
||||
ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL
|
||||
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
|
||||
ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA
|
||||
|
||||
ARG RUST_PROFILE=release
|
||||
|
||||
# Build the binary
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git/db \
|
||||
--mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-${RUST_PROFILE} \
|
||||
bash <<'EOF'
|
||||
set -o allexport
|
||||
set -o xtrace
|
||||
. /etc/environment
|
||||
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
|
||||
jq -r ".target_directory"))
|
||||
mkdir /out/sbin
|
||||
PACKAGE=conduwuit
|
||||
xx-cargo build --locked --profile ${RUST_PROFILE} \
|
||||
-p $PACKAGE;
|
||||
BINARIES=($(cargo metadata --no-deps --format-version 1 | \
|
||||
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
|
||||
for BINARY in "${BINARIES[@]}"; do
|
||||
echo $BINARY
|
||||
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
|
||||
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
|
||||
done
|
||||
EOF
|
||||
|
||||
# Generate Software Bill of Materials (SBOM)
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git/db \
|
||||
bash <<'EOF'
|
||||
set -o xtrace
|
||||
mkdir /out/sbom
|
||||
typeset -A PACKAGES
|
||||
for BINARY in /out/sbin/*; do
|
||||
BINARY_BASE=$(basename ${BINARY})
|
||||
package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. == \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name")
|
||||
if [ -z "$package" ]; then
|
||||
continue
|
||||
fi
|
||||
PACKAGES[$package]=1
|
||||
done
|
||||
for PACKAGE in $(echo ${!PACKAGES[@]}); do
|
||||
echo $PACKAGE
|
||||
cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json
|
||||
done
|
||||
EOF
|
||||
|
||||
# Extract dynamically linked dependencies
|
||||
RUN <<'DEPS_EOF'
|
||||
set -o xtrace
|
||||
mkdir /out/libs /out/libs-root
|
||||
|
||||
# Process each binary
|
||||
for BINARY in /out/sbin/*; do
|
||||
if lddtree_output=$(lddtree "$BINARY" 2>/dev/null) && [ -n "$lddtree_output" ]; then
|
||||
echo "$lddtree_output" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | \
|
||||
awk '{dest = ($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2; print "install -D " $1 " " dest}' | \
|
||||
while read cmd; do eval "$cmd"; done
|
||||
fi
|
||||
done
|
||||
|
||||
# Show what will be copied to runtime
|
||||
echo "=== Libraries being copied to runtime image:"
|
||||
find /out/libs* -type f 2>/dev/null | sort || echo "No libraries found"
|
||||
DEPS_EOF
|
||||
|
||||
FROM ubuntu:latest AS prepper
|
||||
|
||||
# Create layer structure
|
||||
RUN mkdir -p /layer1/etc/ssl/certs \
|
||||
/layer2/usr/lib \
|
||||
/layer3/sbin /layer3/sbom
|
||||
|
||||
# Copy SSL certs and root-path libraries to layer1 (ultra-stable)
|
||||
COPY --from=base /etc/ssl/certs /layer1/etc/ssl/certs
|
||||
COPY --from=builder /out/libs-root/ /layer1/
|
||||
|
||||
# Copy application libraries to layer2 (semi-stable)
|
||||
COPY --from=builder /out/libs/ /layer2/usr/lib/
|
||||
|
||||
# Copy binaries and SBOM to layer3 (volatile)
|
||||
COPY --from=builder /out/sbin/ /layer3/sbin/
|
||||
COPY --from=builder /out/sbom/ /layer3/sbom/
|
||||
|
||||
# Fix permissions after copying
|
||||
RUN chmod -R 755 /layer1 /layer2 /layer3
|
||||
|
||||
FROM scratch
|
||||
|
||||
WORKDIR /
|
||||
|
||||
# Copy ultra-stable layer (SSL certs, system libraries)
|
||||
COPY --from=prepper /layer1/ /
|
||||
|
||||
# Copy semi-stable layer (application libraries)
|
||||
COPY --from=prepper /layer2/ /
|
||||
|
||||
# Copy volatile layer (binaries, SBOM)
|
||||
COPY --from=prepper /layer3/ /
|
||||
|
||||
# Inform linker where to find libraries
|
||||
ENV LD_LIBRARY_PATH=/usr/lib
|
||||
|
||||
# Continuwuity default port
|
||||
EXPOSE 8008
|
||||
|
||||
CMD ["/sbin/conduwuit"]
|
200
docker/musl.Dockerfile
Normal file
|
@ -0,0 +1,200 @@
|
|||
# Why does this exist?
|
||||
# Debian doesn't provide prebuilt musl packages
|
||||
# rocksdb requires a prebuilt liburing, and linking fails if a gnu one is provided
|
||||
|
||||
ARG RUST_VERSION=1
|
||||
ARG ALPINE_VERSION=3.22
|
||||
|
||||
FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx
|
||||
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS base
|
||||
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS toolchain
|
||||
|
||||
# Install repo tools and dependencies
|
||||
RUN --mount=type=cache,target=/etc/apk/cache apk add \
|
||||
build-base pkgconfig make jq bash \
|
||||
curl git file \
|
||||
llvm-dev clang clang-static lld
|
||||
|
||||
|
||||
# Developer tool versions
|
||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
||||
ENV BINSTALL_VERSION=1.13.0
|
||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
||||
ENV CARGO_SBOM_VERSION=0.9.1
|
||||
# renovate: datasource=crate depName=lddtree
|
||||
ENV LDDTREE_VERSION=0.3.7
|
||||
|
||||
# Install unpackaged tools
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
|
||||
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
|
||||
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
|
||||
EOF
|
||||
|
||||
# Set up xx (cross-compilation scripts)
|
||||
COPY --from=xx / /
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
# Install libraries linked by the binary
|
||||
RUN --mount=type=cache,target=/etc/apk/cache xx-apk add musl-dev gcc g++ liburing-dev
|
||||
|
||||
# Set up Rust toolchain
|
||||
WORKDIR /app
|
||||
COPY ./rust-toolchain.toml .
|
||||
RUN rustc --version \
|
||||
&& xx-cargo --setup-target-triple
|
||||
|
||||
# Build binary
|
||||
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
|
||||
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
|
||||
|
||||
# Configure pkg-config
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
|
||||
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
|
||||
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
|
||||
fi
|
||||
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
|
||||
EOF
|
||||
|
||||
# Configure cc to use clang version
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
echo "CC=clang" >> /etc/environment
|
||||
echo "CXX=clang++" >> /etc/environment
|
||||
EOF
|
||||
|
||||
# Cross-language LTO
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
echo "CFLAGS=-flto" >> /etc/environment
|
||||
echo "CXXFLAGS=-flto" >> /etc/environment
|
||||
# Linker is set to target-compatible clang by xx
|
||||
echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment
|
||||
EOF
|
||||
|
||||
# Apply CPU-specific optimizations if TARGET_CPU is provided
|
||||
ARG TARGET_CPU
|
||||
|
||||
RUN <<EOF
|
||||
set -o allexport
|
||||
set -o xtrace
|
||||
. /etc/environment
|
||||
if [ -n "${TARGET_CPU}" ]; then
|
||||
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
|
||||
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
|
||||
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
|
||||
fi
|
||||
EOF
|
||||
|
||||
# Prepare output directories
|
||||
RUN mkdir /out
|
||||
|
||||
FROM toolchain AS builder
|
||||
|
||||
|
||||
# Get source
|
||||
COPY . .
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
# Verify environment configuration
|
||||
RUN xx-cargo --print-target-triple
|
||||
|
||||
# Conduwuit version info
|
||||
ARG GIT_COMMIT_HASH
|
||||
ARG GIT_COMMIT_HASH_SHORT
|
||||
ARG GIT_REMOTE_URL
|
||||
ARG GIT_REMOTE_COMMIT_URL
|
||||
ARG CONDUWUIT_VERSION_EXTRA
|
||||
ARG CONTINUWUITY_VERSION_EXTRA
|
||||
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
|
||||
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
|
||||
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
|
||||
ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL
|
||||
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
|
||||
ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA
|
||||
|
||||
ARG RUST_PROFILE=release
|
||||
|
||||
# Build the binary
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git/db \
|
||||
--mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-musl-${RUST_PROFILE} \
|
||||
bash <<'EOF'
|
||||
set -o allexport
|
||||
set -o xtrace
|
||||
. /etc/environment
|
||||
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
|
||||
jq -r ".target_directory"))
|
||||
mkdir /out/sbin
|
||||
PACKAGE=conduwuit
|
||||
xx-cargo build --locked --profile ${RUST_PROFILE} \
|
||||
-p $PACKAGE --no-default-features --features bindgen-static,release_max_log_level,standard;
|
||||
BINARIES=($(cargo metadata --no-deps --format-version 1 | \
|
||||
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
|
||||
for BINARY in "${BINARIES[@]}"; do
|
||||
echo $BINARY
|
||||
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
|
||||
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
|
||||
done
|
||||
EOF
|
||||
|
||||
# Generate Software Bill of Materials (SBOM)
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git/db \
|
||||
bash <<'EOF'
|
||||
set -o xtrace
|
||||
mkdir /out/sbom
|
||||
typeset -A PACKAGES
|
||||
for BINARY in /out/sbin/*; do
|
||||
BINARY_BASE=$(basename ${BINARY})
|
||||
package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. == \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name")
|
||||
if [ -z "$package" ]; then
|
||||
continue
|
||||
fi
|
||||
PACKAGES[$package]=1
|
||||
done
|
||||
for PACKAGE in $(echo ${!PACKAGES[@]}); do
|
||||
echo $PACKAGE
|
||||
cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json
|
||||
done
|
||||
EOF
|
||||
|
||||
# Extract dynamically linked dependencies
|
||||
RUN <<EOF
|
||||
set -o xtrace
|
||||
mkdir /out/libs
|
||||
mkdir /out/libs-root
|
||||
for BINARY in /out/sbin/*; do
|
||||
lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
|
||||
done
|
||||
EOF
|
||||
|
||||
FROM scratch
|
||||
|
||||
WORKDIR /
|
||||
|
||||
# Copy root certs for tls into image
|
||||
# You can also mount the certs from the host
|
||||
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
|
||||
COPY --from=base /etc/ssl/certs /etc/ssl/certs
|
||||
|
||||
# Copy our build
|
||||
COPY --from=builder /out/sbin/ /sbin/
|
||||
# Copy SBOM
|
||||
COPY --from=builder /out/sbom/ /sbom/
|
||||
|
||||
# Copy dynamic libraries to root
|
||||
COPY --from=builder /out/libs-root/ /
|
||||
COPY --from=builder /out/libs/ /usr/lib/
|
||||
|
||||
# Inform linker where to find libraries
|
||||
ENV LD_LIBRARY_PATH=/usr/lib
|
||||
|
||||
# Continuwuity default port
|
||||
EXPOSE 8008
|
||||
|
||||
CMD ["/sbin/conduwuit"]
|
|
@ -15,8 +15,11 @@
|
|||
- [Appservices](appservices.md)
|
||||
- [Maintenance](maintenance.md)
|
||||
- [Troubleshooting](troubleshooting.md)
|
||||
- [Admin Command Reference](admin_reference.md)
|
||||
- [Development](development.md)
|
||||
- [Contributing](contributing.md)
|
||||
- [Code Style Guide](development/code_style.md)
|
||||
- [Testing](development/testing.md)
|
||||
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
|
||||
- [conduwuit Community Code of Conduct](conduwuit_coc.md)
|
||||
- [Community (and Guidelines)](community.md)
|
||||
- [Security](security.md)
|
||||
|
|
2673
docs/admin_reference.md
Normal file
File diff suppressed because it is too large

@@ -3,8 +3,8 @@
## Getting help

If you run into any problems while setting up an Appservice: ask us in
[#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay) or
[open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new).
[#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) or
[open an issue on Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new).

## Set up the appservice - general instructions

@@ -14,7 +14,7 @@ later starting it.

At some point the appservice guide should ask you to add a registration yaml
file to the homeserver. In Synapse you would do this by adding the path to the
homeserver.yaml, but in conduwuit you can do this from within Matrix:
homeserver.yaml, but in Continuwuity you can do this from within Matrix:

First, go into the `#admins` room of your homeserver. The first person that
registered on the homeserver automatically joins it. Then send a message into

@@ -37,9 +37,9 @@ You can confirm it worked by sending a message like this:

The server bot should answer with `Appservices (1): your-bridge`

Then you are done. conduwuit will send messages to the appservices and the
Then you are done. Continuwuity will send messages to the appservices and the
appservice can send requests to the homeserver. You don't need to restart
conduwuit, but if it doesn't work, restarting while the appservice is running
Continuwuity, but if it doesn't work, restarting while the appservice is running
could help.

## Appservice-specific instructions

docs/community.md (new file, 139 lines):
@@ -0,0 +1,139 @@
# Continuwuity Community Guidelines

Welcome to the Continuwuity commuwunity! We're excited to have you here. Continuwuity is a
continuation of the conduwuit homeserver, which in turn is a hard-fork of the Conduit homeserver,
aimed at making Matrix more accessible and inclusive for everyone.

This space is dedicated to fostering a positive, supportive, and welcoming environment for everyone.
These guidelines apply to all Continuwuity spaces, including our Matrix rooms and any other
community channels that reference them. We've written these guidelines to help us all create an
environment where everyone feels safe and respected.

For code and contribution guidelines, please refer to the
[Contributor's Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md).
Below are additional guidelines specific to the Continuwuity community.

## Our Values and Expected Behaviors

We strive to create a community based on mutual respect, collaboration, and inclusivity. We expect
all members to:

1. **Be Respectful and Inclusive**: Treat everyone with respect. We're committed to a community
where everyone feels safe, regardless of background, identity, or experience. Discrimination,
harassment, or hate speech won't be tolerated. Remember that each person experiences the world
differently; share your own perspective and be open to learning about others'.

2. **Be Positive and Constructive**: Engage in discussions constructively and support each other.
If you feel angry or frustrated, take a break before participating. Approach disagreements with
the goal of understanding, not winning. Focus on the issue, not the person.

3. **Communicate Clearly and Kindly**: Our community includes neurodivergent individuals and those
who may not appreciate sarcasm or subtlety. Communicate clearly and kindly. Avoid ambiguity and
ensure your messages can be easily understood by all. Avoid placing the burden of education on
marginalized groups; please make an effort to look into your questions before asking others for
detailed explanations.

4. **Be Open to Improving Inclusivity**: Actively participate in making our community more inclusive.
Report behaviour that contradicts these guidelines (see Reporting and Enforcement below) and be
open to constructive feedback aimed at improving our community. Understand that discussing
negative experiences can be emotionally taxing; focus on the message, not the tone.

5. **Commit to Our Values**: Building an inclusive community requires ongoing effort from everyone.
Recognise that addressing bias and discrimination is a continuous process that needs commitment
and action from all members.

## Unacceptable Behaviors

To ensure everyone feels safe and welcome, the following behaviors are considered unacceptable
within the Continuwuity community:

* **Harassment and Discrimination**: Avoid offensive comments related to background, family status,
gender, gender identity or expression, marital status, sex, sexual orientation, native language,
age, ability, race and/or ethnicity, caste, national origin, socioeconomic status, religion,
geographic location, or any other dimension of diversity. Don't deliberately misgender someone or
question the legitimacy of their gender identity.

* **Violence and Threats**: Do not engage in any form of violence or threats, including inciting
violence towards anyone or encouraging self-harm. Posting or threatening to post someone else's
personally identifying information ("doxxing") is also forbidden.

* **Personal Attacks**: Disagreements happen, but they should never turn into personal attacks.
Don't insult, demean, or belittle others.

* **Unwelcome Attention or Contact**: Avoid unwelcome sexual attention, inappropriate physical
contact (or simulation thereof), sexualized comments, jokes, or imagery.

* **Disruption**: Do not engage in sustained disruption of discussions, events, or other
community activities.

* **Bad Faith Actions**: Do not intentionally make false reports or otherwise abuse the reporting
process.

This is not an exhaustive list. Any behaviour that makes others feel unsafe or unwelcome may be
subject to enforcement action.

## Matrix Community

These Community Guidelines apply to the entire
[Continuwuity Matrix Space](https://matrix.to/#/#space:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) and its rooms, including:

### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org)

This room is for support and discussions about Continuwuity. Ask questions, share insights, and help
each other out while adhering to these guidelines.

We ask that this room remain focused on the Continuwuity software specifically: the team are
typically happy to engage in conversations about related subjects in the off-topic room.

### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org)

For off-topic community conversations about any subject. While this room allows for a wide range of
topics, the same guidelines apply. Please keep discussions respectful and inclusive, and avoid
divisive or stressful subjects like specific country/world politics unless handled with exceptional
care and respect for diverse viewpoints.

General topics, such as world events, are welcome as long as they follow the guidelines. If a member
of the team asks for the conversation to end, please respect their decision.

### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org)

This room is dedicated to discussing active development of Continuwuity, including ongoing issues or
code development. Collaboration here must follow these guidelines, and please consider raising
[an issue](https://forgejo.ellis.link/continuwuation/continuwuity/issues) on the repository to help
track progress.

## Reporting and Enforcement

We take these Community Guidelines seriously to protect our community members. If you witness or
experience unacceptable behaviour, or have any other concerns, please report it.

**How to Report:**

* **Alert Moderators in the Room:** If you feel comfortable doing so, you can address the issue
publicly in the relevant room by mentioning the moderation bot, `@rock:continuwuity.org`, which
will immediately alert all available moderators.
* **Direct Message:** If you're not comfortable raising the issue publicly, please send a direct
message (DM) to one of the room moderators.

Reports will be handled with discretion. We will investigate promptly and thoroughly.

**Enforcement Actions:**

Anyone asked to stop unacceptable behaviour is expected to comply immediately. Failure to do so, or
engaging in prohibited behaviour, may result in enforcement action. Moderators may take actions they
deem appropriate, including but not limited to:

1. **Warning**: A direct message or public warning identifying the violation and requesting
corrective action.
2. **Temporary Mute**: Temporary restriction from participating in discussions for a specified
period.
3. **Kick or Ban**: Removal from a room (kick) or the entire community space (ban). Egregious or
repeated violations may result in an immediate ban. Bans are typically permanent and reviewed
only in exceptional circumstances.

Retaliation against those who report concerns in good faith will not be tolerated and will be
subject to the same enforcement actions.

Together, let's build and maintain a community where everyone feels valued, safe, and respected.

— The Continuwuity Moderation Team
@@ -1,93 +0,0 @@
# conduwuit Community Code of Conduct

Welcome to the conduwuit community! We’re excited to have you here. conduwuit is
a hard-fork of the Conduit homeserver, aimed at making Matrix more accessible
and inclusive for everyone.

This space is dedicated to fostering a positive, supportive, and inclusive
environment for everyone. This Code of Conduct applies to all conduwuit spaces,
including any further community rooms that reference this CoC. Here are our
guidelines to help maintain the welcoming atmosphere that sets conduwuit apart.

For the general foundational rules, please refer to the [Contributor's
Covenant](https://github.com/girlbossceo/conduwuit/blob/main/CODE_OF_CONDUCT.md).
Below are additional guidelines specific to the conduwuit community.

## Our Values and Guidelines

1. **Respect and Inclusivity**: We are committed to maintaining a community
where everyone feels safe and respected. Discrimination, harassment, or hate
speech of any kind will not be tolerated. Recognise that each community member
experiences the world differently based on their past experiences, background,
and identity. Share your own experiences and be open to learning about others'
diverse perspectives.

2. **Positivity and Constructiveness**: Engage in constructive discussions and
support each other. If you feel angry, negative, or aggressive, take a break
until you can participate in a positive and constructive manner. Process intense
feelings with a friend or in a private setting before engaging in community
conversations to help maintain a supportive and focused environment.

3. **Clarity and Understanding**: Our community includes neurodivergent
individuals and those who may not appreciate sarcasm or subtlety. Communicate
clearly and kindly, avoiding sarcasm and ensuring your messages are easily
understood by all. Additionally, avoid putting the burden of education on
marginalized groups by doing your own research before asking for explanations.

4. **Be Open to Inclusivity**: Actively engage in conversations about making our
community more inclusive. Report discriminatory behavior to the moderators
and be open to constructive feedback that aims to improve our community.
Understand that discussing discrimination and negative experiences can be
emotionally taxing, so focus on the message rather than critiquing the tone
used.

5. **Commit to Inclusivity**: Building an inclusive community requires time,
energy, and resources. Recognise that addressing discrimination and bias is
an ongoing process that necessitates commitment and action from all community
members.

## Matrix Community

This Code of Conduct applies to the entire [conduwuit Matrix
Space](https://matrix.to/#/#conduwuit-space:puppygock.gay) and its rooms,
including:

### [#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay)

This room is for support and discussions about conduwuit. Ask questions, share
insights, and help each other out.

### [#conduwuit-offtopic:girlboss.ceo](https://matrix.to/#/#conduwuit-offtopic:girlboss.ceo)

For off-topic community conversations about any subject. While this room allows
for a wide range of topics, the same CoC applies. Keep discussions respectful
and inclusive, and avoid divisive subjects like country/world politics. General
topics, such as world events, are welcome as long as they follow the CoC.

### [#conduwuit-dev:puppygock.gay](https://matrix.to/#/#conduwuit-dev:puppygock.gay)

This room is dedicated to discussing active development of conduwuit. Posting
requires an elevated power level, which can be requested in one of the other
rooms. Use this space to collaborate and innovate.

## Enforcement

We have a zero-tolerance policy for violations of this Code of Conduct. If
someone’s behavior makes you uncomfortable, please report it to the moderators.
Actions we may take include:

1. **Warning**: A warning given directly in the room or via a private message
from the moderators, identifying the violation and requesting corrective
action.
2. **Temporary Mute**: Temporary restriction from participating in discussions
for a specified period to allow for reflection and cooling off.
3. **Kick or Ban**: Egregious behavior may result in an immediate kick or ban to
protect other community members. Bans are considered permanent and will only
be reversed in exceptional circumstances after proven good behavior.

Please highlight issues directly in rooms when possible, but if you don't feel
comfortable doing that, then please send a DM to one of the moderators directly.

Together, let’s build a community where everyone feels valued and respected.

— The conduwuit Moderation Team
@@ -1,10 +1,10 @@
# Configuration

This chapter describes various ways to configure conduwuit.
This chapter describes various ways to configure Continuwuity.

## Basics

conduwuit uses a config file for the majority of the settings, but also supports
Continuwuity uses a config file for the majority of the settings, but also supports
setting individual config options via commandline.

Please refer to the [example config

@@ -12,13 +12,13 @@ file](./configuration/examples.md#example-configuration) for all of those
settings.

The config file to use can be specified on the commandline when running
conduwuit by specifying the `-c`, `--config` flag. Alternatively, you can use
Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use
the environment variable `CONDUWUIT_CONFIG` to specify the config file to use.
Conduit's environment variables are supported for backwards compatibility.

## Option commandline flag

conduwuit supports setting individual config options in TOML format from the
Continuwuity supports setting individual config options in TOML format from the
`-O` / `--option` flag. For example, you can set your server name via `-O
server_name=\"example.com\"`.

@@ -33,7 +33,7 @@ string. This does not apply to options that take booleans or numbers:

## Execute commandline flag

conduwuit supports running admin commands on startup using the commandline
Continuwuity supports running admin commands on startup using the commandline
argument `--execute`. The most notable use for this is to create an admin user
on first startup.
@@ -9,24 +9,11 @@

</details>

## Debian systemd unit file
## systemd unit file

<details>
<summary>Debian systemd unit file</summary>
<summary>systemd unit file</summary>

```
{{#include ../../debian/conduwuit.service}}
{{#include ../../pkg/conduwuit.service}}
```

</details>

## Arch Linux systemd unit file

<details>
<summary>Arch Linux systemd unit file</summary>

```
{{#include ../../arch/conduwuit.service}}
```

</details>
@@ -1,3 +1,3 @@
# Deploying

This chapter describes various ways to deploy conduwuit.
This chapter describes various ways to deploy Continuwuity.
@@ -1,15 +1,5 @@
# conduwuit for Arch Linux
# Continuwuity for Arch Linux

Currently conduwuit is only on the Arch User Repository (AUR).
Continuwuity is available in the `archlinuxcn` repository and AUR with the same package name `continuwuity`, which includes the latest tagged version. The development version is available on AUR as `continuwuity-git`.

The conduwuit AUR packages are community maintained and are not maintained by
conduwuit development team, but the AUR package maintainers are in the Matrix
room. Please attempt to verify your AUR package's PKGBUILD file looks fine
before asking for support.

- [conduwuit](https://aur.archlinux.org/packages/conduwuit) - latest tagged
conduwuit
- [conduwuit-git](https://aur.archlinux.org/packages/conduwuit-git) - latest git
conduwuit from `main` branch
- [conduwuit-bin](https://aur.archlinux.org/packages/conduwuit-bin) - latest
tagged conduwuit static binary
Simply install the `continuwuity` package. Configure the service in `/etc/conduwuit/conduwuit.toml`, then enable and start the continuwuity.service.
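As a hedged illustration of those steps (the exact install command depends on whether you use the `archlinuxcn` repository or an AUR helper such as `paru` for the AUR packages):

```bash
# From the archlinuxcn repository; use an AUR helper instead for continuwuity or continuwuity-git from the AUR
sudo pacman -S continuwuity

# Adjust the configuration, then enable and start the service
sudoedit /etc/conduwuit/conduwuit.toml
sudo systemctl enable --now continuwuity.service
```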

@@ -1 +1 @@
{{#include ../../debian/README.md}}
{{#include ../../pkg/debian/README.md}}
@@ -1,48 +1,58 @@
# conduwuit - Behind Traefik Reverse Proxy
# Continuwuity - Behind Traefik Reverse Proxy

services:
  homeserver:
    ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image,
    ### then you are ready to go.
    image: girlbossceo/conduwuit:latest
    image: forgejo.ellis.link/continuwuation/continuwuity:latest
    restart: unless-stopped
    volumes:
      - db:/var/lib/conduwuit
      #- ./conduwuit.toml:/etc/conduwuit.toml
      - db:/var/lib/continuwuity
      - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
      #- ./continuwuity.toml:/etc/continuwuity.toml
    networks:
      - proxy
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
      - "traefik.http.routers.continuwuity.entrypoints=websecure" # your HTTPS entry point
      - "traefik.http.routers.continuwuity.tls=true"
      - "traefik.http.routers.continuwuity.service=continuwuity"
      - "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
      # possibly, depending on your config:
      # - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
    environment:
      CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS
      CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
      CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label
      CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONDUWUIT_ALLOW_REGISTRATION: 'true'
      CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
      #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
      CONDUWUIT_ALLOW_FEDERATION: 'true'
      CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
      #CONDUWUIT_LOG: warn,state_res=warn
      CONDUWUIT_ADDRESS: 0.0.0.0
      #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
      CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
      CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
      CONTINUWUITY_PORT: 6167 # should match the loadbalancer traefik label
      CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONTINUWUITY_ALLOW_REGISTRATION: 'true'
      CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
      #CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
      CONTINUWUITY_ALLOW_FEDERATION: 'true'
      CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
      #CONTINUWUITY_LOG: warn,state_res=warn
      CONTINUWUITY_ADDRESS: 0.0.0.0
      #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above

      # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN
      # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a seperate
      # We need some way to serve the client and server .well-known json. The simplest way is via the CONTINUWUITY_WELL_KNOWN
      # variable / config option, there are multiple ways to do this, e.g. in the continuwuity.toml file, and in a separate
      # see the override file for more information about delegation
      CONDUWUIT_WELL_KNOWN: |
      CONTINUWUITY_WELL_KNOWN: |
        {
        client=https://your.server.name.example,
        server=your.server.name.example:443
        }
    #cpuset: "0-4" # Uncomment to limit to specific CPU cores
    ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
    ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
      nofile:
        soft: 1048567
        hard: 1048567

  ### Uncomment if you want to use your own Element-Web App.
  ### Note: You need to provide a config.json for Element and you also need a second
  ### Domain or Subdomain for the communication between Element and conduwuit
  ### Domain or Subdomain for the communication between Element and Continuwuity
  ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
  # element-web:
  #   image: vectorim/element-web:latest
|
|||
# conduwuit - Traefik Reverse Proxy Labels
|
||||
# Continuwuity - Traefik Reverse Proxy Labels
|
||||
|
||||
services:
|
||||
homeserver:
|
||||
|
@ -6,17 +6,17 @@ services:
|
|||
- "traefik.enable=true"
|
||||
- "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
|
||||
|
||||
- "traefik.http.routers.to-conduwuit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which conduwuit is hosted
|
||||
- "traefik.http.routers.to-conduwuit.tls=true"
|
||||
- "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker"
|
||||
- "traefik.http.services.to_conduwuit.loadbalancer.server.port=6167"
|
||||
- "traefik.http.routers.to-continuwuity.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Continuwuity is hosted
|
||||
- "traefik.http.routers.to-continuwuity.tls=true"
|
||||
- "traefik.http.routers.to-continuwuity.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.routers.to-continuwuity.middlewares=cors-headers@docker"
|
||||
- "traefik.http.services.to_continuwuity.loadbalancer.server.port=6167"
|
||||
|
||||
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
|
||||
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
|
||||
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"
|
||||
|
||||
# If you want to have your account on <DOMAIN>, but host conduwuit on a subdomain,
|
||||
# If you want to have your account on <DOMAIN>, but host Continuwuity on a subdomain,
|
||||
# you can let it only handle the well known file on that domain instead
|
||||
#- "traefik.http.routers.to-matrix-wellknown.rule=Host(`<DOMAIN>`) && PathPrefix(`/.well-known/matrix`)"
|
||||
#- "traefik.http.routers.to-matrix-wellknown.tls=true"
|
||||
|
@ -34,4 +34,3 @@ services:
|
|||
# - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt"
|
||||
|
||||
# vim: ts=2:sw=2:expandtab
|
||||
|
||||
|
|
|
@@ -1,6 +1,6 @@
services:
  caddy:
    # This compose file uses caddy-docker-proxy as the reverse proxy for conduwuit!
    # This compose file uses caddy-docker-proxy as the reverse proxy for Continuwuity!
    # For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy
    image: lucaslorentz/caddy-docker-proxy:ci-alpine
    ports:

@@ -20,27 +20,28 @@ services:
      caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}}

  homeserver:
    ### If you already built the conduwuit image with 'docker build' or want to use a registry image,
    ### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
    ### then you are ready to go.
    image: girlbossceo/conduwuit:latest
    image: forgejo.ellis.link/continuwuation/continuwuity:latest
    restart: unless-stopped
    volumes:
      - db:/var/lib/conduwuit
      #- ./conduwuit.toml:/etc/conduwuit.toml
      - db:/var/lib/continuwuity
      - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
      #- ./continuwuity.toml:/etc/continuwuity.toml
    environment:
      CONDUWUIT_SERVER_NAME: example.com # EDIT THIS
      CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
      CONDUWUIT_PORT: 6167
      CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONDUWUIT_ALLOW_REGISTRATION: 'true'
      CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
      #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
      CONDUWUIT_ALLOW_FEDERATION: 'true'
      CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
      #CONDUWUIT_LOG: warn,state_res=warn
      CONDUWUIT_ADDRESS: 0.0.0.0
      #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
      CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS
      CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
      CONTINUWUITY_PORT: 6167
      CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONTINUWUITY_ALLOW_REGISTRATION: 'true'
      CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
      #CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
      CONTINUWUITY_ALLOW_FEDERATION: 'true'
      CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
      #CONTINUWUITY_LOG: warn,state_res=warn
      CONTINUWUITY_ADDRESS: 0.0.0.0
      #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
    networks:
      - caddy
    labels:
@@ -1,56 +1,65 @@
# conduwuit - Behind Traefik Reverse Proxy
# Continuwuity - Behind Traefik Reverse Proxy

services:
  homeserver:
    ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image,
    ### If you already built the Continuwuity image with 'docker build' or want to use the Docker Hub image,
    ### then you are ready to go.
    image: girlbossceo/conduwuit:latest
    image: forgejo.ellis.link/continuwuation/continuwuity:latest
    restart: unless-stopped
    volumes:
      - db:/var/lib/conduwuit
      #- ./conduwuit.toml:/etc/conduwuit.toml
      - db:/var/lib/continuwuity
      - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
      #- ./continuwuity.toml:/etc/continuwuity.toml
    networks:
      - proxy
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
      - "traefik.http.routers.continuwuity.entrypoints=websecure"
      - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
      - "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
      # Uncomment and adjust the following if you want to use middleware
      # - "traefik.http.routers.continuwuity.middlewares=secureHeaders@file"
    environment:
      CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS
      CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
      CONDUWUIT_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this
      CONDUWUIT_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server
      #CONDUWUIT_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read
      CONDUWUIT_ADDRESS: 0.0.0.0
      CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you want to change it
      CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
      #CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above
      ### Uncomment and change values as desired, note that conduwuit has plenty of config options, so you should check out the example config too
      CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
      CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
      CONTINUWUITY_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this
      CONTINUWUITY_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server
      #CONTINUWUITY_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read
      CONTINUWUITY_ADDRESS: 0.0.0.0
      CONTINUWUITY_PORT: 6167 # you need to match this with the traefik load balancer label if you want to change it
      CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
      #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
      ### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example config too
      # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
      # CONDUWUIT_LOG: info # default is: "warn,state_res=warn"
      # CONDUWUIT_ALLOW_ENCRYPTION: 'true'
      # CONDUWUIT_ALLOW_FEDERATION: 'true'
      # CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      # CONDUWUIT_ALLOW_INCOMING_PRESENCE: true
      # CONDUWUIT_ALLOW_OUTGOING_PRESENCE: true
      # CONDUWUIT_ALLOW_LOCAL_PRESENCE: true
      # CONDUWUIT_WORKERS: 10
      # CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      # CONDUWUIT_NEW_USER_DISPLAYNAME_SUFFIX = "🏳️‍⚧️"
      # CONTINUWUITY_LOG: info # default is: "warn,state_res=warn"
      # CONTINUWUITY_ALLOW_ENCRYPTION: 'true'
      # CONTINUWUITY_ALLOW_FEDERATION: 'true'
      # CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
      # CONTINUWUITY_ALLOW_INCOMING_PRESENCE: true
      # CONTINUWUITY_ALLOW_OUTGOING_PRESENCE: true
      # CONTINUWUITY_ALLOW_LOCAL_PRESENCE: true
      # CONTINUWUITY_WORKERS: 10
      # CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      # CONTINUWUITY_NEW_USER_DISPLAYNAME_SUFFIX = "🏳️‍⚧️"

      # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN
      # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a seperate
      # We need some way to serve the client and server .well-known json. The simplest way is via the CONTINUWUITY_WELL_KNOWN
      # variable / config option, there are multiple ways to do this, e.g. in the continuwuity.toml file, and in a separate
      # reverse proxy, but since you do not have a reverse proxy and following this guide, this example is included
      CONDUWUIT_WELL_KNOWN: |
      CONTINUWUITY_WELL_KNOWN: |
        {
        client=https://your.server.name.example,
        server=your.server.name.example:443
        }
    #cpuset: "0-4" # Uncomment to limit to specific CPU cores
    ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
    ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
      nofile:
        soft: 1048567
        hard: 1048567

  ### Uncomment if you want to use your own Element-Web App.
  ### Note: You need to provide a config.json for Element and you also need a second
  ### Domain or Subdomain for the communication between Element and conduwuit
  ### Domain or Subdomain for the communication between Element and Continuwuity
  ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
  # element-web:
  #   image: vectorim/element-web:latest
@@ -1,34 +1,34 @@
# conduwuit
# Continuwuity

services:
  homeserver:
    ### If you already built the conduwuit image with 'docker build' or want to use a registry image,
    ### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
    ### then you are ready to go.
    image: girlbossceo/conduwuit:latest
    image: forgejo.ellis.link/continuwuation/continuwuity:latest
    restart: unless-stopped
    ports:
      - 8448:6167
    volumes:
      - db:/var/lib/conduwuit
      #- ./conduwuit.toml:/etc/conduwuit.toml
      - db:/var/lib/continuwuity
      #- ./continuwuity.toml:/etc/continuwuity.toml
    environment:
      CONDUWUIT_SERVER_NAME: your.server.name # EDIT THIS
      CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
      CONDUWUIT_PORT: 6167
      CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONDUWUIT_ALLOW_REGISTRATION: 'true'
      CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
      #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
      CONDUWUIT_ALLOW_FEDERATION: 'true'
      CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
      #CONDUWUIT_LOG: warn,state_res=warn
      CONDUWUIT_ADDRESS: 0.0.0.0
      #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
      CONTINUWUITY_SERVER_NAME: your.server.name # EDIT THIS
      CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
      CONTINUWUITY_PORT: 6167
      CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
      CONTINUWUITY_ALLOW_REGISTRATION: 'true'
      CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
      #CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
      CONTINUWUITY_ALLOW_FEDERATION: 'true'
      CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
      CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
      #CONTINUWUITY_LOG: warn,state_res=warn
      CONTINUWUITY_ADDRESS: 0.0.0.0
      #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
      #
  ### Uncomment if you want to use your own Element-Web App.
  ### Note: You need to provide a config.json for Element and you also need a second
  ### Domain or Subdomain for the communication between Element and conduwuit
  ### Domain or Subdomain for the communication between Element and Continuwuity
  ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
  # element-web:
  #   image: vectorim/element-web:latest
@@ -1,31 +1,20 @@
# conduwuit for Docker
# Continuwuity for Docker

## Docker

To run conduwuit with Docker you can either build the image yourself or pull it
To run Continuwuity with Docker, you can either build the image yourself or pull it
from a registry.

### Use a registry

OCI images for conduwuit are available in the registries listed below.
OCI images for Continuwuity are available in the registries listed below.

| Registry | Image | Size | Notes |
| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- |
| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable latest tagged image. |
| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable latest tagged image. |
| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable latest tagged image. |
| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh] | ![Image Size][shield-main] | Stable main branch. |
| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:main][gl] | ![Image Size][shield-main] | Stable main branch. |
| Docker Hub | [docker.io/girlbossceo/conduwuit:main][dh] | ![Image Size][shield-main] | Stable main branch. |
| Registry | Image | Notes |
| --------------- | --------------------------------------------------------------- | -----------------------|
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest][fj] | Latest tagged image. |
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main][fj] | Main branch image. |

[dh]: https://hub.docker.com/r/girlbossceo/conduwuit
[gh]: https://github.com/girlbossceo/conduwuit/pkgs/container/conduwuit
[gl]: https://gitlab.com/conduwuit/conduwuit/container_registry/6369729
[shield-latest]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest
[shield-main]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main

OCI image `.tar.gz` files are also hosted directly at when uploaded by CI with a
commit hash/revision or a tagged release: <https://pup.systems/~strawberry/conduwuit/>
[fj]: https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity

Use

@@ -37,35 +26,35 @@ to pull it to your machine.

### Run

When you have the image you can simply run it with
When you have the image, you can simply run it with

```bash
docker run -d -p 8448:6167 \
    -v db:/var/lib/conduwuit/ \
    -e CONDUWUIT_SERVER_NAME="your.server.name" \
    -e CONDUWUIT_ALLOW_REGISTRATION=false \
    --name conduwuit $LINK
    -v db:/var/lib/continuwuity/ \
    -e CONTINUWUITY_SERVER_NAME="your.server.name" \
    -e CONTINUWUITY_ALLOW_REGISTRATION=false \
    --name continuwuity $LINK
```

or you can use [docker compose](#docker-compose).
or you can use [Docker Compose](#docker-compose).

The `-d` flag lets the container run in detached mode. You may supply an
optional `conduwuit.toml` config file, the example config can be found
optional `continuwuity.toml` config file, the example config can be found
[here](../configuration/examples.md). You can pass in different env vars to
change config values on the fly. You can even configure conduwuit completely by
change config values on the fly. You can even configure Continuwuity completely by
using env vars. For an overview of possible values, please take a look at the
[`docker-compose.yml`](docker-compose.yml) file.

If you just want to test conduwuit for a short time, you can use the `--rm`
flag, which will clean up everything related to your container after you stop
If you just want to test Continuwuity for a short time, you can use the `--rm`
flag, which cleans up everything related to your container after you stop
it.

### Docker-compose

If the `docker run` command is not for you or your setup, you can also use one
If the `docker run` command is not suitable for you or your setup, you can also use one
of the provided `docker-compose` files.

Depending on your proxy setup, you can use one of the following files;
Depending on your proxy setup, you can use one of the following files:

- If you already have a `traefik` instance set up, use
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)

@@ -76,7 +65,7 @@ Depending on your proxy setup, you can use one of the following files;
`example.com` placeholders with your own domain
- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml)

When picking the traefik-related compose file, rename it so it matches
When picking the Traefik-related compose file, rename it to
`docker-compose.yml`, and rename the override file to
`docker-compose.override.yml`. Edit the latter with the values you want for your
server.

@@ -88,40 +77,40 @@ create the `caddy` network before spinning up the containers:
docker network create caddy
```

After that, you can rename it so it matches `docker-compose.yml` and spin up the
After that, you can rename it to `docker-compose.yml` and spin up the
containers!

Additional info about deploying conduwuit can be found [here](generic.md).
Additional info about deploying Continuwuity can be found [here](generic.md).

### Build

Official conduwuit images are built using Nix's
[`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are
repeatable and reproducible by anyone, keeps the images lightweight, and can be
built offline.
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables efficient multi-platform builds.

This also ensures portability of our images because `buildLayeredImage` builds
OCI images, not Docker images, and works with other container software.
The resulting images are widely compatible with Docker and other container runtimes like Podman or containerd.

The OCI images are OS-less with only a very minimal environment of the `tini`
init system, CA certificates, and the conduwuit binary. This does mean there is
not a shell, but in theory you can get a shell by adding the necessary layers
to the layered image. However it's very unlikely you will need a shell for any
real troubleshooting.
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates, and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition.

The flake file for the OCI image definition is at [`nix/pkgs/oci-image/default.nix`][oci-image-def].
To build an image locally using Docker Buildx, you can typically run a command like:

To build an OCI image using Nix, the following outputs can be built:
- `nix build -L .#oci-image` (default features, x86_64 glibc)
- `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl)
- `nix build -L .#oci-image-aarch64-linux-musl` (default features, aarch64 musl)
- `nix build -L .#oci-image-x86_64-linux-musl-all-features` (all features, x86_64 musl)
- `nix build -L .#oci-image-aarch64-linux-musl-all-features` (all features, aarch64 musl)
```bash
# Build for the current platform and load into the local Docker daemon
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .

# Example: Build for specific platforms and push to a registry.
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push

# Example: Build binary optimized for the current CPU
# docker buildx build --load --tag continuwuity:latest --build-arg TARGET_CPU=native -f docker/Dockerfile .
```

Refer to the Docker Buildx documentation for more advanced build options.

[dockerfile-path]: ../../docker/Dockerfile

### Run

If you already have built the image or want to use one from the registries, you
can just start the container and everything else in the compose file in detached
If you have already built the image or want to use one from the registries, you
can start the container and everything else in the compose file in detached
mode with:

```bash

@@ -132,25 +121,26 @@ docker compose up -d

### Use Traefik as Proxy

As a container user, you probably know about Traefik. It is a easy to use
reverse proxy for making containerized app and services available through the
As a container user, you probably know about Traefik. It is an easy-to-use
reverse proxy for making containerized apps and services available through the
web. With the two provided files,
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy
to deploy and use conduwuit, with a little caveat. If you already took a look at
the files, then you should have seen the `well-known` service, and that is the
little caveat. Traefik is simply a proxy and loadbalancer and is not able to
serve any kind of content, but for conduwuit to federate, we need to either
expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client`
to deploy and use Continuwuity, with a small caveat. If you have already looked at
the files, you should have seen the `well-known` service, which is the
small caveat. Traefik is simply a proxy and load balancer and cannot
serve any kind of content. For Continuwuity to federate, we need to either
expose ports `443` and `8448` or serve two endpoints: `.well-known/matrix/client`
and `.well-known/matrix/server`.

With the service `well-known` we use a single `nginx` container that will serve
With the service `well-known`, we use a single `nginx` container that serves
those two files.

Alternatively, you can use Continuwuity's built-in delegation file capability. Set up the delegation files in the configuration file, and then proxy paths under `/.well-known/matrix` to continuwuity. For example, the label ``traefik.http.routers.continuwuity.rule=(Host(`matrix.ellis.link`) || (Host(`ellis.link`) && PathPrefix(`/.well-known/matrix`)))`` does this for the domain `ellis.link`.
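For reference, a minimal sketch of the two delegation documents in question, whichever component ends up serving them (hostnames are placeholders):

```
# Served at /.well-known/matrix/server
{"m.server": "matrix.example.com:443"}

# Served at /.well-known/matrix/client
{"m.homeserver": {"base_url": "https://matrix.example.com"}}
```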

## Voice communication

See the [TURN](../turn.md) page.

[nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage
[oci-image-def]: https://github.com/girlbossceo/conduwuit/blob/main/nix/pkgs/oci-image/default.nix

@@ -1,5 +1,5 @@
# conduwuit for FreeBSD
# Continuwuity for FreeBSD

conduwuit at the moment does not provide FreeBSD builds or have FreeBSD packaging, however conduwuit does build and work on FreeBSD using the system-provided RocksDB.
Continuwuity currently does not provide FreeBSD builds or FreeBSD packaging. However, Continuwuity does build and work on FreeBSD using the system-provided RocksDB.

Contributions for getting conduwuit packaged are welcome.
Contributions to get Continuwuity packaged for FreeBSD are welcome.
@@ -2,44 +2,53 @@

> ### Getting help
>
> If you run into any problems while setting up conduwuit, ask us in
> `#conduwuit:puppygock.gay` or [open an issue on
> GitHub](https://github.com/girlbossceo/conduwuit/issues/new).
> If you run into any problems while setting up Continuwuity, ask us in
> `#continuwuity:continuwuity.org` or [open an issue on
> Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new).

## Installing conduwuit
## Installing Continuwuity

### Static prebuilt binary

You may simply download the binary that fits your machine architecture (x86_64
or aarch64). Run `uname -m` to see what you need.

Prebuilt fully static musl binaries can be downloaded from the latest tagged
release [here](https://github.com/girlbossceo/conduwuit/releases/latest) or
`main` CI branch workflow artifact output. These also include Debian/Ubuntu
You can download prebuilt fully static musl binaries from the latest tagged
release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or
from the `main` CI branch workflow artifact output. These also include Debian/Ubuntu
packages.

Binaries are also available on my website directly at: <https://pup.systems/~strawberry/conduwuit/>

These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit
You can download these directly using curl. The `ci-bins` are CI workflow binaries organized by commit
hash/revision, and `releases` are tagged releases. Sort by descending last
modified for the latest.
modified date to find the latest.

These binaries have jemalloc and io_uring statically linked and included with
them, so no additional dynamic dependencies need to be installed.

For the **best** performance; if using an `x86_64` CPU made in the last ~15 years,
we recommend using the `-haswell-` optimised binaries. This sets
`-march=haswell` which is the most compatible and highest performance with
optimised binaries. The database backend, RocksDB, most benefits from this as it
will then use hardware accelerated CRC32 hashing/checksumming which is critical
For the **best** performance: if you are using an `x86_64` CPU made in the last ~15 years,
we recommend using the `-haswell-` optimized binaries. These set
`-march=haswell`, which provides the most compatible and highest performance with
optimized binaries. The database backend, RocksDB, benefits most from this as it
uses hardware-accelerated CRC32 hashing/checksumming, which is critical
for performance.

### Compiling

Alternatively, you may compile the binary yourself. We recommend using
Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most
guaranteed reproducibiltiy and easiest to get a build environment and output
going. This also allows easy cross-compilation.
Alternatively, you may compile the binary yourself.

### Building with the Rust toolchain

If wanting to build using standard Rust toolchains, make sure you install:

- (On linux) `liburing-dev` on the compiling machine, and `liburing` on the target host
- (On linux) `pkg-config` on the compiling machine to allow finding `liburing`
- A C++ compiler and (on linux) `libclang` for RocksDB

You can build Continuwuity using `cargo build --release`.
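A hedged sketch of those steps on a Debian-style system (package names differ on other distributions):

```bash
# Compile-time dependencies listed above (Debian/Ubuntu package names)
sudo apt install liburing-dev pkg-config clang libclang-dev

# Build an optimised release binary
cargo build --release
```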

### Building with Nix

If you prefer, you can use Nix (or [Lix](https://lix.systems)) to build Continuwuity. This provides improved reproducibility and makes it easy to set up a build environment and generate output. This approach also allows for easy cross-compilation.

You can run the `nix build -L .#static-x86_64-linux-musl-all-features` or
`nix build -L .#static-aarch64-linux-musl-all-features` commands based

@@ -47,44 +56,38 @@ on architecture to cross-compile the necessary static binary located at
`result/bin/conduwuit`. This is reproducible with the static binaries produced
in our CI.

If wanting to build using standard Rust toolchains, make sure you install:
- `liburing-dev` on the compiling machine, and `liburing` on the target host
- LLVM and libclang for RocksDB
## Adding a Continuwuity user

You can build conduwuit using `cargo build --release --all-features`
While Continuwuity can run as any user, it is better to use dedicated users for
different services. This also ensures that the file permissions
are set up correctly.

## Adding a conduwuit user

While conduwuit can run as any user it is better to use dedicated users for
different services. This also allows you to make sure that the file permissions
are correctly set up.

In Debian, you can use this command to create a conduwuit user:
In Debian, you can use this command to create a Continuwuity user:

```bash
sudo adduser --system conduwuit --group --disabled-login --no-create-home
sudo adduser --system continuwuity --group --disabled-login --no-create-home
```

For distros without `adduser` (or where it's a symlink to `useradd`):

```bash
sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit
sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity
```

## Forwarding ports in the firewall or the router

Matrix's default federation port is port 8448, and clients must be using port 443.
If you would like to use only port 443, or a different port, you will need to setup
delegation. conduwuit has config options for doing delegation, or you can configure
your reverse proxy to manually serve the necessary JSON files to do delegation
Matrix's default federation port is 8448, and clients must use port 443.
If you would like to use only port 443 or a different port, you will need to set up
delegation. Continuwuity has configuration options for delegation, or you can configure
your reverse proxy to manually serve the necessary JSON files for delegation
(see the `[global.well_known]` config section).
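As a hedged sketch (the domain is a placeholder; see the example config for the authoritative key names), that section can look like:

```toml
[global.well_known]
client = "https://matrix.example.com"
server = "matrix.example.com:443"
```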
|
||||
|
||||
If conduwuit runs behind a router or in a container and has a different public
|
||||
IP address than the host system these public ports need to be forwarded directly
|
||||
or indirectly to the port mentioned in the config.
|
||||
If Continuwuity runs behind a router or in a container and has a different public
|
||||
IP address than the host system, you need to forward these public ports directly
|
||||
or indirectly to the port mentioned in the configuration.
|
||||
|
||||
Note for NAT users; if you have trouble connecting to your server from the inside
|
||||
of your network, you need to research your router and see if it supports "NAT
|
||||
Note for NAT users: if you have trouble connecting to your server from inside
|
||||
your network, check if your router supports "NAT
|
||||
hairpinning" or "NAT loopback".
|
||||
|
||||
If your router does not support this feature, you need to research doing local
|
||||
|
@ -94,19 +97,19 @@ on the network level, consider something like NextDNS or Pi-Hole.
|
|||
|
||||
## Setting up a systemd service
|
||||
|
||||
Two example systemd units for conduwuit can be found
|
||||
You can find two example systemd units for Continuwuity
|
||||
[on the configuration page](../configuration/examples.md#debian-systemd-unit-file).
|
||||
You may need to change the `ExecStart=` path to where you placed the conduwuit
|
||||
binary if it is not `/usr/bin/conduwuit`.
|
||||
You may need to change the `ExecStart=` path to match where you placed the Continuwuity
|
||||
binary if it is not in `/usr/bin/conduwuit`.
|
||||
|
||||
On systems where rsyslog is used alongside journald (e.g. Red Hat-based distros
|
||||
and OpenSUSE), put `$EscapeControlCharactersOnReceive off` inside
|
||||
`/etc/rsyslog.conf` to allow color in logs.
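A minimal sketch of applying that change (verify your distro's rsyslog layout first):

```bash
echo '$EscapeControlCharactersOnReceive off' | sudo tee -a /etc/rsyslog.conf
sudo systemctl restart rsyslog
```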
|
||||
|
||||
If you are using a different `database_path` other than the systemd unit
|
||||
If you are using a different `database_path` than the systemd unit's
|
||||
configured default `/var/lib/conduwuit`, you need to add your path to the
|
||||
systemd unit's `ReadWritePaths=`. This can be done by either directly editing
|
||||
`conduwuit.service` and reloading systemd, or running `systemctl edit conduwuit.service`
|
||||
systemd unit's `ReadWritePaths=`. You can do this by either directly editing
|
||||
`conduwuit.service` and reloading systemd, or by running `systemctl edit conduwuit.service`
|
||||
and entering the following:
|
||||
|
||||
```
|
||||
|
@ -114,10 +117,10 @@ and entering the following:
|
|||
ReadWritePaths=/path/to/custom/database/path
|
||||
```
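After editing the unit (directly or via a drop-in), reload systemd and restart the service so the change takes effect; assuming the service is named `conduwuit` as in the example unit:

```bash
sudo systemctl daemon-reload
sudo systemctl restart conduwuit
```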
|
||||
|
||||
## Creating the conduwuit configuration file
|
||||
## Creating the Continuwuity configuration file
|
||||
|
||||
Now we need to create the conduwuit's config file in
|
||||
`/etc/conduwuit/conduwuit.toml`. The example config can be found at
|
||||
Now you need to create the Continuwuity configuration file in
|
||||
`/etc/continuwuity/continuwuity.toml`. You can find an example configuration at
|
||||
[conduwuit-example.toml](../configuration/examples.md).
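For example, if you have a local copy of the example config (filename assumed), you could start from it like this:

```bash
sudo mkdir -p /etc/continuwuity
sudo cp conduwuit-example.toml /etc/continuwuity/continuwuity.toml
sudoedit /etc/continuwuity/continuwuity.toml
```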
|
||||
|
||||
**Please take a moment to read the config. You need to change at least the
|
||||
|
@ -127,8 +130,8 @@ RocksDB is the only supported database backend.
|
|||
|
||||
## Setting the correct file permissions
|
||||
|
||||
If you are using a dedicated user for conduwuit, you will need to allow it to
|
||||
read the config. To do that you can run this:
|
||||
If you are using a dedicated user for Continuwuity, you need to allow it to
|
||||
read the configuration. To do this, run:
|
||||
|
||||
```bash
|
||||
sudo chown -R root:root /etc/conduwuit
|
||||
|
@ -139,19 +142,19 @@ If you use the default database path you also need to run this:
|
|||
|
||||
```bash
|
||||
sudo mkdir -p /var/lib/conduwuit/
|
||||
sudo chown -R conduwuit:conduwuit /var/lib/conduwuit/
|
||||
sudo chown -R continuwuity:continuwuity /var/lib/conduwuit/
|
||||
sudo chmod 700 /var/lib/conduwuit/
|
||||
```
|
||||
|
||||
## Setting up the Reverse Proxy
|
||||
|
||||
We recommend Caddy as a reverse proxy, as it is trivial to use, handling TLS certificates, reverse proxy headers, etc transparently with proper defaults.
|
||||
We recommend Caddy as a reverse proxy because it is trivial to use and handles TLS certificates, reverse proxy headers, etc. transparently with proper defaults.
|
||||
For other software, please refer to their respective documentation or online guides.
|
||||
|
||||
### Caddy
|
||||
|
||||
After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile`
|
||||
and enter this (substitute for your server name).
|
||||
and enter the following (substitute your actual server name):
|
||||
|
||||
```caddyfile
|
||||
your.server.name, your.server.name:8448 {
|
||||
|
@ -170,17 +173,17 @@ sudo systemctl enable --now caddy
|
|||
|
||||
### Other Reverse Proxies
|
||||
|
||||
As we would prefer our users to use Caddy, we will not provide configuration files for other proxys.
|
||||
As we prefer our users to use Caddy, we do not provide configuration files for other proxies.
|
||||
|
||||
You will need to reverse proxy everything under following routes:
|
||||
You will need to reverse proxy everything under the following routes:
|
||||
- `/_matrix/` - core Matrix C-S and S-S APIs
|
||||
- `/_conduwuit/` - ad-hoc conduwuit routes such as `/local_user_count` and
|
||||
- `/_conduwuit/` and/or `/_continuwuity/` - ad-hoc Continuwuity routes such as `/local_user_count` and
|
||||
`/server_version`
|
||||
|
||||
You can optionally reverse proxy the following individual routes:
|
||||
- `/.well-known/matrix/client` and `/.well-known/matrix/server` if using
|
||||
conduwuit to perform delegation (see the `[global.well_known]` config section)
|
||||
- `/.well-known/matrix/support` if using conduwuit to send the homeserver admin
|
||||
Continuwuity to perform delegation (see the `[global.well_known]` config section)
|
||||
- `/.well-known/matrix/support` if using Continuwuity to send the homeserver admin
|
||||
contact and support page (formerly known as MSC1929)
|
||||
- `/` if you would like to see `hewwo from conduwuit woof!` at the root
|
||||
|
||||
|
@ -195,21 +198,21 @@ Examples of delegation:
|
|||
|
||||
For Apache and Nginx there are many examples available online.
|
||||
|
||||
Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization
|
||||
header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here.
|
||||
Lighttpd is not supported as it appears to interfere with the `X-Matrix` Authorization
|
||||
header, making federation non-functional. If you find a workaround, please share it so we can add it to this documentation.
|
||||
|
||||
If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can).
|
||||
If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from interfering with the `X-Matrix` header (note that Apache is not ideal as a general reverse proxy, so we discourage using it if alternatives are available).
|
||||
|
||||
If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so:
|
||||
If using Nginx, you need to pass the request URI to Continuwuity using `$request_uri`, like this:
|
||||
- `proxy_pass http://127.0.0.1:6167$request_uri;`
|
||||
- `proxy_pass http://127.0.0.1:6167;`
|
||||
|
||||
Nginx users need to increase `client_max_body_size` (default is 1M) to match
|
||||
Nginx users need to increase the `client_max_body_size` setting (default is 1M) to match the
|
||||
`max_request_size` defined in conduwuit.toml.
|
||||
|
||||
## You're done
|
||||
|
||||
Now you can start conduwuit with:
|
||||
Now you can start Continuwuity with:
|
||||
|
||||
```bash
|
||||
sudo systemctl start conduwuit
|
||||
|
@ -224,7 +227,7 @@ sudo systemctl enable conduwuit
|
|||
## How do I know it works?
|
||||
|
||||
You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your
|
||||
homeserver and try to register.
|
||||
homeserver address, and try to register.
|
||||
|
||||
You can also use these commands as a quick health check (replace
|
||||
`your.server.name`).
|
||||
|
@ -239,10 +242,10 @@ curl https://your.server.name:8448/_conduwuit/server_version
|
|||
curl https://your.server.name:8448/_matrix/federation/v1/version
|
||||
```
|
||||
|
||||
- To check if your server can talk with other homeservers, you can use the
|
||||
- To check if your server can communicate with other homeservers, use the
|
||||
[Matrix Federation Tester](https://federationtester.matrix.org/). If you can
|
||||
register but cannot join federated rooms check your config again and also check
|
||||
if the port 8448 is open and forwarded correctly.
|
||||
register but cannot join federated rooms, check your configuration and verify
|
||||
that port 8448 is open and forwarded correctly.
|
||||
|
||||
# What's next?
|
||||
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
# conduwuit for Kubernetes
|
||||
# Continuwuity for Kubernetes
|
||||
|
||||
conduwuit doesn't support horizontal scalability or distributed loading
|
||||
natively, however a community maintained Helm Chart is available here to run
|
||||
Continuwuity doesn't support horizontal scalability or distributed loading
|
||||
natively. However, a community-maintained Helm Chart is available here to run
|
||||
conduwuit on Kubernetes: <https://gitlab.cronce.io/charts/conduwuit>
|
||||
|
||||
Should changes need to be made, please reach out to the maintainer in our
|
||||
Matrix room as this is not maintained/controlled by the conduwuit maintainers.
|
||||
This should be compatible with Continuwuity, but you will need to change the image reference.
|
||||
|
||||
If changes need to be made, please reach out to the maintainer, as this is not maintained or controlled by the Continuwuity maintainers.
|
||||
|
|
|
@ -1,108 +1,130 @@
|
|||
# conduwuit for NixOS
|
||||
# Continuwuity for NixOS
|
||||
|
||||
conduwuit can be acquired by Nix (or [Lix][lix]) from various places:
|
||||
NixOS packages Continuwuity as `matrix-continuwuity`. This package includes both the Continuwuity software and a dedicated NixOS module for configuration and deployment.
|
||||
|
||||
* The `flake.nix` at the root of the repo
|
||||
* The `default.nix` at the root of the repo
|
||||
* From conduwuit's binary cache
|
||||
## Installation methods
|
||||
|
||||
A community maintained NixOS package is available at [`conduwuit`](https://search.nixos.org/packages?channel=unstable&show=conduwuit&from=0&size=50&sort=relevance&type=packages&query=conduwuit)
|
||||
You can acquire Continuwuity with Nix (or [Lix][lix]) from these sources:
|
||||
|
||||
### Binary cache
|
||||
* Directly from Nixpkgs using the official package (`pkgs.matrix-continuwuity`)
|
||||
* The `flake.nix` at the root of the Continuwuity repo
|
||||
* The `default.nix` at the root of the Continuwuity repo
|
||||
|
||||
A binary cache for conduwuit that the CI/CD publishes to is available at the
|
||||
following places (both are the same just different names):
|
||||
## NixOS module
|
||||
|
||||
```
|
||||
https://attic.kennel.juneis.dog/conduit
|
||||
conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk=
|
||||
Continuwuity now has an official NixOS module that simplifies configuration and deployment. The module is available in Nixpkgs as `services.matrix-continuwuity` from NixOS 25.05.
|
||||
|
||||
https://attic.kennel.juneis.dog/conduwuit
|
||||
conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE=
|
||||
Here's a basic example of how to use the module:
|
||||
|
||||
```nix
|
||||
{ config, pkgs, ... }:
|
||||
|
||||
{
|
||||
services.matrix-continuwuity = {
|
||||
enable = true;
|
||||
settings = {
|
||||
global = {
|
||||
server_name = "example.com";
|
||||
# Listening on localhost by default
|
||||
# address and port are handled automatically
|
||||
allow_registration = false;
|
||||
allow_encryption = true;
|
||||
allow_federation = true;
|
||||
trusted_servers = [ "matrix.org" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
The binary caches were recreated some months ago due to attic issues. The old public
|
||||
keys were:
|
||||
### Available options
|
||||
|
||||
```
|
||||
conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg=
|
||||
conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw=
|
||||
```
|
||||
The NixOS module provides these configuration options:
|
||||
|
||||
If needed, we have a binary cache on Cachix but it is only limited to 5GB:
|
||||
- `enable`: Enable the Continuwuity service
|
||||
- `user`: The user to run Continuwuity as (defaults to "continuwuity")
|
||||
- `group`: The group to run Continuwuity as (defaults to "continuwuity")
|
||||
- `extraEnvironment`: Extra environment variables to pass to the Continuwuity server
|
||||
- `package`: The Continuwuity package to use
|
||||
- `settings`: The Continuwuity configuration (in TOML format)
|
||||
|
||||
```
|
||||
https://conduwuit.cachix.org
|
||||
conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
|
||||
```
|
||||
|
||||
If specifying a Git remote URL in your flake, you can use any remotes that
|
||||
are specified on the README (the mirrors), such as the GitHub: `github:girlbossceo/conduwuit`
|
||||
|
||||
### NixOS module
|
||||
|
||||
The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions
|
||||
welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure
|
||||
conduwuit.
|
||||
|
||||
### Conduit NixOS Config Module and SQLite
|
||||
|
||||
Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend.
|
||||
Conduwuit dropped SQLite support in favor of exclusively supporting the much faster RocksDB.
|
||||
Make sure that you are using the RocksDB backend before migrating!
|
||||
|
||||
There is a [tool to migrate a Conduit SQLite database to
|
||||
RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/).
|
||||
|
||||
If you want to run the latest code, you should get conduwuit from the `flake.nix`
|
||||
or `default.nix` and set [`services.matrix-conduit.package`][package]
|
||||
appropriately to use conduwuit instead of Conduit.
|
||||
Use the `settings` option to configure Continuwuity itself. See the [example configuration file](../configuration/examples.md#example-configuration) for all available options.
|
||||
|
||||
### UNIX sockets
|
||||
|
||||
Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module
|
||||
a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX
|
||||
socket option does not exist in Conduit, and the module forcibly sets the `address` and
|
||||
`port` config options.
|
||||
The NixOS module natively supports UNIX sockets through the `global.unix_socket_path` option. When using UNIX sockets, set `global.address` to `null`:
|
||||
|
||||
```nix
|
||||
options.services.matrix-conduit.settings = lib.mkOption {
|
||||
apply = old: old // (
|
||||
if (old.global ? "unix_socket_path")
|
||||
then { global = builtins.removeAttrs old.global [ "address" "port" ]; }
|
||||
else { }
|
||||
);
|
||||
services.matrix-continuwuity = {
|
||||
enable = true;
|
||||
settings = {
|
||||
global = {
|
||||
server_name = "example.com";
|
||||
address = null; # Must be null when using unix_socket_path
|
||||
unix_socket_path = "/run/continuwuity/continuwuity.sock";
|
||||
unix_socket_perms = 660; # Default permissions for the socket
|
||||
# ...
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
```
|
||||
|
||||
Additionally, the [`matrix-conduit` systemd unit][systemd-unit] in the module does not allow
|
||||
the `AF_UNIX` socket address family in their systemd unit's `RestrictAddressFamilies=` which
|
||||
disallows the namespace from accessing or creating UNIX sockets and has to be enabled like so:
|
||||
The module automatically sets the correct `RestrictAddressFamilies` in the systemd service configuration to allow access to UNIX sockets.
|
||||
|
||||
```nix
|
||||
systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ];
|
||||
```
|
||||
### RocksDB database
|
||||
|
||||
Even though those workarounds are feasible a conduwuit NixOS configuration module, developed and
|
||||
published by the community, would be appreciated.
|
||||
Continuwuity exclusively uses RocksDB as its database backend. The module automatically sets the database path to `/var/lib/continuwuity/`; you cannot change it because the service relies on systemd's `StateDirectory`.
|
||||
|
||||
If you're migrating from Conduit with SQLite, use this [tool to migrate a Conduit SQLite database to RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/).
|
||||
|
||||
### jemalloc and hardened profile
|
||||
|
||||
conduwuit uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix]
|
||||
due to them using `scudo` by default. You must either disable/hide `scudo` from conduwuit, or
|
||||
disable jemalloc like so:
|
||||
Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] because that profile uses `scudo` by default. Either disable/hide `scudo` from Continuwuity or disable jemalloc like this:
|
||||
|
||||
```nix
|
||||
let
|
||||
conduwuit = pkgs.unstable.conduwuit.override {
|
||||
services.matrix-continuwuity = {
|
||||
enable = true;
|
||||
package = pkgs.matrix-continuwuity.override {
|
||||
enableJemalloc = false;
|
||||
};
|
||||
in
|
||||
# ...
|
||||
};
|
||||
```
|
||||
|
||||
## Upgrading from Conduit
|
||||
|
||||
If you previously used Conduit with the `services.matrix-conduit` module:
|
||||
|
||||
1. Ensure your Conduit uses the RocksDB backend, or migrate from SQLite using the [migration tool](https://github.com/ShadowJonathan/conduit_toolbox/)
|
||||
2. Switch to the new module by changing `services.matrix-conduit` to `services.matrix-continuwuity` in your configuration
|
||||
3. Update any custom configuration to match the new module's structure
|
||||
|
||||
## Reverse proxy configuration
|
||||
|
||||
You'll need to set up a reverse proxy (like nginx or caddy) to expose Continuwuity to the internet. Configure your reverse proxy to forward requests to `/_matrix` on ports 443 and 8448 to your Continuwuity instance.
|
||||
|
||||
Here's an example nginx configuration:
|
||||
|
||||
```nginx
|
||||
server {
|
||||
listen 443 ssl;
|
||||
listen [::]:443 ssl;
|
||||
listen 8448 ssl;
|
||||
listen [::]:8448 ssl;
|
||||
|
||||
server_name example.com;
|
||||
|
||||
# SSL configuration here...
|
||||
|
||||
location /_matrix/ {
|
||||
proxy_pass http://127.0.0.1:6167$request_uri;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
[lix]: https://lix.systems/
|
||||
[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
|
||||
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
|
||||
[hardened.nix]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/profiles/hardened.nix#L22
|
||||
[systemd-unit]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/conduit.nix#L132
|
||||
[hardened.nix]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/profiles/hardened.nix
|
||||
|
|
|
@ -2,11 +2,11 @@
|
|||
|
||||
Information about developing the project. If you are only interested in using
|
||||
it, you can safely ignore this page. If you plan on contributing, see the
|
||||
[contributor's guide](./contributing.md).
|
||||
[contributor's guide](./contributing.md) and [code style guide](./development/code_style.md).
|
||||
|
||||
## conduwuit project layout
|
||||
## Continuwuity project layout
|
||||
|
||||
conduwuit uses a collection of sub-crates, packages, or workspace members
|
||||
Continuwuity uses a collection of sub-crates, packages, or workspace members
|
||||
that indicate what each general area of code is for. All of the workspace
|
||||
members are under `src/`. The workspace definition is at the top level / root
|
||||
`Cargo.toml`.
|
||||
|
@ -14,11 +14,11 @@ members are under `src/`. The workspace definition is at the top level / root
|
|||
The crate names are generally self-explanatory:
|
||||
- `admin` is the admin room
|
||||
- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc
|
||||
- `core` is core conduwuit functionality like config loading, error definitions,
|
||||
- `core` is core Continuwuity functionality like config loading, error definitions,
|
||||
global utilities, logging infrastructure, etc
|
||||
- `database` is RocksDB methods, helpers, RocksDB config, and general database definitions,
|
||||
utilities, or functions
|
||||
- `macros` are conduwuit Rust [macros][macros] like general helper macros, logging
|
||||
- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging
|
||||
and error handling macros, and [syn][syn] and [procedural macros][proc-macro]
|
||||
used for admin room commands and others
|
||||
- `main` is the "primary" sub-crate. This is where the `main()` function lives,
|
||||
|
@ -35,7 +35,7 @@ if you truly find yourself needing to, we recommend reaching out to us in
|
|||
the Matrix room for discussions about it beforehand.
|
||||
|
||||
The primary inspiration for this design was part of hot reloadable development,
|
||||
to support "conduwuit as a library" where specific parts can simply be swapped out.
|
||||
to support "Continuwuity as a library" where specific parts can simply be swapped out.
|
||||
There is evidence Conduit wanted to go this route too as `axum` is technically an
|
||||
optional feature in Conduit, and can be compiled without the binary or axum library
|
||||
for handling inbound web requests; but this was never completed or made to work.
|
||||
|
@ -68,36 +68,27 @@ do this if Rust supported workspace-level features to begin with.
|
|||
|
||||
## List of forked dependencies
|
||||
|
||||
During conduwuit development, we have had to fork
|
||||
some dependencies to support our use-cases in some areas. This ranges from
|
||||
things said upstream project won't accept for any reason, faster-paced
|
||||
development (unresponsive or slow upstream), conduwuit-specific usecases, or
|
||||
lack of time to upstream some things.
|
||||
During Continuwuity (and prior projects) development, we have had to fork some dependencies to support our use-cases.
|
||||
These forks exist for various reasons including features that upstream projects won't accept,
|
||||
faster-paced development, Continuwuity-specific use cases, or lack of time to upstream changes.
|
||||
|
||||
- [ruma/ruma][1]: <https://github.com/girlbossceo/ruwuma> - various performance
|
||||
improvements, more features, faster-paced development, better client/server interop
|
||||
hacks upstream won't accept, etc
|
||||
- [facebook/rocksdb][2]: <https://github.com/girlbossceo/rocksdb> - liburing
|
||||
build fixes and GCC debug build fix
|
||||
- [tikv/jemallocator][3]: <https://github.com/girlbossceo/jemallocator> - musl
|
||||
builds seem to be broken on upstream, fixes some broken/suspicious code in
|
||||
places, additional safety measures, and support redzones for Valgrind
|
||||
- [zyansheep/rustyline-async][4]:
|
||||
<https://github.com/girlbossceo/rustyline-async> - tab completion callback and
|
||||
`CTRL+\` signal quit event for conduwuit console CLI
|
||||
- [rust-rocksdb/rust-rocksdb][5]:
|
||||
<https://github.com/girlbossceo/rust-rocksdb-zaidoon1> - [`@zaidoon1`][8]'s fork
|
||||
has quicker updates, more up to date dependencies, etc. Our fork fixes musl build
|
||||
issues, removes unnecessary `gtest` include, and uses our RocksDB and jemallocator
|
||||
forks.
|
||||
- [tokio-rs/tracing][6]: <https://github.com/girlbossceo/tracing> - Implements
|
||||
`Clone` for `EnvFilter` to support dynamically changing tracing envfilter's
|
||||
alongside other logging/metrics things
|
||||
All forked dependencies are maintained under the [continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation):
|
||||
|
||||
- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various performance improvements, more features and better client/server interop
|
||||
- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via [`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes
|
||||
- [jemallocator][continuwuation-jemallocator] - Fork of [tikv/jemallocator][jemallocator] fixing musl builds, suspicious code,
|
||||
and adding support for redzones in Valgrind
|
||||
- [rustyline-async][continuwuation-rustyline-async] - Fork of [zyansheep/rustyline-async][rustyline-async] with tab completion callback
|
||||
and `CTRL+\` signal quit event for Continuwuity console CLI
|
||||
- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of [rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues,
|
||||
removing unnecessary `gtest` include, and using our RocksDB and jemallocator forks
|
||||
- [tracing][continuwuation-tracing] - Fork of [tokio-rs/tracing][tracing] implementing `Clone` for `EnvFilter` to
|
||||
support dynamically changing the tracing `EnvFilter` at runtime
|
||||
|
||||
## Debugging with `tokio-console`
|
||||
|
||||
[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a
|
||||
`tokio-console`-enabled build of conduwuit, enable the `tokio_console` feature,
|
||||
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature,
|
||||
disable the default `release_max_log_level` feature, and set the `--cfg
|
||||
tokio_unstable` flag to enable experimental tokio APIs. A build might look like
|
||||
this:
|
||||
|
@ -109,16 +100,34 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
|
|||
--features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
|
||||
```
|
||||
|
||||
You will also need to enable the `tokio_console` config option in conduwuit when
|
||||
You will also need to enable the `tokio_console` config option in Continuwuity when
|
||||
starting it. This is required because tokio-console causes a gradual memory leak / increased memory usage
|
||||
if left enabled.
|
||||
|
||||
[1]: https://github.com/ruma/ruma/
|
||||
[2]: https://github.com/facebook/rocksdb/
|
||||
[3]: https://github.com/tikv/jemallocator/
|
||||
[4]: https://github.com/zyansheep/rustyline-async/
|
||||
[5]: https://github.com/rust-rocksdb/rust-rocksdb/
|
||||
[6]: https://github.com/tokio-rs/tracing/
|
||||
## Building Docker Images
|
||||
|
||||
To build a Docker image for Continuwuity, use the standard Docker build command:
|
||||
|
||||
```bash
|
||||
docker build -f docker/Dockerfile .
|
||||
```
|
||||
|
||||
The image can be cross-compiled for different architectures.
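For example, a cross-build for 64-bit Arm might look like this (illustrative; requires BuildKit with emulation or a suitable cross builder):

```bash
docker build --platform linux/arm64 -f docker/Dockerfile .
```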
|
||||
|
||||
[continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma
|
||||
[continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb
|
||||
[continuwuation-jemallocator]: https://forgejo.ellis.link/continuwuation/jemallocator
|
||||
[continuwuation-rustyline-async]: https://forgejo.ellis.link/continuwuation/rustyline-async
|
||||
[continuwuation-rust-rocksdb]: https://forgejo.ellis.link/continuwuation/rust-rocksdb
|
||||
[continuwuation-tracing]: https://forgejo.ellis.link/continuwuation/tracing
|
||||
|
||||
[ruma]: https://github.com/ruma/ruma/
|
||||
[rocksdb]: https://github.com/facebook/rocksdb/
|
||||
[jemallocator]: https://github.com/tikv/jemallocator/
|
||||
[rustyline-async]: https://github.com/zyansheep/rustyline-async/
|
||||
[rust-rocksdb]: https://github.com/rust-rocksdb/rust-rocksdb/
|
||||
[tracing]: https://github.com/tokio-rs/tracing/
|
||||
|
||||
[7]: https://docs.rs/tokio-console/latest/tokio_console/
|
||||
[8]: https://github.com/zaidoon1/
|
||||
[9]: https://github.com/rust-lang/cargo/issues/12162
|
||||
|
|
331
docs/development/code_style.md
Normal file
|
@ -0,0 +1,331 @@
|
|||
# Code Style Guide
|
||||
|
||||
This guide outlines the coding standards and best practices for Continuwuity development. These guidelines help avoid bugs and maintain code consistency, readability, and quality across the project.
|
||||
|
||||
These guidelines apply to new code on a best-effort basis. When modifying existing code, follow existing patterns in the immediate area you're changing and then gradually improve code style when making substantial changes.
|
||||
|
||||
## General Principles
|
||||
|
||||
- **Clarity over cleverness**: Write code that is easy to understand and maintain
|
||||
- **Consistency**: Pragmatically follow existing patterns in the codebase, rather than adding new dependencies.
|
||||
- **Safety**: Prefer safe, explicit code over unsafe code with implicit requirements
|
||||
- **Performance**: Consider performance implications, but not at the expense of correctness or maintainability
|
||||
|
||||
## Formatting and Linting
|
||||
|
||||
All code must satisfy lints (clippy, rustc, rustdoc, etc) and be formatted using **nightly** rustfmt (`cargo +nightly fmt`). Many of the `rustfmt.toml` features depend on the nightly toolchain.
|
||||
|
||||
If you need to allow a lint, ensure it's either obvious why (e.g. clippy saying redundant clone but it's actually required) or add a comment explaining the reason. Do not write inefficient code just to satisfy lints. If a lint is wrong and provides a less efficient solution, allow the lint and mention that in a comment.
|
||||
|
||||
If making large formatting changes across unrelated files, create a separate commit so it can be added to the `.git-blame-ignore-revs` file.
|
||||
|
||||
## Rust-Specific Guidelines
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
Follow standard Rust naming conventions as outlined in the [Rust API Guidelines](https://rust-lang.github.io/api-guidelines/naming.html):
|
||||
|
||||
- Use `snake_case` for functions, variables, and modules
|
||||
- Use `PascalCase` for types, traits, and enum variants
|
||||
- Use `SCREAMING_SNAKE_CASE` for constants and statics
|
||||
- Use descriptive names that clearly indicate purpose
|
||||
|
||||
```rs
|
||||
// Good
|
||||
fn process_user_request(user_id: &UserId) -> Result<Response, Error> { ... }
|
||||
|
||||
const MAX_RETRY_ATTEMPTS: usize = 3;
|
||||
|
||||
struct UserSession {
|
||||
session_id: String,
|
||||
created_at: SystemTime,
|
||||
}
|
||||
|
||||
// Avoid
|
||||
fn proc_reqw(id: &str) -> Result<Resp, Err> { ... }
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
- Use `Result<T, E>` for operations that can fail
|
||||
- Prefer specific error types over generic ones
|
||||
- Use `?` operator for error propagation
|
||||
- Provide meaningful error messages
|
||||
- If needed, create or use an error enum.
|
||||
|
||||
```rs
|
||||
// Good
|
||||
fn parse_server_name(input: &str) -> Result<ServerName, InvalidServerNameError> {
|
||||
ServerName::parse(input)
|
||||
.map_err(|_| InvalidServerNameError::new(input))
|
||||
}
|
||||
|
||||
// Avoid
|
||||
fn parse_server_name(input: &str) -> Result<ServerName, Box<dyn Error>> {
|
||||
Ok(ServerName::parse(input).unwrap())
|
||||
}
|
||||
```
|
||||
|
||||
### Option Handling
|
||||
|
||||
- Prefer explicit `Option` handling over unwrapping
|
||||
- Use combinators like `map`, `and_then`, `unwrap_or_else` when appropriate
|
||||
|
||||
```rs
|
||||
// Good
|
||||
let display_name = user.display_name
|
||||
.as_ref()
|
||||
.map(|name| name.trim())
|
||||
.filter(|name| !name.is_empty())
|
||||
.unwrap_or(&user.localpart);
|
||||
|
||||
// Avoid
|
||||
let display_name = if user.display_name.is_some() {
|
||||
user.display_name.as_ref().unwrap()
|
||||
} else {
|
||||
&user.localpart
|
||||
};
|
||||
```
|
||||
|
||||
## Logging Guidelines
|
||||
|
||||
### Structured Logging
|
||||
|
||||
**Always use structured logging instead of string interpolation.** This improves log parsing, filtering, and observability.
|
||||
|
||||
```rs
|
||||
// Good - structured parameters
|
||||
debug!(
|
||||
room_id = %room_id,
|
||||
user_id = %user_id,
|
||||
event_type = ?event.event_type(),
|
||||
"Processing room event"
|
||||
);
|
||||
|
||||
info!(
|
||||
server_name = %server_name,
|
||||
response_time_ms = response_time.as_millis(),
|
||||
"Federation request completed successfully"
|
||||
);
|
||||
|
||||
// Avoid - string interpolation
|
||||
debug!("Processing room event for {room_id} from {user_id}");
|
||||
info!("Federation request to {server_name} took {response_time:?}");
|
||||
```
|
||||
|
||||
### Log Levels
|
||||
|
||||
Use appropriate log levels:
|
||||
|
||||
- `error!`: Unrecoverable errors that affect functionality
|
||||
- `warn!`: Potentially problematic situations that don't stop execution
|
||||
- `info!`: General information about application flow
|
||||
- `debug!`: Detailed information for debugging
|
||||
- `trace!`: Very detailed information, typically only useful during development
|
||||
|
||||
Keep in mind how frequently the log line will be hit, and how relevant it is to a server operator.
|
||||
|
||||
```rs
|
||||
// Good
|
||||
error!(
|
||||
error = %err,
|
||||
room_id = %room_id,
|
||||
"Failed to send event to room"
|
||||
);
|
||||
|
||||
warn!(
|
||||
server_name = %server_name,
|
||||
attempt = retry_count,
|
||||
"Federation request failed, retrying"
|
||||
);
|
||||
|
||||
info!(
|
||||
user_id = %user_id,
|
||||
"User registered successfully"
|
||||
);
|
||||
|
||||
debug!(
|
||||
event_id = %event_id,
|
||||
auth_events = ?auth_event_ids,
|
||||
"Validating event authorization"
|
||||
);
|
||||
```
|
||||
|
||||
### Sensitive Information
|
||||
|
||||
Never log sensitive information such as:
|
||||
- Access tokens
|
||||
- Passwords
|
||||
- Private keys
|
||||
- Personal user data (unless specifically needed for debugging)
|
||||
|
||||
```rs
|
||||
// Good
|
||||
debug!(
|
||||
user_id = %user_id,
|
||||
session_id = %session_id,
|
||||
"Processing authenticated request"
|
||||
);
|
||||
|
||||
// Avoid
|
||||
debug!(
|
||||
user_id = %user_id,
|
||||
access_token = %access_token,
|
||||
"Processing authenticated request"
|
||||
);
|
||||
```
|
||||
|
||||
## Lock Management
|
||||
|
||||
### Explicit Lock Scopes
|
||||
|
||||
**Always use closure guards instead of implicitly dropped guards.** This makes lock scopes explicit and helps prevent deadlocks.
|
||||
|
||||
Use the `WithLock` trait from `core::utils::with_lock`:
|
||||
|
||||
```rs
|
||||
use conduwuit::utils::with_lock::WithLock;
|
||||
|
||||
// Good - explicit closure guard
|
||||
shared_data.with_lock(|data| {
|
||||
data.counter += 1;
|
||||
data.last_updated = SystemTime::now();
|
||||
// Lock is explicitly released here
|
||||
});
|
||||
|
||||
// Avoid - implicit guard
|
||||
{
|
||||
let mut data = shared_data.lock().unwrap();
|
||||
data.counter += 1;
|
||||
data.last_updated = SystemTime::now();
|
||||
// Lock released when guard goes out of scope - less explicit
|
||||
}
|
||||
```
|
||||
|
||||
For async contexts, use the async variant:
|
||||
|
||||
```rs
|
||||
use conduwuit::utils::with_lock::WithLockAsync;
|
||||
|
||||
// Good - async closure guard
|
||||
async_shared_data.with_lock(|data| {
|
||||
data.process_async_update();
|
||||
}).await;
|
||||
```
|
||||
|
||||
### Lock Ordering
|
||||
|
||||
When acquiring multiple locks, always acquire them in a consistent order to prevent deadlocks:
|
||||
|
||||
```rs
|
||||
// Good - consistent ordering (e.g., by memory address or logical hierarchy)
|
||||
let mut locks = [&lock_a, &lock_b, &lock_c];
|
||||
locks.sort_by_key(|lock| *lock as *const _ as usize);
|
||||
|
||||
for lock in locks {
|
||||
lock.with_lock(|data| {
|
||||
// Process data
|
||||
});
|
||||
}
|
||||
|
||||
// Avoid - inconsistent ordering that can cause deadlocks
|
||||
lock_b.with_lock(|data_b| {
|
||||
lock_a.with_lock(|data_a| {
|
||||
// Deadlock risk if another thread acquires in A->B order
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
### Code Comments
|
||||
|
||||
- Reference related documentation or parts of the specification
|
||||
- When a task can be achieved in multiple ways, explain the reasoning behind your decision
|
||||
- Update comments when code changes
|
||||
|
||||
```rs
|
||||
/// Processes a federation request with automatic retries and backoff.
|
||||
///
|
||||
/// Implements exponential backoff to handle temporary
|
||||
/// network issues and server overload gracefully.
|
||||
pub async fn send_federation_request(
|
||||
destination: &ServerName,
|
||||
request: FederationRequest,
|
||||
) -> Result<FederationResponse, FederationError> {
|
||||
// Retry with exponential backoff because federation can be flaky
|
||||
// due to network issues or temporary server overload
|
||||
let mut retry_delay = Duration::from_millis(100);
|
||||
|
||||
for attempt in 1..=MAX_RETRIES {
|
||||
match try_send_request(destination, &request).await {
|
||||
Ok(response) => return Ok(response),
|
||||
Err(err) if err.is_retriable() && attempt < MAX_RETRIES => {
|
||||
warn!(
|
||||
destination = %destination,
|
||||
attempt = attempt,
|
||||
error = %err,
|
||||
retry_delay_ms = retry_delay.as_millis(),
|
||||
"Federation request failed, retrying"
|
||||
);
|
||||
|
||||
tokio::time::sleep(retry_delay).await;
|
||||
retry_delay *= 2; // Exponential backoff
|
||||
}
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
}
|
||||
|
||||
unreachable!("Loop should have returned or failed by now")
|
||||
}
|
||||
```
|
||||
|
||||
### Async Patterns
|
||||
|
||||
- Use `async`/`await` appropriately
|
||||
- Avoid blocking operations in async contexts
|
||||
- Consider using `tokio::task::spawn_blocking` for CPU-intensive work
|
||||
|
||||
```rs
|
||||
// Good - non-blocking async operation
|
||||
pub async fn fetch_user_profile(
|
||||
&self,
|
||||
user_id: &UserId,
|
||||
) -> Result<UserProfile, Error> {
|
||||
let profile = self.db
|
||||
.get_user_profile(user_id)
|
||||
.await?;
|
||||
|
||||
Ok(profile)
|
||||
}
|
||||
|
||||
// Good - CPU-intensive work moved to blocking thread
|
||||
pub async fn generate_thumbnail(
|
||||
&self,
|
||||
image_data: Vec<u8>,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
tokio::task::spawn_blocking(move || {
|
||||
image::generate_thumbnail(image_data)
|
||||
})
|
||||
.await
|
||||
.map_err(|_| Error::TaskJoinError)?
|
||||
}
|
||||
```
|
||||
|
||||
## Inclusivity and Diversity Guidelines
|
||||
|
||||
All code and documentation must be written with inclusivity and diversity in mind. This ensures our software is welcoming and accessible to all users and contributors. Follow the [Google guide on writing inclusive code and documentation](https://developers.google.com/style/inclusive-documentation) for comprehensive guidance.
|
||||
|
||||
The following types of language are explicitly forbidden in all code, comments, documentation, and commit messages:
|
||||
|
||||
**Ableist language:** Avoid terms like "sanity check", "crazy", "insane", "cripple", or "blind to". Use alternatives like "validation", "unexpected", "disable", or "unaware of".
|
||||
|
||||
**Socially-charged technical terms:** Replace overly divisive terminology with neutral alternatives:
|
||||
- "whitelist/blacklist" → "allowlist/denylist" or "permitted/blocked"
|
||||
- "master/slave" → "primary/replica", "controller/worker", or "parent/child"
|
||||
|
||||
When working with external dependencies that use non-inclusive terminology, avoid propagating them in your own APIs and variable names.
|
||||
|
||||
Use diverse examples in documentation that avoid culturally-specific references, assumptions about user demographics, or unnecessarily gendered language. Design with accessibility and inclusivity in mind by providing clear error messages and considering diverse user needs.
|
||||
|
||||
This software is intended to be used by everyone regardless of background, identity, or ability. Write code and documentation that reflects this commitment to inclusivity.
|
|
@ -5,7 +5,7 @@ guaranteed to work at this time.
|
|||
|
||||
### Summary
|
||||
|
||||
When developing in debug-builds with the nightly toolchain, conduwuit is modular
|
||||
When developing in debug-builds with the nightly toolchain, Continuwuity is modular
|
||||
using dynamic libraries and various parts of the application are hot-reloadable
|
||||
while the server is running: HTTP API handlers, admin commands, services,
|
||||
database, etc. These are all split up into individual workspace crates as seen
|
||||
|
@ -42,7 +42,7 @@ library, macOS, and likely other host architectures are not supported (if other
|
|||
architectures work, feel free to let us know and/or make a PR updating this).
|
||||
This should work on GNU ld and lld (rust-lld) and gcc/clang, however if you
|
||||
happen to have linker issues it's recommended to try using `mold` or `gold`
|
||||
linkers, and please let us know in the [conduwuit Matrix room][7] the linker
|
||||
linkers, and please share in the [Continuwuity Matrix room][7] the linker
|
||||
error and what linker solved this issue so we can figure out a solution. Ideally
|
||||
there should be minimal friction to using this, and in the future a build script
|
||||
(`build.rs`) may be suitable for making this easier to use if the capabilities
|
||||
|
@ -52,13 +52,13 @@ allow us.
|
|||
|
||||
As of 19 May 2024, the instructions for using this are:
|
||||
|
||||
0. Have patience. Don't hesitate to join the [conduwuit Matrix room][7] to
|
||||
0. Have patience. Don't hesitate to join the [Continuwuity Matrix room][7] to
|
||||
receive help using this. As indicated by the various rustflags used and some
|
||||
of the interesting issues linked at the bottom, this is definitely not something
|
||||
the Rust ecosystem or toolchain is used to doing.
|
||||
|
||||
1. Install the nightly toolchain using rustup. You may need to use `rustup
|
||||
override set nightly` in your local conduwuit directory, or use `cargo
|
||||
override set nightly` in your local Continuwuity directory, or use `cargo
|
||||
+nightly` for all actions.
|
||||
|
||||
2. Uncomment `cargo-features` at the top level / root Cargo.toml
|
||||
|
@ -85,14 +85,14 @@ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.rustup/toolchains/nightly-x86_64-unknown
|
|||
Cargo should only rebuild what was changed / what's necessary, so it should
|
||||
not be rebuilding all the crates.
|
||||
|
||||
9. In your conduwuit server terminal, hit/send `CTRL+C` signal. This will tell
|
||||
conduwuit to find which libraries need to be reloaded, and reloads them as
|
||||
9. In your Continuwuity server terminal, hit/send `CTRL+C` signal. This will tell
|
||||
Continuwuity to find which libraries need to be reloaded, and reloads them as
|
||||
necessary.
|
||||
|
||||
10. If there were no errors, it will tell you it successfully reloaded `#`
|
||||
modules, and your changes should now be visible. Repeat 7 - 9 as needed.
|
||||
|
||||
To shutdown conduwuit in this setup, hit/send `CTRL+\`. Normal builds still
|
||||
To shutdown Continuwuity in this setup, hit/send `CTRL+\`. Normal builds still
|
||||
shutdown with `CTRL+C` as usual.
|
||||
|
||||
Steps 1 - 5 are the initial first-time steps for using this. To remove the hot
|
||||
|
@ -101,7 +101,7 @@ reload setup, revert/comment all the Cargo.toml changes.
|
|||
As mentioned in the requirements section, if you happen to have some linker
|
||||
issues, try using the `-fuse-ld=` rustflag and specify mold or gold in all the
|
||||
`rustflags` definitions in the top level Cargo.toml, and please let us know in
|
||||
the [conduwuit Matrix room][7] the problem. mold can be installed typically
|
||||
the [Continuwuity Matrix room][7] about the problem. mold can typically be installed
|
||||
through your distro, and gold is provided by the binutils package.
|
||||
|
||||
It's possible a helper script can be made to do all of this, or most preferably
|
||||
|
@ -136,7 +136,7 @@ acyclic graph. The primary rule is simple and illustrated in the figure below:
|
|||
**no crate is allowed to call a function or use a variable from a crate below
|
||||
it.**
|
||||
|
||||

|
||||
|
||||
When a symbol is referenced between crates they become bound: **crates cannot be
|
||||
|
@ -147,7 +147,7 @@ by using an `RTLD_LOCAL` binding for just one link between the main executable
|
|||
and the first crate, freeing the executable from all modules as no global
|
||||
binding ever occurs between them.
|
||||
|
||||

|
||||
|
||||
Proper resource management is essential for reliable reloading to occur. This is
|
||||
|
@ -190,11 +190,11 @@ The initial implementation PR is available [here][1].
|
|||
- [Workspace-level metadata
|
||||
(cargo-deb)](https://github.com/kornelski/cargo-deb/issues/68)
|
||||
|
||||
[1]: https://github.com/girlbossceo/conduwuit/pull/387
|
||||
[1]: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/387
|
||||
[2]: https://wiki.musl-libc.org/functional-differences-from-glibc.html#Unloading-libraries
|
||||
[3]: https://github.com/rust-lang/rust/issues/28794
|
||||
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049
|
||||
[5]: https://github.com/rust-lang/cargo/issues/12746
|
||||
[6]: https://crates.io/crates/hot-lib-reloader/
|
||||
[7]: https://matrix.to/#/#conduwuit:puppygock.gay
|
||||
[7]: https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org
|
||||
[8]: https://crates.io/crates/libloading
|
||||
|
|
|
@ -24,8 +24,9 @@ and run the script.
|
|||
If you're on macOS and need to build an image, run `nix build .#linux-complement`.
|
||||
|
||||
We have a Complement fork as some tests have needed to be fixed. This can be found
|
||||
at: <https://github.com/girlbossceo/complement>
|
||||
at: <https://forgejo.ellis.link/continuwuation/complement>
|
||||
|
||||
[ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo
|
||||
[ci-workflows]:
|
||||
https://forgejo.ellis.link/continuwuation/continuwuity/actions/?workflow=ci.yml&actor=0&status=1
|
||||
[complement]: https://github.com/matrix-org/complement
|
||||
[direnv]: https://direnv.net/docs/hook.html
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# conduwuit
|
||||
# Continuwuity
|
||||
|
||||
{{#include ../README.md:catchphrase}}
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
|||
|
||||
- [Deployment options](deploying.md)
|
||||
|
||||
If you want to connect an appservice to conduwuit, take a look at the
|
||||
If you want to connect an appservice to Continuwuity, take a look at the
|
||||
[appservices documentation](appservices.md).
|
||||
|
||||
#### How can I contribute?
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
# Maintaining your conduwuit setup
|
||||
# Maintaining your Continuwuity setup
|
||||
|
||||
## Moderation
|
||||
|
||||
conduwuit has moderation through admin room commands. "binary commands" (medium
|
||||
Continuwuity has moderation through admin room commands. "binary commands" (medium
|
||||
priority) and an admin API (low priority) are planned. Some moderation-related
|
||||
config options are available in the example config such as "global ACLs" and
|
||||
blocking media requests to certain servers. See the example config for the
|
||||
moderation config options under the "Moderation / Privacy / Security" section.
|
||||
|
||||
conduwuit has moderation admin commands for:
|
||||
Continuwuity has moderation admin commands for:
|
||||
|
||||
- managing room aliases (`!admin rooms alias`)
|
||||
- managing room directory (`!admin rooms directory`)
|
||||
|
@ -36,7 +36,7 @@ each object being newline delimited. An example of doing this is:
|
|||
## Database (RocksDB)
|
||||
|
||||
Generally there is very little you need to do. [Compaction][rocksdb-compaction]
|
||||
is ran automatically based on various defined thresholds tuned for conduwuit to
|
||||
is run automatically based on various defined thresholds tuned for Continuwuity to
|
||||
be high performance with the least I/O amplification or overhead. Manually
|
||||
running compaction (or compaction on a timer) is not recommended, due to
|
||||
creating unnecessary I/O amplification. RocksDB is built with io_uring support
|
||||
|
@ -50,7 +50,7 @@ Some RocksDB settings can be adjusted such as the compression method chosen. See
|
|||
the RocksDB section in the [example config](configuration/examples.md).
|
||||
|
||||
btrfs users have reported that database compression does not need to be disabled
|
||||
on conduwuit as the filesystem already does not attempt to compress. This can be
|
||||
on Continuwuity as the filesystem already does not attempt to compress. This can be
|
||||
validated by using `filefrag -v` on a `.SST` file in your database and ensuring
|
||||
the `physical_offset` matches (no filesystem compression). It is very important
|
||||
to ensure no additional filesystem compression takes place as this can render
|
||||
|
@ -70,8 +70,8 @@ they're server logs or database logs, however they are critical RocksDB files
|
|||
related to WAL tracking.
|
||||
|
||||
The only safe files that can be deleted are the `LOG` files (all caps). These
|
||||
are the real RocksDB telemetry/log files, however conduwuit has already
|
||||
configured to only store up to 3 RocksDB `LOG` files due to generall being
|
||||
are the real RocksDB telemetry/log files; however, Continuwuity is already
|
||||
configured to only store up to 3 RocksDB `LOG` files, as they are generally
|
||||
useless for average users unless troubleshooting something low-level. If you
|
||||
would like to store nearly none at all, see the `rocksdb_max_log_files`
|
||||
config option.
|
||||
|
@ -88,7 +88,7 @@ still be joined together.
|
|||
|
||||
To restore a backup from an online RocksDB backup:
|
||||
|
||||
- shutdown conduwuit
|
||||
- shutdown Continuwuity
|
||||
- create a new directory for merging together the data
|
||||
- in the online backup created, copy all `.sst` files in
|
||||
`$DATABASE_BACKUP_PATH/shared_checksum` to your new directory
|
||||
|
@ -99,9 +99,9 @@ To restore a backup from an online RocksDB backup:
|
|||
if you have multiple) to your new directory
|
||||
- set your `database_path` config option to your new directory, or replace your
|
||||
old one with the new one you crafted
|
||||
- start up conduwuit again and it should open as normal
|
||||
- start up Continuwuity again and it should open as normal
|
||||
|
||||
If you'd like to do an offline backup, shutdown conduwuit and copy your
|
||||
If you'd like to do an offline backup, shutdown Continuwuity and copy your
|
||||
`database_path` directory elsewhere. This can be restored with no modifications
|
||||
needed.
|
||||
|
||||
|
@ -110,7 +110,7 @@ directory.
|
|||
|
||||
## Media
|
||||
|
||||
Media still needs various work, however conduwuit implements media deletion via:
|
||||
Media still needs various work, however Continuwuity implements media deletion via:
|
||||
|
||||
- MXC URI or Event ID (unencrypted and attempts to find the MXC URI in the
|
||||
event)
|
||||
|
@ -118,17 +118,17 @@ event)
|
|||
- Delete remote media in the past `N` seconds/minutes via filesystem metadata on
|
||||
the file created time (`btime`) or file modified time (`mtime`)
|
||||
|
||||
See the `!admin media` command for further information. All media in conduwuit
|
||||
See the `!admin media` command for further information. All media in Continuwuity
|
||||
is stored at `$DATABASE_DIR/media`. This will be configurable soon.
|
||||
|
||||
If you are finding yourself needing extensive granular control over media, we
|
||||
recommend looking into [Matrix Media
|
||||
Repo](https://github.com/t2bot/matrix-media-repo). conduwuit intends to
|
||||
Repo](https://github.com/t2bot/matrix-media-repo). Continuwuity intends to
|
||||
implement various utilities for media, but MMR is dedicated to extensive media
|
||||
management.
|
||||
|
||||
Built-in S3 support is also planned, but for now using an "S3 filesystem" on
|
||||
`media/` works. conduwuit also sends a `Cache-Control` header of 1 year and
|
||||
`media/` works. Continuwuity also sends a `Cache-Control` header of 1 year and
|
||||
immutable for all media requests (download and thumbnail) to reduce unnecessary
|
||||
media requests from browsers, reduce bandwidth usage, and reduce load.
|
||||
|
||||
|
|
1
docs/security.md
Normal file
|
@ -0,0 +1 @@
|
|||
{{#include ../SECURITY.md}}
|
21
docs/server_reference.md
Normal file
|
@ -0,0 +1,21 @@
|
|||
# Command-Line Help for `continuwuity`
|
||||
|
||||
This document contains the help content for the `continuwuity` command-line program.
|
||||
|
||||
**Command Overview:**
|
||||
|
||||
* [`continuwuity`↴](#continuwuity)
|
||||
|
||||
## `continuwuity`
|
||||
|
||||
a very cool Matrix chat homeserver written in Rust
|
||||
|
||||
**Usage:** `continuwuity [OPTIONS]`
|
||||
|
||||
###### **Options:**
|
||||
|
||||
* `-c`, `--config <CONFIG>` — Path to the config TOML file (optional)
|
||||
* `-O`, `--option <OPTION>` — Override a configuration variable using TOML 'key=value' syntax
|
||||
* `--read-only` — Run in a stricter read-only --maintenance mode
|
||||
* `--maintenance` — Run in maintenance mode while refusing connections
|
||||
* `--execute <EXECUTE>` — Execute console command automatically after startup
|
6
docs/static/_headers
vendored
Normal file
|
@ -0,0 +1,6 @@
|
|||
/.well-known/matrix/*
|
||||
Access-Control-Allow-Origin: *
|
||||
Content-Type: application/json
|
||||
/.well-known/continuwuity/*
|
||||
Access-Control-Allow-Origin: *
|
||||
Content-Type: application/json
|
13
docs/static/announcements.json
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
{
|
||||
"$schema": "https://continuwuity.org/schema/announcements.schema.json",
|
||||
"announcements": [
|
||||
{
|
||||
"id": 1,
|
||||
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
|
||||
}
|
||||
]
|
||||
}
|
35
docs/static/announcements.schema.json
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
{
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"$id": "https://continwuity.org/schema/announcements.schema.json",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"announcements": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "integer"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"date": {
|
||||
"type": "string"
|
||||
},
|
||||
"mention_room": {
|
||||
"type": "boolean",
|
||||
"description": "Whether to mention the room (@room) when posting this announcement"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"message"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"announcements"
|
||||
]
|
||||
}
|
1
docs/static/client
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"}}
|
1
docs/static/server
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
{"m.server":"matrix.continuwuity.org:443"}
|
24
docs/static/support
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
{
|
||||
"contacts": [
|
||||
{
|
||||
"email_address": "security@continuwuity.org",
|
||||
"role": "m.role.security"
|
||||
},
|
||||
{
|
||||
"matrix_id": "@tom:continuwuity.org",
|
||||
"email_address": "tom@tcpip.uk",
|
||||
"role": "m.role.admin"
|
||||
},
|
||||
{
|
||||
"matrix_id": "@jade:continuwuity.org",
|
||||
"email_address": "jade@continuwuity.org",
|
||||
"role": "m.role.admin"
|
||||
},
|
||||
{
|
||||
"matrix_id": "@nex:continuwuity.org",
|
||||
"email_address": "nex@continuwuity.org",
|
||||
"role": "m.role.admin"
|
||||
}
|
||||
],
|
||||
"support_page": "https://continuwuity.org/introduction#contact"
|
||||
}
|
@ -1,47 +1,48 @@
# Troubleshooting conduwuit
# Troubleshooting Continuwuity

> ## Docker users ⚠️
> **Docker users ⚠️**
>
> Docker is extremely UX unfriendly. Because of this, a ton of issues or support
> is actually Docker support, not conduwuit support. We also cannot document the
> ever-growing list of Docker issues here.
>
> If you intend on asking for support and you are using Docker, **PLEASE**
> triple validate your issues are **NOT** because you have a misconfiguration in
> your Docker setup.
>
> If there are things like Compose file issues or Dockerhub image issues, those
> can still be mentioned as long as they're something we can fix.
> Docker can be difficult to use and debug. It's common for Docker
> misconfigurations to cause issues, particularly with networking and permissions.
> Please check that your issues are not due to problems with your Docker setup.

## conduwuit and Matrix issues
## Continuwuity and Matrix issues

#### Lost access to admin room
### Lost access to admin room

You can reinvite yourself to the admin room through the following methods:
- Use the `--execute "users make_user_admin <username>"` conduwuit binary

- Use the `--execute "users make_user_admin <username>"` Continuwuity binary
  argument once to invite yourself to the admin room on startup
- Use the conduwuit console/CLI to run the `users make_user_admin` command
- Use the Continuwuity console/CLI to run the `users make_user_admin` command
- Or specify the `emergency_password` config option to allow you to temporarily
  log into the server account (`@conduit`) from a web client
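A sketch of the first method above, assuming the server binary is invoked directly as `continuwuity` (adjust the binary name, user ID, and any service wrapper for your setup):

```bash
# Run once at startup to re-invite the user to the admin room, then remove
# the flag again afterwards.
continuwuity --execute "users make_user_admin @alice:example.com"
```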

## General potential issues

#### Potential DNS issues when using Docker
### Potential DNS issues when using Docker

Docker has issues with its default DNS setup that may cause DNS to not be
properly functional when running conduwuit, resulting in federation issues. The
symptoms of this have shown in excessively long room joins (30+ minutes) from
very long DNS timeouts, log entries of "mismatching responding nameservers",
Docker's DNS setup for containers in a non-default network intercepts queries to
enable resolving of container hostnames to IP addresses. However, due to
performance issues with Docker's built-in resolver, this can cause DNS queries
to take a long time to resolve, resulting in federation issues.

This is particularly common with Docker Compose, as custom networks are easily
created and configured.

Symptoms of this include excessively long room joins (30+ minutes) from very
long DNS timeouts, log entries of "mismatching responding nameservers",
and/or partial or non-functional inbound/outbound federation.

This is **not** a conduwuit issue, and is purely a Docker issue. It is not
sustainable for heavy DNS activity which is normal for Matrix federation. The
workarounds for this are:
- Use DNS over TCP via the config option `query_over_tcp_only = true`
- Don't use Docker's default DNS setup and instead allow the container to use
  and communicate with your host's DNS servers (host's `/etc/resolv.conf`)
This is not a bug in Continuwuity. Docker's default DNS resolver is not suitable
for heavy DNS activity, which is normal for federated protocols like Matrix.

#### DNS No connections available error message
Workarounds:

- Use DNS over TCP via the config option `query_over_tcp_only = true`
- Bypass Docker's default DNS setup and instead allow the container to use and communicate with your host's DNS servers. Typically, this can be done by mounting the host's `/etc/resolv.conf`.
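A minimal sketch of the second workaround with plain Docker (the image name and any other flags are placeholders for whatever you already run; Compose users can add the same bind mount under `volumes:`):

```bash
# Mount the host's resolver configuration read-only so the container talks to
# the host's DNS servers instead of Docker's built-in resolver.
docker run -d --name continuwuity \
  -v /etc/resolv.conf:/etc/resolv.conf:ro \
  <your-continuwuity-image>
```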

### DNS No connections available error message

If you receive spurious amounts of error logs saying "DNS No connections
available", this is due to your DNS server (servers from `/etc/resolv.conf`)
@ -64,7 +65,7 @@ very computationally expensive, and is extremely susceptible to denial of
service, especially on Matrix. Many servers also strangely have broken DNSSEC
setups and will result in non-functional federation.

conduwuit cannot provide a "works-for-everyone" Unbound DNS setup guide, but
Continuwuity cannot provide a "works-for-everyone" Unbound DNS setup guide, but
the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch]
may be of interest. Disabling DNSSEC on Unbound is done by commenting out trust-anchors
config options and removing the `validator` module.
@ -75,9 +76,9 @@ high load, and we have identified its DNS caching to not be very effective.
dnsmasq can possibly work, but it does **not** support TCP fallback which can be
problematic when receiving large DNS responses such as from large SRV records.
If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback`
in conduwuit config.
in Continuwuity config.

Raising `dns_cache_entries` in conduwuit config from the default can also assist
Raising `dns_cache_entries` in Continuwuity config from the default can also assist
in DNS caching, but a full-fledged external caching resolver is better and more
reliable.

@ -91,13 +92,13 @@ reliability at a slight performance cost due to TCP overhead.

## RocksDB / database issues

#### Database corruption
### Database corruption

If your database is corrupted *and* is failing to start (e.g. checksum
mismatch), it may be recoverable but careful steps must be taken, and there is
no guarantee it will be recoverable.

The first thing that can be done is launching conduwuit with the
The first thing that can be done is launching Continuwuity with the
`rocksdb_repair` config option set to true. This will tell RocksDB to attempt to
repair itself at launch. If this does not work, disable the option and continue
reading.
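For example, assuming you configure the server through `CONDUWUIT_`-prefixed environment variables (the same option can be set in your config file instead; the config path is a placeholder):

```bash
# One-off start with the RocksDB self-repair option enabled; turn it back off
# once the database opens cleanly.
CONDUWUIT_ROCKSDB_REPAIR=true continuwuity --config /etc/continuwuity/continuwuity.toml
```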

@ -109,7 +110,7 @@ RocksDB has the following recovery modes:
- `PointInTime`
- `SkipAnyCorruptedRecord`

By default, conduwuit uses `TolerateCorruptedTailRecords` as generally these may
By default, Continuwuity uses `TolerateCorruptedTailRecords` as generally these may
be due to bad federation and we can re-fetch the correct data over federation.
The RocksDB default is `PointInTime` which will attempt to restore a "snapshot"
of the data when it was last known to be good. This data can be either a few

|
|||
|
||||
With this in mind:
|
||||
|
||||
- First start conduwuit with the `PointInTime` recovery method. See the [example
|
||||
- First start Continuwuity with the `PointInTime` recovery method. See the [example
|
||||
config](configuration/examples.md) for how to do this using
|
||||
`rocksdb_recovery_mode`
|
||||
- If your database successfully opens, clients are recommended to clear their
|
||||
client cache to account for the rollback
|
||||
- Leave your conduwuit running in `PointInTime` for at least 30-60 minutes so as
|
||||
- Leave your Continuwuity running in `PointInTime` for at least 30-60 minutes so as
|
||||
much possible corruption is restored
|
||||
- If all goes will, you should be able to restore back to using
|
||||
`TolerateCorruptedTailRecords` and you have successfully recovered your database
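A sketch of the first step, again using an environment override (the numeric value that corresponds to `PointInTime` is listed alongside `rocksdb_recovery_mode` in the example config, so it is left as a placeholder here):

```bash
# Start once in PointInTime recovery, let it run for 30-60 minutes, then revert.
CONDUWUIT_ROCKSDB_RECOVERY_MODE=<PointInTime mode number> continuwuity
```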

@ -142,16 +143,16 @@ Note that users should not really be debugging things. If you find yourself
debugging and find the issue, please let us know and/or how we can fix it.
Various debug commands can be found in `!admin debug`.

#### Debug/Trace log level
### Debug/Trace log level

conduwuit builds without debug or trace log levels at compile time by default
Continuwuity builds without debug or trace log levels at compile time by default
for substantial performance gains in CPU usage and improved compile times. If
you need to access debug/trace log levels, you will need to build without the
`release_max_log_level` feature or use our provided static debug binaries.

#### Changing log level dynamically
### Changing log level dynamically

conduwuit supports changing the tracing log environment filter on-the-fly using
Continuwuity supports changing the tracing log environment filter on-the-fly using
the admin command `!admin debug change-log-level <log env filter>`. This accepts
a string **without quotes** in the same format as the `log` config option.

@ -166,9 +167,9 @@ load, simply pass the `--reset` flag.

`!admin debug change-log-level --reset`

#### Pinging servers
### Pinging servers

conduwuit can ping other servers using `!admin debug ping <server>`. This takes
Continuwuity can ping other servers using `!admin debug ping <server>`. This takes
a server name and goes through the server discovery process and queries
`/_matrix/federation/v1/version`. Errors are outputted.
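For example, to check connectivity to a federation peer (the server name is just an example):

`!admin debug ping matrix.org`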

@ -177,15 +178,15 @@ server performance on either side as that endpoint is completely unauthenticated
and simply fetches a string on a static JSON endpoint. It is very low cost in both
bandwidth and computation.

#### Allocator memory stats
### Allocator memory stats

When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you
can see conduwuit's high-level allocator stats by using
can see Continuwuity's high-level allocator stats by using
`!admin server memory-usage` at the bottom.

If you are a developer, you can also view the raw jemalloc statistics with
`!admin debug memory-stats`. Please note that this output is extremely large,
which may only be visible in the conduwuit console CLI due to PDU size limits,
which may only be visible in the Continuwuity console CLI due to PDU size limits,
and is not easy for non-developers to understand.

[unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html
32 docs/turn.md
@ -1,6 +1,6 @@
# Setting up TURN/STUN

In order to make or receive calls, a TURN server is required. conduwuit suggests
In order to make or receive calls, a TURN server is required. Continuwuity suggests
using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also
available as a Docker image.

|
|||
A common way to generate a suitable alphanumeric secret key is by using `pwgen
|
||||
-s 64 1`.
|
||||
|
||||
These same values need to be set in conduwuit. See the [example
|
||||
These same values need to be set in Continuwuity. See the [example
|
||||
config](configuration/examples.md) in the TURN section for configuring these and
|
||||
restart conduwuit after.
|
||||
restart Continuwuity after.
|
||||
|
||||
`turn_secret` or a path to `turn_secret_file` must have a value of your
|
||||
coturn `static-auth-secret`, or use `turn_username` and `turn_password`
|
||||
|
@ -34,7 +34,7 @@ If you are using TURN over TLS, you can replace `turn:` with `turns:` in the
|
|||
TURN over TLS. This is highly recommended.
|
||||
|
||||
If you need unauthenticated access to the TURN URIs, or some clients may be
|
||||
having trouble, you can enable `turn_guest_access` in conduwuit which disables
|
||||
having trouble, you can enable `turn_guest_access` in Continuwuity which disables
|
||||
authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer`
|
||||
|
||||
### Run
|
||||
|
@ -68,3 +68,27 @@ documentation](https://github.com/coturn/coturn/blob/master/docker/coturn/README
|
|||
|
||||
For security recommendations see Synapse's [Coturn
|
||||
documentation](https://element-hq.github.io/synapse/latest/turn-howto.html).
|
||||
|
||||
### Testing
|
||||
|
||||
To make sure turn credentials are being correctly served to clients, you can manually make a HTTP request to the turnServer endpoint.
|
||||
|
||||
`curl "https://<matrix.example.com>/_matrix/client/r0/voip/turnServer" -H 'Authorization: Bearer <your_client_token>' | jq`
|
||||
|
||||
You should get a response like this:
|
||||
|
||||
```json
|
||||
{
|
||||
"username": "1752792167:@jade:example.com",
|
||||
"password": "KjlDlawdPbU9mvP4bhdV/2c/h65=",
|
||||
"uris": [
|
||||
"turns:coturn.example.com?transport=udp",
|
||||
"turns:coturn.example.com?transport=tcp",
|
||||
"turn:coturn.example.com?transport=udp",
|
||||
"turn:coturn.example.com?transport=tcp"
|
||||
],
|
||||
"ttl": 86400
|
||||
}
|
||||
```
|
||||
|
||||
You can test these credentials work using [Trickle ICE](https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/)
|
||||
|
|
|
@ -83,7 +83,7 @@ env DIRENV_DEVSHELL=all-features \
    --workspace \
    --locked \
    --profile test \
    --all-features \
    --features full \
    --no-deps \
    --document-private-items \
    --color always
@ -96,6 +96,7 @@ script = """
direnv exec . \
    cargo clippy \
        --workspace \
        --features full \
        --locked \
        --profile test \
        --color=always \
@ -113,7 +114,7 @@ env DIRENV_DEVSHELL=all-features \
    --workspace \
    --locked \
    --profile test \
    --all-features \
    --features full \
    --color=always \
    -- \
    -D warnings
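Pieced together from the flags above, the lint task now runs roughly the following (a sketch reconstructed from this diff, not a verbatim copy of the task file):

```bash
# Clippy over the whole workspace with the "full" feature set, treating all
# warnings as errors.
env DIRENV_DEVSHELL=all-features \
    direnv exec . \
    cargo clippy \
        --workspace \
        --features full \
        --locked \
        --profile test \
        --color=always \
        -- \
        -D warnings
```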
159 flake.lock generated
@ -10,11 +10,11 @@
"nixpkgs-stable": "nixpkgs-stable"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1738524606,
|
||||
"narHash": "sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=",
|
||||
"lastModified": 1751403276,
|
||||
"narHash": "sha256-V0EPQNsQko1a8OqIWc2lLviLnMpR1m08Ej00z5RVTfs=",
|
||||
"owner": "zhaofengli",
|
||||
"repo": "attic",
|
||||
"rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e",
|
||||
"rev": "896ad88fa57ad5dbcd267c0ac51f1b71ccfcb4dd",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -32,11 +32,11 @@
|
|||
"nixpkgs": "nixpkgs_4"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1737621947,
|
||||
"narHash": "sha256-8HFvG7fvIFbgtaYAY2628Tb89fA55nPm2jSiNs0/Cws=",
|
||||
"lastModified": 1748883665,
|
||||
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
|
||||
"owner": "cachix",
|
||||
"repo": "cachix",
|
||||
"rev": "f65a3cd5e339c223471e64c051434616e18cc4f5",
|
||||
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -63,11 +63,11 @@
|
|||
"nixpkgs": "nixpkgs_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1728672398,
|
||||
"narHash": "sha256-KxuGSoVUFnQLB2ZcYODW7AVPAh9JqRlD5BrfsC/Q4qs=",
|
||||
"lastModified": 1744206633,
|
||||
"narHash": "sha256-pb5aYkE8FOoa4n123slgHiOf1UbNSnKe5pEZC+xXD5g=",
|
||||
"owner": "cachix",
|
||||
"repo": "cachix",
|
||||
"rev": "aac51f698309fd0f381149214b7eee213c66ef0a",
|
||||
"rev": "8a60090640b96f9df95d1ab99e5763a586be1404",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -77,23 +77,6 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"complement": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1741891349,
|
||||
"narHash": "sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=",
|
||||
"owner": "girlbossceo",
|
||||
"repo": "complement",
|
||||
"rev": "e587b3df569cba411aeac7c20b6366d03c143745",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "girlbossceo",
|
||||
"ref": "main",
|
||||
"repo": "complement",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
|
@ -117,11 +100,11 @@
|
|||
},
|
||||
"crane_2": {
|
||||
"locked": {
|
||||
"lastModified": 1739936662,
|
||||
"narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=",
|
||||
"lastModified": 1750266157,
|
||||
"narHash": "sha256-tL42YoNg9y30u7zAqtoGDNdTyXTi8EALDeCB13FtbQA=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "19de14aaeb869287647d9461cbd389187d8ecdb7",
|
||||
"rev": "e37c943371b73ed87faf33f7583860f81f1d5a48",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -149,11 +132,11 @@
|
|||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1733323168,
|
||||
"narHash": "sha256-d5DwB4MZvlaQpN6OQ4SLYxb5jA4UH5EtV5t5WOtjLPU=",
|
||||
"lastModified": 1748273445,
|
||||
"narHash": "sha256-5V0dzpNgQM0CHDsMzh+ludYeu1S+Y+IMjbaskSSdFh0=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"rev": "efa9010b8b1cfd5dd3c7ed1e172a470c3b84a064",
|
||||
"rev": "668a50d8b7bdb19a0131f53c9f6c25c9071e1ffb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -170,11 +153,11 @@
|
|||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1740724364,
|
||||
"narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=",
|
||||
"lastModified": 1755585599,
|
||||
"narHash": "sha256-tl/0cnsqB/Yt7DbaGMel2RLa7QG5elA8lkaOXli6VdY=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "edf7d9e431cda8782e729253835f178a356d3aab",
|
||||
"rev": "6ed03ef4c8ec36d193c18e06b9ecddde78fb7e42",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -203,11 +186,11 @@
|
|||
"flake-compat_2": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1733328505,
|
||||
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
|
||||
"lastModified": 1747046372,
|
||||
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
|
||||
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -219,11 +202,11 @@
|
|||
"flake-compat_3": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1733328505,
|
||||
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
|
||||
"lastModified": 1747046372,
|
||||
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
|
||||
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -306,15 +289,14 @@
|
|||
"nixpkgs": [
|
||||
"cachix",
|
||||
"nixpkgs"
|
||||
],
|
||||
"nixpkgs-stable": "nixpkgs-stable_2"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1733318908,
|
||||
"narHash": "sha256-SVQVsbafSM1dJ4fpgyBqLZ+Lft+jcQuMtEL3lQWx2Sk=",
|
||||
"lastModified": 1747372754,
|
||||
"narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=",
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"rev": "6f4e2a2112050951a314d2733a994fbab94864c6",
|
||||
"rev": "80479b6ec16fefd9c1db3ea13aeb038c60530f46",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -361,23 +343,6 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"liburing": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1740613216,
|
||||
"narHash": "sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=",
|
||||
"owner": "axboe",
|
||||
"repo": "liburing",
|
||||
"rev": "e1003e496e66f9b0ae06674869795edf772d5500",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "axboe",
|
||||
"ref": "master",
|
||||
"repo": "liburing",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
|
@ -401,11 +366,11 @@
|
|||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1727438425,
|
||||
"narHash": "sha256-X8ES7I1cfNhR9oKp06F6ir4Np70WGZU5sfCOuNBEwMg=",
|
||||
"lastModified": 1745930071,
|
||||
"narHash": "sha256-bYyjarS3qSNqxfgc89IoVz8cAFDkF9yPE63EJr+h50s=",
|
||||
"owner": "domenkozar",
|
||||
"repo": "nix",
|
||||
"rev": "f6c5ae4c1b2e411e6b1e6a8181cc84363d6a7546",
|
||||
"rev": "b455edf3505f1bf0172b39a735caef94687d0d9c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -484,29 +449,13 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-stable_2": {
|
||||
"locked": {
|
||||
"lastModified": 1730741070,
|
||||
"narHash": "sha256-edm8WG19kWozJ/GqyYx2VjW99EdhjKwbY3ZwdlPAAlo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "d063c1dd113c91ab27959ba540c0d9753409edf3",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-24.05",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1730531603,
|
||||
"narHash": "sha256-Dqg6si5CqIzm87sp57j5nTaeBbWhHFaVyG7V6L8k3lY=",
|
||||
"lastModified": 1733212471,
|
||||
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "7ffd9ae656aec493492b44d0ddfb28e79a1ea25d",
|
||||
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -534,11 +483,11 @@
|
|||
},
|
||||
"nixpkgs_4": {
|
||||
"locked": {
|
||||
"lastModified": 1733212471,
|
||||
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
|
||||
"lastModified": 1748190013,
|
||||
"narHash": "sha256-R5HJFflOfsP5FBtk+zE8FpL8uqE7n62jqOsADvVshhE=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
|
||||
"rev": "62b852f6c6742134ade1abdd2a21685fd617a291",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -550,11 +499,11 @@
|
|||
},
|
||||
"nixpkgs_5": {
|
||||
"locked": {
|
||||
"lastModified": 1740547748,
|
||||
"narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=",
|
||||
"lastModified": 1751498133,
|
||||
"narHash": "sha256-QWJ+NQbMU+NcU2xiyo7SNox1fAuwksGlQhpzBl76g1I=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "3a05eebede89661660945da1f151959900903b6a",
|
||||
"rev": "d55716bb59b91ae9d1ced4b1ccdea7a442ecbfdb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -564,46 +513,26 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"rocksdb": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1741308171,
|
||||
"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
|
||||
"owner": "girlbossceo",
|
||||
"repo": "rocksdb",
|
||||
"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "girlbossceo",
|
||||
"ref": "v9.11.1",
|
||||
"repo": "rocksdb",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"attic": "attic",
|
||||
"cachix": "cachix",
|
||||
"complement": "complement",
|
||||
"crane": "crane_2",
|
||||
"fenix": "fenix",
|
||||
"flake-compat": "flake-compat_3",
|
||||
"flake-utils": "flake-utils",
|
||||
"liburing": "liburing",
|
||||
"nix-filter": "nix-filter",
|
||||
"nixpkgs": "nixpkgs_5",
|
||||
"rocksdb": "rocksdb"
|
||||
"nixpkgs": "nixpkgs_5"
|
||||
}
|
||||
},
|
||||
"rust-analyzer-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1740691488,
|
||||
"narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=",
|
||||
"lastModified": 1755504847,
|
||||
"narHash": "sha256-VX0B9hwhJypCGqncVVLC+SmeMVd/GAYbJZ0MiiUn2Pk=",
|
||||
"owner": "rust-lang",
|
||||
"repo": "rust-analyzer",
|
||||
"rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5",
|
||||
"rev": "a905e3b21b144d77e1b304e49f3264f6f8d4db75",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
|
437 flake.nix
@ -2,113 +2,133 @@
inputs = {
|
||||
attic.url = "github:zhaofengli/attic?ref=main";
|
||||
cachix.url = "github:cachix/cachix?ref=master";
|
||||
complement = { url = "github:girlbossceo/complement?ref=main"; flake = false; };
|
||||
crane = { url = "github:ipetkov/crane?ref=master"; };
|
||||
fenix = { url = "github:nix-community/fenix?ref=main"; inputs.nixpkgs.follows = "nixpkgs"; };
|
||||
flake-compat = { url = "github:edolstra/flake-compat?ref=master"; flake = false; };
|
||||
crane = {
|
||||
url = "github:ipetkov/crane?ref=master";
|
||||
};
|
||||
fenix = {
|
||||
url = "github:nix-community/fenix?ref=main";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
flake-compat = {
|
||||
url = "github:edolstra/flake-compat?ref=master";
|
||||
flake = false;
|
||||
};
|
||||
flake-utils.url = "github:numtide/flake-utils?ref=main";
|
||||
nix-filter.url = "github:numtide/nix-filter?ref=main";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
|
||||
rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; };
|
||||
liburing = { url = "github:axboe/liburing?ref=master"; flake = false; };
|
||||
};
|
||||
|
||||
outputs = inputs:
|
||||
inputs.flake-utils.lib.eachDefaultSystem (system:
|
||||
outputs =
|
||||
inputs:
|
||||
inputs.flake-utils.lib.eachDefaultSystem (
|
||||
system:
|
||||
let
|
||||
pkgsHost = import inputs.nixpkgs {
|
||||
inherit system;
|
||||
};
|
||||
pkgsHostStatic = pkgsHost.pkgsStatic;
|
||||
|
||||
fnx = inputs.fenix.packages.${system};
|
||||
# The Rust toolchain to use
|
||||
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
|
||||
toolchain = fnx.combine [
|
||||
(fnx.fromToolchainFile {
|
||||
file = ./rust-toolchain.toml;
|
||||
|
||||
# See also `rust-toolchain.toml`
|
||||
sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI=";
|
||||
};
|
||||
|
||||
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
|
||||
inherit pkgs;
|
||||
book = self.callPackage ./nix/pkgs/book {};
|
||||
complement = self.callPackage ./nix/pkgs/complement {};
|
||||
craneLib = ((inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain));
|
||||
inherit inputs;
|
||||
main = self.callPackage ./nix/pkgs/main {};
|
||||
oci-image = self.callPackage ./nix/pkgs/oci-image {};
|
||||
tini = pkgs.tini.overrideAttrs {
|
||||
# newer clang/gcc is unhappy with tini-static: <https://3.dog/~strawberry/pb/c8y4>
|
||||
patches = [ (pkgs.fetchpatch {
|
||||
url = "https://patch-diff.githubusercontent.com/raw/krallin/tini/pull/224.patch";
|
||||
hash = "sha256-4bTfAhRyIT71VALhHY13hUgbjLEUyvgkIJMt3w9ag3k=";
|
||||
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
|
||||
})
|
||||
fnx.complete.rustfmt
|
||||
];
|
||||
};
|
||||
|
||||
mkScope =
|
||||
pkgs:
|
||||
pkgs.lib.makeScope pkgs.newScope (self: {
|
||||
inherit pkgs inputs;
|
||||
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
|
||||
main = self.callPackage ./pkg/nix/pkgs/main { };
|
||||
liburing = pkgs.liburing.overrideAttrs {
|
||||
# Tests weren't building
|
||||
outputs = [ "out" "dev" "man" ];
|
||||
outputs = [
|
||||
"out"
|
||||
"dev"
|
||||
"man"
|
||||
];
|
||||
buildFlags = [ "library" ];
|
||||
src = inputs.liburing;
|
||||
};
|
||||
rocksdb = (pkgs.rocksdb.override {
|
||||
liburing = self.liburing;
|
||||
}).overrideAttrs (old: {
|
||||
src = inputs.rocksdb;
|
||||
version = pkgs.lib.removePrefix
|
||||
"v"
|
||||
(builtins.fromJSON (builtins.readFile ./flake.lock))
|
||||
.nodes.rocksdb.original.ref;
|
||||
# we have this already at https://github.com/girlbossceo/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
|
||||
# unsetting this so i don't have to revert it and make this nix exclusive
|
||||
patches = [];
|
||||
cmakeFlags = pkgs.lib.subtractLists
|
||||
[
|
||||
# no real reason to have snappy or zlib, no one uses this
|
||||
rocksdb =
|
||||
(pkgs.rocksdb_9_10.override {
|
||||
# Override the liburing input for the build with our own so
|
||||
# we have it built with the library flag
|
||||
inherit (self) liburing;
|
||||
}).overrideAttrs
|
||||
(old: {
|
||||
src = pkgsHost.fetchFromGitea {
|
||||
domain = "forgejo.ellis.link";
|
||||
owner = "continuwuation";
|
||||
repo = "rocksdb";
|
||||
rev = "10.4.fb";
|
||||
sha256 = "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=";
|
||||
};
|
||||
version = "v10.4.fb";
|
||||
cmakeFlags =
|
||||
pkgs.lib.subtractLists [
|
||||
# No real reason to have snappy or zlib, no one uses this
|
||||
"-DWITH_SNAPPY=1"
|
||||
"-DZLIB=1"
|
||||
"-DWITH_ZLIB=1"
|
||||
# we dont need to use ldb or sst_dump (core_tools)
|
||||
# We don't need to use ldb or sst_dump (core_tools)
|
||||
"-DWITH_CORE_TOOLS=1"
|
||||
# we dont need to build rocksdb tests
|
||||
# We don't need to build rocksdb tests
|
||||
"-DWITH_TESTS=1"
|
||||
# we use rust-rocksdb via C interface and dont need C++ RTTI
|
||||
# We use rust-rocksdb via C interface and don't need C++ RTTI
|
||||
"-DUSE_RTTI=1"
|
||||
# this doesn't exist in RocksDB, and USE_SSE is deprecated for
|
||||
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
|
||||
# PORTABLE=$(march)
|
||||
"-DFORCE_SSE42=1"
|
||||
# PORTABLE will get set in main/default.nix
|
||||
"-DPORTABLE=1"
|
||||
]
|
||||
old.cmakeFlags
|
||||
] old.cmakeFlags
|
||||
++ [
|
||||
# no real reason to have snappy, no one uses this
|
||||
# No real reason to have snappy, no one uses this
|
||||
"-DWITH_SNAPPY=0"
|
||||
"-DZLIB=0"
|
||||
"-DWITH_ZLIB=0"
|
||||
# we dont need to use ldb or sst_dump (core_tools)
|
||||
# We don't need to use ldb or sst_dump (core_tools)
|
||||
"-DWITH_CORE_TOOLS=0"
|
||||
# we dont need trace tools
|
||||
# We don't need trace tools
|
||||
"-DWITH_TRACE_TOOLS=0"
|
||||
# we dont need to build rocksdb tests
|
||||
# We don't need to build rocksdb tests
|
||||
"-DWITH_TESTS=0"
|
||||
# we use rust-rocksdb via C interface and dont need C++ RTTI
|
||||
# We use rust-rocksdb via C interface and don't need C++ RTTI
|
||||
"-DUSE_RTTI=0"
|
||||
];
|
||||
|
||||
# outputs has "tools" which we dont need or use
|
||||
# outputs has "tools" which we don't need or use
|
||||
outputs = [ "out" ];
|
||||
|
||||
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
|
||||
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
|
||||
preInstall = "";
|
||||
|
||||
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
|
||||
# Unsetting this so we don't have to revert it and make this nix exclusive
|
||||
patches = [ ];
|
||||
|
||||
postPatch = ''
|
||||
# Fix gcc-13 build failures due to missing <cstdint> and
|
||||
# <system_error> includes, fixed upstream since 8.x
|
||||
sed -e '1i #include <cstdint>' -i db/compaction/compaction_iteration_stats.h
|
||||
sed -e '1i #include <cstdint>' -i table/block_based/data_block_hash_index.h
|
||||
sed -e '1i #include <cstdint>' -i util/string_util.h
|
||||
sed -e '1i #include <cstdint>' -i include/rocksdb/utilities/checkpoint.h
|
||||
'';
|
||||
});
|
||||
});
|
||||
|
||||
scopeHost = mkScope pkgsHost;
|
||||
scopeHostStatic = mkScope pkgsHostStatic;
|
||||
scopeCrossLinux = mkScope pkgsHost.pkgsLinux.pkgsStatic;
|
||||
mkCrossScope = crossSystem:
|
||||
let pkgsCrossStatic = (import inputs.nixpkgs {
|
||||
mkCrossScope =
|
||||
crossSystem:
|
||||
let
|
||||
pkgsCrossStatic =
|
||||
(import inputs.nixpkgs {
|
||||
inherit system;
|
||||
crossSystem = {
|
||||
config = crossSystem;
|
||||
|
@ -117,85 +137,19 @@
|
|||
in
|
||||
mkScope pkgsCrossStatic;
|
||||
|
||||
mkDevShell = scope: scope.pkgs.mkShell {
|
||||
env = scope.main.env // {
|
||||
# Rust Analyzer needs to be able to find the path to default crate
|
||||
# sources, and it can read this environment variable to do so. The
|
||||
# `rust-src` component is required in order for this to work.
|
||||
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
|
||||
|
||||
# Convenient way to access a pinned version of Complement's source
|
||||
# code.
|
||||
COMPLEMENT_SRC = inputs.complement.outPath;
|
||||
|
||||
# Needed for Complement: <https://github.com/golang/go/issues/52690>
|
||||
CGO_CFLAGS = "-Wl,--no-gc-sections";
|
||||
CGO_LDFLAGS = "-Wl,--no-gc-sections";
|
||||
};
|
||||
|
||||
# Development tools
|
||||
packages = [
|
||||
# Always use nightly rustfmt because most of its options are unstable
|
||||
#
|
||||
# This needs to come before `toolchain` in this list, otherwise
|
||||
# `$PATH` will have stable rustfmt instead.
|
||||
inputs.fenix.packages.${system}.latest.rustfmt
|
||||
|
||||
toolchain
|
||||
]
|
||||
++ (with pkgsHost.pkgs; [
|
||||
# Required by hardened-malloc.rs dep
|
||||
binutils
|
||||
|
||||
cargo-audit
|
||||
cargo-auditable
|
||||
|
||||
# Needed for producing Debian packages
|
||||
cargo-deb
|
||||
|
||||
# Needed for CI to check validity of produced Debian packages (dpkg-deb)
|
||||
dpkg
|
||||
|
||||
engage
|
||||
|
||||
# Needed for Complement
|
||||
go
|
||||
|
||||
# Needed for our script for Complement
|
||||
jq
|
||||
gotestfmt
|
||||
|
||||
# Needed for finding broken markdown links
|
||||
lychee
|
||||
|
||||
# Needed for linting markdown files
|
||||
markdownlint-cli
|
||||
|
||||
# Useful for editing the book locally
|
||||
mdbook
|
||||
|
||||
# used for rust caching in CI to speed it up
|
||||
sccache
|
||||
]
|
||||
# liburing is Linux-exclusive
|
||||
++ lib.optional stdenv.hostPlatform.isLinux liburing
|
||||
++ lib.optional stdenv.hostPlatform.isLinux numactl)
|
||||
++ scope.main.buildInputs
|
||||
++ scope.main.propagatedBuildInputs
|
||||
++ scope.main.nativeBuildInputs;
|
||||
};
|
||||
in
|
||||
{
|
||||
packages = {
|
||||
packages =
|
||||
{
|
||||
default = scopeHost.main.override {
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
# Don't include experimental features
|
||||
"experimental"
|
||||
# jemalloc profiling/stats features are expensive and shouldn't
|
||||
# be expected on non-debug builds.
|
||||
"jemalloc_prof"
|
||||
"jemalloc_stats"
|
||||
# this is non-functional on nix for some reason
|
||||
# This is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
|
@ -203,23 +157,23 @@
|
|||
};
|
||||
default-debug = scopeHost.main.override {
|
||||
profile = "dev";
|
||||
# debug build users expect full logs
|
||||
# Debug build users expect full logs
|
||||
disable_release_max_log_level = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
# Don't include experimental features
|
||||
"experimental"
|
||||
# this is non-functional on nix for some reason
|
||||
# This is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
];
|
||||
};
|
||||
# just a test profile used for things like CI and complement
|
||||
# Just a test profile used for things like CI and complement
|
||||
default-test = scopeHost.main.override {
|
||||
profile = "test";
|
||||
disable_release_max_log_level = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
# Don't include experimental features
|
||||
"experimental"
|
||||
# this is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
|
@ -230,13 +184,13 @@
|
|||
all-features = scopeHost.main.override {
|
||||
all_features = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
# Don't include experimental features
|
||||
"experimental"
|
||||
# jemalloc profiling/stats features are expensive and shouldn't
|
||||
# be expected on non-debug builds.
|
||||
"jemalloc_prof"
|
||||
"jemalloc_stats"
|
||||
# this is non-functional on nix for some reason
|
||||
# This is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
|
@ -245,71 +199,24 @@
|
|||
all-features-debug = scopeHost.main.override {
|
||||
profile = "dev";
|
||||
all_features = true;
|
||||
# debug build users expect full logs
|
||||
# Debug build users expect full logs
|
||||
disable_release_max_log_level = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
# Don't include experimental features
|
||||
"experimental"
|
||||
# this is non-functional on nix for some reason
|
||||
# This is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
];
|
||||
};
|
||||
hmalloc = scopeHost.main.override { features = [ "hardened_malloc" ]; };
|
||||
|
||||
oci-image = scopeHost.oci-image;
|
||||
oci-image-all-features = scopeHost.oci-image.override {
|
||||
main = scopeHost.main.override {
|
||||
all_features = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
"experimental"
|
||||
# jemalloc profiling/stats features are expensive and shouldn't
|
||||
# be expected on non-debug builds.
|
||||
"jemalloc_prof"
|
||||
"jemalloc_stats"
|
||||
# this is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
];
|
||||
};
|
||||
};
|
||||
oci-image-all-features-debug = scopeHost.oci-image.override {
|
||||
main = scopeHost.main.override {
|
||||
profile = "dev";
|
||||
all_features = true;
|
||||
# debug build users expect full logs
|
||||
disable_release_max_log_level = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
"experimental"
|
||||
# this is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
];
|
||||
};
|
||||
};
|
||||
oci-image-hmalloc = scopeHost.oci-image.override {
|
||||
main = scopeHost.main.override {
|
||||
features = ["hardened_malloc"];
|
||||
};
|
||||
};
|
||||
|
||||
book = scopeHost.book;
|
||||
|
||||
complement = scopeHost.complement;
|
||||
static-complement = scopeHostStatic.complement;
|
||||
# macOS containers don't exist, so the complement images must be forced to linux
|
||||
linux-complement = (mkCrossScope "${pkgsHost.hostPlatform.qemuArch}-linux-musl").complement;
|
||||
}
|
||||
//
|
||||
builtins.listToAttrs
|
||||
(builtins.concatLists
|
||||
(builtins.map
|
||||
(crossSystem:
|
||||
// builtins.listToAttrs (
|
||||
builtins.concatLists (
|
||||
builtins.map
|
||||
(
|
||||
crossSystem:
|
||||
let
|
||||
binaryName = "static-${crossSystem}";
|
||||
scopeCrossStatic = mkCrossScope crossSystem;
|
||||
|
@ -326,7 +233,8 @@
|
|||
{
|
||||
name = "${binaryName}-x86_64-haswell-optimised";
|
||||
value = scopeCrossStatic.main.override {
|
||||
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
|
||||
x86_64_haswell_target_optimised =
|
||||
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -396,7 +304,8 @@
|
|||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
];
|
||||
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
|
||||
x86_64_haswell_target_optimised =
|
||||
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -426,118 +335,6 @@
|
|||
features = [ "hardened_malloc" ];
|
||||
};
|
||||
}
|
||||
|
||||
# An output for an OCI image based on that binary
|
||||
{
|
||||
name = "oci-image-${crossSystem}";
|
||||
value = scopeCrossStatic.oci-image;
|
||||
}
|
||||
|
||||
# An output for an OCI image based on that binary with x86_64 haswell
|
||||
# target optimisations
|
||||
{
|
||||
name = "oci-image-${crossSystem}-x86_64-haswell-optimised";
|
||||
value = scopeCrossStatic.oci-image.override {
|
||||
main = scopeCrossStatic.main.override {
|
||||
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
# An output for an OCI image based on that unstripped debug ("dev") binary
|
||||
{
|
||||
name = "oci-image-${crossSystem}-debug";
|
||||
value = scopeCrossStatic.oci-image.override {
|
||||
main = scopeCrossStatic.main.override {
|
||||
profile = "dev";
|
||||
# debug build users expect full logs
|
||||
disable_release_max_log_level = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
# An output for an OCI image based on that binary with `--all-features`
|
||||
{
|
||||
name = "oci-image-${crossSystem}-all-features";
|
||||
value = scopeCrossStatic.oci-image.override {
|
||||
main = scopeCrossStatic.main.override {
|
||||
all_features = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
"experimental"
|
||||
# jemalloc profiling/stats features are expensive and shouldn't
|
||||
# be expected on non-debug builds.
|
||||
"jemalloc_prof"
|
||||
"jemalloc_stats"
|
||||
# this is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
# An output for an OCI image based on that binary with `--all-features` and with x86_64 haswell
|
||||
# target optimisations
|
||||
{
|
||||
name = "oci-image-${crossSystem}-all-features-x86_64-haswell-optimised";
|
||||
value = scopeCrossStatic.oci-image.override {
|
||||
main = scopeCrossStatic.main.override {
|
||||
all_features = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
"experimental"
|
||||
# jemalloc profiling/stats features are expensive and shouldn't
|
||||
# be expected on non-debug builds.
|
||||
"jemalloc_prof"
|
||||
"jemalloc_stats"
|
||||
# this is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
];
|
||||
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
# An output for an OCI image based on that unstripped debug ("dev") binary with `--all-features`
|
||||
{
|
||||
name = "oci-image-${crossSystem}-all-features-debug";
|
||||
value = scopeCrossStatic.oci-image.override {
|
||||
main = scopeCrossStatic.main.override {
|
||||
profile = "dev";
|
||||
all_features = true;
|
||||
# debug build users expect full logs
|
||||
disable_release_max_log_level = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
"experimental"
|
||||
# this is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
# An output for an OCI image based on that binary with hardened_malloc
|
||||
{
|
||||
name = "oci-image-${crossSystem}-hmalloc";
|
||||
value = scopeCrossStatic.oci-image.override {
|
||||
main = scopeCrossStatic.main.override {
|
||||
features = ["hardened_malloc"];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
# An output for a complement OCI image for the specified platform
|
||||
{
|
||||
name = "complement-${crossSystem}";
|
||||
value = scopeCrossStatic.complement;
|
||||
}
|
||||
]
|
||||
)
|
||||
[
|
||||
|
@ -549,30 +346,6 @@
|
|||
]
|
||||
)
|
||||
);
|
||||
|
||||
devShells.default = mkDevShell scopeHostStatic;
|
||||
devShells.all-features = mkDevShell
|
||||
(scopeHostStatic.overrideScope (final: prev: {
|
||||
main = prev.main.override {
|
||||
all_features = true;
|
||||
disable_features = [
|
||||
# dont include experimental features
|
||||
"experimental"
|
||||
# jemalloc profiling/stats features are expensive and shouldn't
|
||||
# be expected on non-debug builds.
|
||||
"jemalloc_prof"
|
||||
"jemalloc_stats"
|
||||
# this is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# conduwuit_mods is a development-only hot reload feature
|
||||
"conduwuit_mods"
|
||||
];
|
||||
};
|
||||
}));
|
||||
devShells.no-features = mkDevShell
|
||||
(scopeHostStatic.overrideScope (final: prev: {
|
||||
main = prev.main.override { default_features = false; };
|
||||
}));
|
||||
devShells.dynamic = mkDevShell scopeHost;
|
||||
});
|
||||
}
|
||||
);
|
||||
}
|
||||
|
|
|
@ -1,36 +0,0 @@
|
|||
{ inputs
|
||||
|
||||
# Dependencies
|
||||
, main
|
||||
, mdbook
|
||||
, stdenv
|
||||
}:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
inherit (main) pname version;
|
||||
|
||||
src = inputs.nix-filter {
|
||||
root = inputs.self;
|
||||
include = [
|
||||
"book.toml"
|
||||
"conduwuit-example.toml"
|
||||
"CODE_OF_CONDUCT.md"
|
||||
"CONTRIBUTING.md"
|
||||
"README.md"
|
||||
"development.md"
|
||||
"debian/conduwuit.service"
|
||||
"debian/README.md"
|
||||
"arch/conduwuit.service"
|
||||
"docs"
|
||||
"theme"
|
||||
];
|
||||
};
|
||||
|
||||
nativeBuildInputs = [
|
||||
mdbook
|
||||
];
|
||||
|
||||
buildPhase = ''
|
||||
mdbook build -d $out
|
||||
'';
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL
|
||||
BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz
|
||||
IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy
|
||||
NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m
|
||||
ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
|
||||
AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt
|
||||
/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88
|
||||
awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp
|
||||
L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK
|
||||
K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl
|
||||
8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV
|
||||
HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy
|
||||
ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw
|
||||
DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ
|
||||
irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+
|
||||
HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e
|
||||
VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3
|
||||
y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d
|
||||
jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4=
|
||||
-----END CERTIFICATE-----
|
|
@ -1,50 +0,0 @@
|
|||
[global]
|
||||
address = "0.0.0.0"
|
||||
allow_device_name_federation = true
|
||||
allow_guest_registration = true
|
||||
allow_public_room_directory_over_federation = true
|
||||
allow_public_room_directory_without_auth = true
|
||||
allow_registration = true
|
||||
database_path = "/database"
|
||||
log = "trace,h2=debug,hyper=debug"
|
||||
port = [8008, 8448]
|
||||
trusted_servers = []
|
||||
only_query_trusted_key_servers = false
|
||||
query_trusted_key_servers_first = false
|
||||
query_trusted_key_servers_first_on_join = false
|
||||
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true
|
||||
ip_range_denylist = []
|
||||
url_preview_domain_contains_allowlist = ["*"]
|
||||
url_preview_domain_explicit_denylist = ["*"]
|
||||
media_compat_file_link = false
|
||||
media_startup_check = true
|
||||
prune_missing_media = true
|
||||
log_colors = true
|
||||
admin_room_notices = false
|
||||
allow_check_for_updates = false
|
||||
intentionally_unknown_config_option_for_testing = true
|
||||
rocksdb_log_level = "info"
|
||||
rocksdb_max_log_files = 1
|
||||
rocksdb_recovery_mode = 0
|
||||
rocksdb_paranoid_file_checks = true
|
||||
log_guest_registrations = false
|
||||
allow_legacy_media = true
|
||||
startup_netburst = true
|
||||
startup_netburst_keep = -1
|
||||
|
||||
allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true
|
||||
|
||||
# valgrind makes things so slow
|
||||
dns_timeout = 60
|
||||
dns_attempts = 20
|
||||
request_conn_timeout = 60
|
||||
request_timeout = 120
|
||||
well_known_conn_timeout = 60
|
||||
well_known_timeout = 60
|
||||
federation_idle_timeout = 300
|
||||
sender_timeout = 300
|
||||
sender_idle_timeout = 300
|
||||
sender_retry_backoff_limit = 300
|
||||
|
||||
[global.tls]
|
||||
dual_protocol = true
|
|
@ -1,89 +0,0 @@
|
|||
# Dependencies
|
||||
{ bashInteractive
|
||||
, buildEnv
|
||||
, coreutils
|
||||
, dockerTools
|
||||
, lib
|
||||
, main
|
||||
, stdenv
|
||||
, tini
|
||||
, writeShellScriptBin
|
||||
}:
|
||||
|
||||
let
|
||||
main' = main.override {
|
||||
profile = "test";
|
||||
all_features = true;
|
||||
disable_release_max_log_level = true;
|
||||
disable_features = [
|
||||
# console/CLI stuff isn't used or relevant for complement
|
||||
"console"
|
||||
"tokio_console"
|
||||
# sentry telemetry isn't useful for complement, disabled by default anyways
|
||||
"sentry_telemetry"
|
||||
"perf_measurements"
|
||||
# this is non-functional on nix for some reason
|
||||
"hardened_malloc"
|
||||
# dont include experimental features
|
||||
"experimental"
|
||||
# compression isn't needed for complement
|
||||
"brotli_compression"
|
||||
"gzip_compression"
|
||||
"zstd_compression"
|
||||
# complement doesn't need hot reloading
|
||||
"conduwuit_mods"
|
||||
# complement doesn't have URL preview media tests
|
||||
"url_preview"
|
||||
];
|
||||
};
|
||||
|
||||
start = writeShellScriptBin "start" ''
|
||||
set -euxo pipefail
|
||||
|
||||
${lib.getExe' coreutils "env"} \
|
||||
CONDUWUIT_SERVER_NAME="$SERVER_NAME" \
|
||||
${lib.getExe main'}
|
||||
'';
|
||||
in
|
||||
|
||||
dockerTools.buildImage {
|
||||
name = "complement-conduwuit";
|
||||
tag = "main";
|
||||
|
||||
copyToRoot = buildEnv {
|
||||
name = "root";
|
||||
pathsToLink = [
|
||||
"/bin"
|
||||
];
|
||||
paths = [
|
||||
bashInteractive
|
||||
coreutils
|
||||
main'
|
||||
start
|
||||
];
|
||||
};
|
||||
|
||||
config = {
|
||||
Cmd = [
|
||||
"${lib.getExe start}"
|
||||
];
|
||||
|
||||
Entrypoint = if !stdenv.hostPlatform.isDarwin
|
||||
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
|
||||
# are handled as expected
|
||||
then [ "${lib.getExe' tini "tini"}" "--" ]
|
||||
else [];
|
||||
|
||||
Env = [
|
||||
"CONDUWUIT_TLS__KEY=${./private_key.key}"
|
||||
"CONDUWUIT_TLS__CERTS=${./certificate.crt}"
|
||||
"CONDUWUIT_CONFIG=${./config.toml}"
|
||||
"RUST_BACKTRACE=full"
|
||||
];
|
||||
|
||||
ExposedPorts = {
|
||||
"8008/tcp" = {};
|
||||
"8448/tcp" = {};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb
|
||||
iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT
|
||||
LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a
|
||||
09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc
|
||||
ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga
|
||||
Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO
|
||||
/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu
|
||||
WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB
|
||||
DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb
|
||||
piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN
|
||||
D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ
|
||||
8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+
|
||||
3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq
|
||||
/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90
|
||||
FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q
|
||||
td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M
|
||||
Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A
|
||||
91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV
|
||||
8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh
|
||||
VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW
|
||||
UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K
|
||||
kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz
|
||||
KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7
|
||||
IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh
|
||||
tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM
|
||||
9MVtdgSkuh2gwkD/mMoAJXM=
|
||||
-----END PRIVATE KEY-----
|
|
@ -1,16 +0,0 @@
|
|||
-----BEGIN CERTIFICATE REQUEST-----
|
||||
MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK
|
||||
DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH
|
||||
uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR
|
||||
xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb
|
||||
o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B
|
||||
hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe
|
||||
vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB
|
||||
CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79
|
||||
ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3
|
||||
r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb
|
||||
XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK
|
||||
MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76
|
||||
U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/
|
||||
-----END CERTIFICATE REQUEST-----
|
|
@ -1,12 +0,0 @@
|
|||
authorityKeyIdentifier=keyid,issuer
|
||||
basicConstraints=CA:FALSE
|
||||
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
|
||||
subjectAltName = @alt_names
|
||||
|
||||
[alt_names]
|
||||
DNS.1 = *.docker.internal
|
||||
DNS.2 = hs1
|
||||
DNS.3 = hs2
|
||||
DNS.4 = hs3
|
||||
DNS.5 = hs4
|
||||
IP.1 = 127.0.0.1
|
|
@@ -1,220 +0,0 @@
# Dependencies (keep sorted)
{ craneLib
, inputs
, jq
, lib
, libiconv
, liburing
, pkgsBuildHost
, rocksdb
, removeReferencesTo
, rust
, rust-jemalloc-sys
, stdenv

# Options (keep sorted)
, all_features ? false
, default_features ? true
# default list of disabled features
, disable_features ? [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
]
, disable_release_max_log_level ? false
, features ? []
, profile ? "release"
# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag
# haswell is pretty much any x86 cpu made in the last 12 years, and
# supports modern CPU extensions that rocksdb can make use of.
# disable if trying to make a portable x86_64 build for very old hardware
, x86_64_haswell_target_optimised ? false
}:

let
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
lib.optionals all_features allFeatures);
disable_features' = disable_features ++ lib.optionals disable_release_max_log_level ["release_max_log_level"];
features'' = lib.subtractLists disable_features' features';

featureEnabled = feature : builtins.elem feature features'';

enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;

# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building it's
# own. In order for this to work, we need to set flags on the build that match
# whatever flags tikv-jemalloc-sys was going to use. These are dependent on
# which features we enable in tikv-jemalloc-sys.
rust-jemalloc-sys' = (rust-jemalloc-sys.override {
# tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature
unprefixed = true;
}).overrideAttrs (old: {
configureFlags = old.configureFlags ++
# we dont need docs
[ "--disable-doc" ] ++
# we dont need cxx/C++ integration
[ "--disable-cxx" ] ++
# tikv-jemalloc-sys/profiling feature
lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++
# tikv-jemalloc-sys/stats feature
(if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]);
});

buildDepsOnlyEnv =
let
rocksdb' = (rocksdb.override {
jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;

# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
enableLiburing = enableLiburing;
}).overrideAttrs (old: {
enableLiburing = enableLiburing;
cmakeFlags = (if x86_64_haswell_target_optimised then (lib.subtractLists [
# dont make a portable build if x86_64_haswell_target_optimised is enabled
"-DPORTABLE=1"
] old.cmakeFlags
++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ])
)
++ old.cmakeFlags;

# outputs has "tools" which we dont need or use
outputs = [ "out" ];

# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
in
{
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";

CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
stdenv;
});

buildPackageEnv = {
CONDUWUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
} // buildDepsOnlyEnv // {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString x86_64_haswell_target_optimised
" -Ctarget-cpu=haswell";
};



commonAttrs = {
inherit
(craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
})
pname
version;

src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;

# Keep sorted
include = [
".cargo"
"Cargo.lock"
"Cargo.toml"
"src"
];
};

doCheck = true;

cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [])
"--features " + (builtins.concatStringsSep "," features'');

dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";

buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];

nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgsBuildHost.rustPlatform.bindgenHook

# We don't actually depend on `jq`, but crane's `buildPackage` does, but
# its `buildDepsOnly` doesn't. This causes those two derivations to have
# differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
# rebuilds of bindgen and its depedents.
jq
];
};
in

craneLib.buildPackage ( commonAttrs // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
env = buildDepsOnlyEnv;
});

doCheck = true;

cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [])
"--features " + (builtins.concatStringsSep "," features'');

env = buildPackageEnv;

passthru = {
env = buildPackageEnv;
};

meta.mainProgram = commonAttrs.pname;
})
@@ -1,46 +0,0 @@
{ inputs

# Dependencies
, dockerTools
, lib
, main
, stdenv
, tini
}:

dockerTools.buildLayeredImage {
name = main.pname;
tag = "main";
created = "@${toString inputs.self.lastModified}";
contents = [
dockerTools.caCertificates
main
];
config = {
Entrypoint = if !stdenv.hostPlatform.isDarwin
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
then [ "${lib.getExe' tini "tini"}" "--" ]
else [];
Cmd = [
"${lib.getExe main}"
];
Env = [
"RUST_BACKTRACE=full"
];
Labels = {
"org.opencontainers.image.authors" = "June Clementine Strawberry <june@girlboss.ceo> and Jason Volk
<jason@zemos.net>";
"org.opencontainers.image.created" ="@${toString inputs.self.lastModified}";
"org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust";
"org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/";
"org.opencontainers.image.licenses" = "Apache-2.0";
"org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or "";
"org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit";
"org.opencontainers.image.title" = main.pname;
"org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/";
"org.opencontainers.image.vendor" = "girlbossceo";
"org.opencontainers.image.version" = main.version;
};
};
}
@@ -1,21 +1,24 @@
[Unit]
Description=conduwuit Matrix homeserver
Description=Continuwuity - Matrix homeserver
Documentation=https://continuwuity.org/
Wants=network-online.target
After=network-online.target
Alias=matrix-conduwuit.service
Documentation=https://conduwuit.puppyirl.gay/

[Service]
DynamicUser=yes
User=conduwuit
Group=conduwuit
Type=notify
Type=notify-reload
ReloadSignal=SIGUSR1

Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml"
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"

ExecStart=/usr/sbin/conduwuit
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"

ReadWritePaths=/var/lib/conduwuit /etc/conduwuit
ExecStart=/usr/bin/conduwuit

AmbientCapabilities=
CapabilityBoundingSet=

@@ -48,16 +51,17 @@ SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
#StateDirectory=conduwuit

StateDirectory=conduwuit
ConfigurationDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750

Restart=on-failure
RestartSec=5

TimeoutStopSec=2m
TimeoutStartSec=2m
TimeoutStopSec=4m
TimeoutStartSec=4m

StartLimitInterval=1m
StartLimitBurst=5
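The switch to Type=notify-reload with ReloadSignal=SIGUSR1 in this unit means systemd's reload verb now delivers SIGUSR1 to the running server instead of requiring a restart. A hypothetical operator workflow against this unit (the service name comes from the unit above; everything else is standard systemd tooling):

# apply a configuration change without a full restart (sketch; assumes the unit above is installed)
sudo systemctl daemon-reload              # pick up edits to the unit file itself
sudo systemctl reload conduwuit.service   # sends SIGUSR1, per ReloadSignal= above
systemctl status conduwuit.service        # Type=notify-reload reports when the reload completed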
pkg/debian/README.md (new file, 23 lines)
@@ -0,0 +1,23 @@
# Continuwuity for Debian

This document provides information about downloading and deploying the Debian package. You can also use this guide for other `apt`-based distributions such as Ubuntu.

### Installation

See the [generic deployment guide](../deploying/generic.md) for additional information about using the Debian package.

No `apt` repository is currently available. This feature is in development.

### Configuration

After installation, Continuwuity places the example configuration at `/etc/conduwuit/conduwuit.toml` as the default configuration file. The configuration file indicates which settings you must change before starting the service.

You can customize additional settings by uncommenting and modifying the configuration options in `/etc/conduwuit/conduwuit.toml`.
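A minimal sketch of the kind of edit the shipped example file expects. The option names (`server_name`, `address`, `port` under `[global]`) are assumed from upstream defaults rather than quoted from this diff, and the domain is a placeholder:

[global]
server_name = "example.com"   # hypothetical domain; must be set before first start
address = "127.0.0.1"         # keep bound to localhost when running behind a reverse proxy
port = 6167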
### Running

The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/sbin/conduwuit`.
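A hypothetical first-run sequence on a Debian host, using only the paths and unit name given above (the unit logs to journald, as set in the systemd unit earlier in this diff):

# edit the required settings, then start and enable the service
sudoedit /etc/conduwuit/conduwuit.toml
sudo systemctl enable --now conduwuit.service
journalctl -u conduwuit.service -f   # follow the logs while testing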
By default, this package assumes that Continuwuity runs behind a reverse proxy. The default configuration options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS. To federate properly, you must set up TLS certificates and certificate renewal.

For information about setting up a reverse proxy and TLS, consult online documentation and guides. The [generic deployment guide](../deploying/generic.md#setting-up-the-reverse-proxy) documents Caddy, which is the most user-friendly option for reverse proxy configuration.
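A minimal Caddy sketch for the setup described above; the domain is a placeholder and the backend address is the default `localhost:6167` listener mentioned in the previous paragraph. Caddy obtains and renews the TLS certificate automatically:

matrix.example.com {
    # proxy Matrix client and federation traffic to Continuwuity
    reverse_proxy 127.0.0.1:6167
}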
pkg/debian/postinst (new file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/sh
set -e

# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule

CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit

case "$1" in
configure)
echo ''
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
echo 'To start the server, run: systemctl start conduwuit.service'
echo ''

;;
esac

#DEBHELPER#
@@ -20,24 +20,18 @@ case $1 in

if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then
if test -L "$CONDUWUIT_CONFIG_PATH"; then
echo "Deleting conduwuit configuration files"
echo "Deleting continuwuity configuration files"
rm -v -r "$CONDUWUIT_CONFIG_PATH"
fi
fi

if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then
if test -L "$CONDUWUIT_DATABASE_PATH"; then
echo "Deleting conduwuit database directory"
echo "Deleting continuwuity database directory"
rm -r "$CONDUWUIT_DATABASE_PATH"
fi
fi

if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then
if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then
echo "Removing matrix-conduit symlink"
rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
fi
fi
;;
esac
pkg/fedora/continuwuity.spec.rpkg (new file, 80 lines)
@@ -0,0 +1,80 @@
# This should be run using rpkg-util: https://docs.pagure.org/rpkg-util
# it requires Internet access and is not suitable for Fedora main repos
# TODO: rpkg-util is no longer maintained, find a replacement

Name: continuwuity
Version: {{{ git_repo_version }}}
Release: 1%{?dist}
Summary: Very cool Matrix chat homeserver written in Rust

License: Apache-2.0 AND MIT

URL: https://continuwuity.org
VCS: {{{ git_repo_vcs }}}
Source: {{{ git_repo_pack }}}

BuildRequires: cargo-rpm-macros >= 25
BuildRequires: systemd-rpm-macros
# Needed to build rust-librocksdb-sys
BuildRequires: clang
BuildRequires: liburing-devel

Requires: liburing
Requires: glibc
Requires: libstdc++

%global _description %{expand:
A cool hard fork of Conduit, a Matrix homeserver written in Rust}

%description %{_description}

%prep
{{{ git_repo_setup_macro }}}
%cargo_prep -N
# Perform an online build so Git dependencies can be retrieved
sed -i 's/^offline = true$//' .cargo/config.toml

%build
%cargo_build

# Here's the one legally required mystery incantation in this file.
# Some of our dependencies have source files which are (for some reason) marked as executable.
# Files in .cargo/registry/ are copied into /usr/src/ by the debuginfo machinery
# at the end of the build step, and then the BRP shebang mangling script checks
# the entire buildroot to find executable files, and fails the build because
# it thinks Rust's file attributes are shebangs because they start with `#!`.
# So we have to clear the executable bit on all of them before that happens.
find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +

# TODO: this fails currently because it's forced to run in offline mode
# {cargo_license -- --no-dev} > LICENSE.dependencies

%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml

%files
%license LICENSE
%license src/core/matrix/state_res/LICENSE
%doc CODE_OF_CONDUCT.md
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config %{_sysconfdir}/conduwuit/conduwuit.toml

%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service
# Do not create /var/lib/conduwuit, systemd will create it if necessary

%post
%systemd_post conduwuit.service

%preun
%systemd_preun conduwuit.service

%postun
%systemd_postun_with_restart conduwuit.service

%changelog
{{{ git_repo_changelog }}}
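The comment at the top of the spec points at rpkg-util, which expands the {{{ ... }}} template macros from the Git checkout before building. A rough sketch of a local build with it, assuming rpkg-util is installed and the exact flags may differ between rpkg versions:

# from a checkout of the repository containing pkg/fedora/continuwuity.spec.rpkg
cd pkg/fedora && rpkg local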
@@ -4,7 +4,8 @@
, stdenv
}:

lib.optionalAttrs stdenv.hostPlatform.isStatic {
lib.optionalAttrs stdenv.hostPlatform.isStatic
{
ROCKSDB_STATIC = "";
}
//

@@ -12,12 +13,7 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic {
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
([]
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
++ lib.optionals
(lib.optionals
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
++ lib.optionals
pkg/nix/pkgs/main/default.nix (new file, 224 lines)
@@ -0,0 +1,224 @@
# Dependencies (keep sorted)
{ craneLib
, inputs
, jq
, lib
, libiconv
, liburing
, pkgsBuildHost
, rocksdb
, removeReferencesTo
, rust
, rust-jemalloc-sys
, stdenv

# Options (keep sorted)
, all_features ? false
, default_features ? true
# default list of disabled features
, disable_features ? [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
]
, disable_release_max_log_level ? false
, features ? [ ]
, profile ? "release"
# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag
# haswell is pretty much any x86 cpu made in the last 12 years, and
# supports modern CPU extensions that rocksdb can make use of.
# disable if trying to make a portable x86_64 build for very old hardware
, x86_64_haswell_target_optimised ? false
}:

let
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
lib.optionals all_features allFeatures);
disable_features' = disable_features ++ lib.optionals disable_release_max_log_level [ "release_max_log_level" ];
features'' = lib.subtractLists disable_features' features';

featureEnabled = feature: builtins.elem feature features'';

enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;

# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building it's
# own. In order for this to work, we need to set flags on the build that match
# whatever flags tikv-jemalloc-sys was going to use. These are dependent on
# which features we enable in tikv-jemalloc-sys.
rust-jemalloc-sys' = (rust-jemalloc-sys.override {
# tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature
unprefixed = true;
}).overrideAttrs (old: {
configureFlags = old.configureFlags ++
# we dont need docs
[ "--disable-doc" ] ++
# we dont need cxx/C++ integration
[ "--disable-cxx" ] ++
# tikv-jemalloc-sys/profiling feature
lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++
# tikv-jemalloc-sys/stats feature
(if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]);
});

buildDepsOnlyEnv =
let
rocksdb' = (rocksdb.override {
jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;

# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
inherit enableLiburing;
}).overrideAttrs (old: {
inherit enableLiburing;
cmakeFlags = (if x86_64_haswell_target_optimised then
(lib.subtractLists [
# dont make a portable build if x86_64_haswell_target_optimised is enabled
"-DPORTABLE=1"
]
old.cmakeFlags
++ [ "-DPORTABLE=haswell" ]) else [ "-DPORTABLE=1" ]
)
++ old.cmakeFlags;

# outputs has "tools" which we dont need or use
outputs = [ "out" ];

# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
in
{
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";

CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
stdenv;
});

buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
} // buildDepsOnlyEnv // {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString x86_64_haswell_target_optimised
" -Ctarget-cpu=haswell";
};



commonAttrs = {
inherit
(craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
})
pname
version;

src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;

# Keep sorted
include = [
".cargo"
"Cargo.lock"
"Cargo.toml"
"src"
"xtask"
];
};

doCheck = true;

cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');

dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";

buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];

nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgsBuildHost.rustPlatform.bindgenHook

# We don't actually depend on `jq`, but crane's `buildPackage` does, but
# its `buildDepsOnly` doesn't. This causes those two derivations to have
# differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
# rebuilds of bindgen and its depedents.
jq
];
};
in

craneLib.buildPackage (commonAttrs // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
env = buildDepsOnlyEnv;
});

doCheck = true;

cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');

env = buildPackageEnv;

passthru = {
env = buildPackageEnv;
};

meta.mainProgram = commonAttrs.pname;
})
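For readers unfamiliar with how this file is consumed: it is an ordinary callPackage-style Nix function, so the flake can instantiate it with any of the options declared at its top. A hypothetical invocation; the attribute path and the craneLib/inputs/rust arguments are assumptions about the surrounding flake, not something shown in this diff:

# sketch only: build a variant with every feature enabled and haswell tuning
pkgs.callPackage ./pkg/nix/pkgs/main/default.nix {
  inherit craneLib inputs rust;   # supplied by the flake, not by nixpkgs itself
  all_features = true;
  disable_release_max_log_level = true;
  x86_64_haswell_target_optimised = true;
  profile = "release";
}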
Some files were not shown because too many files have changed in this diff.