Compare commits

..

15 commits

Author SHA1 Message Date
Jacob Taylor
db2e7690c8 more funny settings (part 3 of 12)
Some checks failed
Checks / Prefligit / prefligit (push) Failing after 5s
Release Docker Image / define-variables (push) Failing after 2s
Release Docker Image / build-image (linux/amd64, release, linux-amd64, base) (push) Has been skipped
Release Docker Image / build-image (linux/arm64, release, linux-arm64, base) (push) Has been skipped
Release Docker Image / merge (push) Has been skipped
Checks / Rust / Format (push) Failing after 3s
Checks / Rust / Clippy (push) Failing after 16s
Checks / Rust / Cargo Test (push) Failing after 15s
2025-07-03 14:44:28 -07:00
Jacob Taylor
1f1cde297d sender_workers scaling. this time, with feeling! 2025-07-03 14:44:28 -07:00
Jacob Taylor
2e80200c64 vehicle loan documentation now available at window 7 2025-07-03 14:44:27 -07:00
Jacob Taylor
9e8c193840 lock the getter instead ??? c/o M 2025-07-03 14:39:10 -07:00
Jacob Taylor
2b61f91f1f make fetching key room events less smart 2025-07-03 14:31:50 -07:00
Jacob Taylor
59cd268a3f change rocksdb stats level to 3
scale rocksdb background jobs and subcompactions

change rocksdb default error level to info from error

delete unused num_threads function

fix warns from cargo
2025-07-03 14:31:50 -07:00
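The commit message above lists RocksDB tuning steps (statistics level, background jobs, subcompactions), but this compare view does not show the Rust diff that implements them. As a rough sketch only, scaling the background work from the host's core count through the rust-rocksdb Options API could look like the following; the helper name and the exact scaling choices are assumptions for illustration, not the code on this branch.

use rocksdb::Options;
use std::thread;

// Hypothetical helper: derive RocksDB background parallelism from the core
// count. The name and the ratios are illustrative, not conduwuit's values.
fn tune_rocksdb_parallelism(opts: &mut Options) {
    // Fall back to 1 if the core count cannot be determined.
    let cores = thread::available_parallelism().map_or(1, |n| n.get()) as i32;
    // Let flushes and compactions use more threads on larger machines.
    opts.set_max_background_jobs(cores.max(2));
    // Allow a single large compaction to be split into subcompactions.
    opts.set_max_subcompactions(cores.max(1) as u32);
}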
nexy7574
95a92e5f59 log which room struggled to get mainline depth 2025-07-03 14:30:26 -07:00
nexy7574
2f643b8fbf more logs 2025-07-03 14:30:26 -07:00
nexy7574
ddc0b9fb1d Fix room ID check 2025-07-03 13:12:43 -07:00
nexy7574
64b89a4365 Kick up a fuss when m.room.create is unfindable 2025-07-03 13:12:43 -07:00
nexy7574
8ed76fa38c Note about ruma#2064 in TODO 2025-07-03 13:12:43 -07:00
nexy7574
93a6442315 fix an auth rule not applying correctly 2025-07-03 13:12:43 -07:00
Jacob Taylor
1383fd198a upgrade some settings to enable 5g in continuwuity
enable converged 6g at the edge in continuwuity

better stateinfo_cache_capacity default

better roomid_spacehierarchy_cache_capacity

make sender workers default better and clamp value to core count

update sender workers documentation

add more parallelism_scaled and make them public

update 1 document
2025-07-03 13:12:43 -07:00
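Among the items above, clamping the sender worker count to the core count is a small, self-contained piece of logic. A minimal sketch of such a clamp is shown below; the function and parameter names are assumptions for illustration, since the actual change to conduwuit's sender service is not part of this view.

use std::thread;

// Hypothetical clamp: spawn at least one sender worker and never more than
// the detected parallelism. Names are illustrative only.
fn effective_sender_workers(configured: usize) -> usize {
    let cores = thread::available_parallelism().map_or(1, |n| n.get());
    configured.clamp(1, cores)
}

With this shape, a configured value of 0 or 1 yields a single worker, while an oversized value is quietly reduced to the core count.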
Jacob Taylor
6262c61b0a bump the number of allowed immutable memtables by 1, to allow for greater flood protection
this should probably not be applied if you have rocksdb_atomic_flush = false (the default)
2025-07-03 13:12:43 -07:00
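The caveat in that message implies the extra immutable memtable only makes sense when column families are flushed atomically. A hedged sketch of that conditional, using the rust-rocksdb Options API with an assumed baseline of two write buffers (not necessarily conduwuit's real default), might read:

use rocksdb::Options;

// Illustrative only: grant one extra in-memory write buffer when atomic
// flush is enabled, giving write floods more headroom before stalls.
fn apply_memtable_headroom(opts: &mut Options, atomic_flush: bool) {
    let base_buffers: i32 = 2; // assumed baseline, not the project's default
    let extra = if atomic_flush { 1 } else { 0 };
    opts.set_atomic_flush(atomic_flush);
    opts.set_max_write_buffer_number(base_buffers + extra);
}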
Jacob Taylor
c4a80540c0 probably incorrectly delete support for non-standardized matrix srv record 2025-07-03 13:12:43 -07:00
72 changed files with 1313 additions and 4102 deletions

View file

@@ -1,2 +0,0 @@
[alias]
xtask = "run --package xtask --"

View file

@@ -1,55 +0,0 @@
version: 1
x-source: &source forgejo.ellis.link/continuwuation/continuwuity
x-tags:
releases: &tags-releases
tags:
allow:
- "latest"
- "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
- "v[0-9]+\\.[0-9]+"
- "v[0-9]+"
main: &tags-main
tags:
allow:
- "latest"
- "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
- "v[0-9]+\\.[0-9]+"
- "v[0-9]+"
- "main"
commits: &tags-commits
tags:
allow:
- "latest"
- "v[0-9]+\\.[0-9]+\\.[0-9]+(-[a-z0-9\\.]+)?"
- "v[0-9]+\\.[0-9]+"
- "v[0-9]+"
- "main"
- "sha-[a-f0-9]+"
all: &tags-all
tags:
allow:
- ".*"
# Registry credentials
creds:
- registry: forgejo.ellis.link
user: "{{env \"BUILTIN_REGISTRY_USER\"}}"
pass: "{{env \"BUILTIN_REGISTRY_PASSWORD\"}}"
- registry: registry.gitlab.com
user: "{{env \"GITLAB_USERNAME\"}}"
pass: "{{env \"GITLAB_TOKEN\"}}"
# Global defaults
defaults:
parallel: 3
interval: 2h
digestTags: true
# Sync configuration - each registry gets different image sets
sync:
- source: *source
target: registry.gitlab.com/continuwuity/continuwuity
type: repository
<<: *tags-main

View file

@@ -11,16 +11,16 @@ concurrency:
jobs:
build-and-deploy:
name: 🏗️ Build and Deploy
name: Build and Deploy Element Web
runs-on: ubuntu-latest
steps:
- name: 📦 Setup Node.js
uses: https://github.com/actions/setup-node@v4
- name: Setup Node.js
uses: https://code.forgejo.org/actions/setup-node@v4
with:
node-version: "22"
node-version: "20"
- name: 🔨 Clone, setup, and build Element Web
- name: Clone, setup, and build Element Web
run: |
echo "Cloning Element Web..."
git clone https://github.com/maunium/element-web
@@ -64,7 +64,7 @@ jobs:
echo "Checking for build output..."
ls -la webapp/
- name: ⚙️ Create config.json
- name: Create config.json
run: |
cat <<EOF > ./element-web/webapp/config.json
{
@@ -100,25 +100,28 @@ jobs:
echo "Created ./element-web/webapp/config.json"
cat ./element-web/webapp/config.json
- name: 📤 Upload Artifact
- name: Upload Artifact
uses: https://code.forgejo.org/actions/upload-artifact@v3
with:
name: element-web
path: ./element-web/webapp/
retention-days: 14
- name: 🛠️ Install Wrangler
- name: Install Wrangler
run: npm install --save-dev wrangler@latest
- name: 🚀 Deploy to Cloudflare Pages
if: vars.CLOUDFLARE_PROJECT_NAME != ''
id: deploy
- name: Deploy to Cloudflare Pages (Production)
if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
uses: https://github.com/cloudflare/wrangler-action@v3
with:
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
command: >-
pages deploy ./element-web/webapp
--branch="${{ github.ref == 'refs/heads/main' && 'main' || github.head_ref || github.ref_name }}"
--commit-dirty=true
--project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
- name: Deploy to Cloudflare Pages (Preview)
if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
uses: https://github.com/cloudflare/wrangler-action@v3
with:
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"

View file

@@ -1,47 +0,0 @@
name: Mirror Container Images
on:
schedule:
# Run every 2 hours
- cron: "0 */2 * * *"
workflow_dispatch:
inputs:
dry_run:
description: 'Dry run (check only, no actual mirroring)'
required: false
default: false
type: boolean
concurrency:
group: "mirror-images"
cancel-in-progress: true
jobs:
mirror-images:
runs-on: ubuntu-latest
env:
BUILTIN_REGISTRY_USER: ${{ vars.BUILTIN_REGISTRY_USER }}
BUILTIN_REGISTRY_PASSWORD: ${{ secrets.BUILTIN_REGISTRY_PASSWORD }}
GITLAB_USERNAME: ${{ vars.GITLAB_USERNAME }}
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install regctl
uses: https://forgejo.ellis.link/continuwuation/regclient-actions/regctl-installer@main
with:
binary: regsync
- name: Check what images need mirroring
run: |
echo "Checking images that need mirroring..."
regsync check -c .forgejo/regsync/regsync.yml -v info
- name: Mirror images
if: ${{ !inputs.dry_run }}
run: |
echo "Starting image mirroring..."
regsync once -c .forgejo/regsync/regsync.yml -v info

View file

@@ -204,31 +204,13 @@ jobs:
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Extract binary from container (image)
id: extract-binary-image
run: |
mkdir -p /tmp/binaries
digest="${{ steps.build.outputs.digest }}"
echo "container_id=$(docker create --platform ${{ matrix.platform }} ${{ needs.define-variables.outputs.images_list }}@$digest)" >> $GITHUB_OUTPUT
- name: Extract binary from container (copy)
run: docker cp ${{ steps.extract-binary-image.outputs.container_id }}:/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
- name: Extract binary from container (cleanup)
run: docker rm ${{ steps.extract-binary-image.outputs.container_id }}
- name: Upload binary artifact
uses: forgejo/upload-artifact@v4
with:
name: conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
path: /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
if-no-files-found: error
- name: Upload digest
uses: forgejo/upload-artifact@v4
with:
name: digests-${{ matrix.slug }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 5
retention-days: 1
merge:
runs-on: dind
@@ -256,13 +238,12 @@ jobs:
uses: docker/metadata-action@v5
with:
tags: |
type=semver,pattern={{version}},prefix=v
type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v
type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v
type=semver,pattern=v{{version}}
type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }}
type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
type=ref,event=pr
type=sha,format=long
type=raw,value=latest,enable=${{ !startsWith(github.ref, 'refs/tags/v') }}
images: ${{needs.define-variables.outputs.images}}
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
env:

Cargo.lock (generated, 145 changed lines)
View file

@@ -50,56 +50,12 @@ dependencies = [
"alloc-no-stdlib",
]
[[package]]
name = "anstream"
version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"is_terminal_polyfill",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"
[[package]]
name = "anstyle-parse"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "anstyle-wincon"
version = "3.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa"
dependencies = [
"anstyle",
"once_cell_polyfill",
"windows-sys 0.59.0",
]
[[package]]
name = "anyhow"
version = "1.0.98"
@@ -764,25 +720,14 @@ dependencies = [
"clap_derive",
]
[[package]]
name = "clap-markdown"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2a2617956a06d4885b490697b5307ebb09fec10b088afc18c81762d848c2339"
dependencies = [
"clap",
]
[[package]]
name = "clap_builder"
version = "4.5.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e"
dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim",
]
[[package]]
@@ -803,16 +748,6 @@ version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
[[package]]
name = "clap_mangen"
version = "0.2.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "724842fa9b144f9b89b3f3d371a89f3455eea660361d13a554f68f8ae5d6c13a"
dependencies = [
"clap",
"roff",
]
[[package]]
name = "cmake"
version = "0.1.54"
@@ -828,12 +763,6 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
[[package]]
name = "colorchoice"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
[[package]]
name = "concurrent-queue"
version = "2.5.0"
@@ -868,7 +797,6 @@ dependencies = [
"tokio-metrics",
"tracing",
"tracing-flame",
"tracing-journald",
"tracing-opentelemetry",
"tracing-subscriber",
]
@@ -1076,7 +1004,6 @@ dependencies = [
"loole",
"lru-cache",
"rand 0.8.5",
"recaptcha-verify",
"regex",
"reqwest",
"ruma",
@@ -2471,12 +2398,6 @@ version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
[[package]]
name = "is_terminal_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
[[package]]
name = "itertools"
version = "0.12.1"
@@ -3085,12 +3006,6 @@ dependencies = [
"portable-atomic",
]
[[package]]
name = "once_cell_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad"
[[package]]
name = "openssl-probe"
version = "0.1.6"
@@ -3752,17 +3667,6 @@ dependencies = [
"crossbeam-utils",
]
[[package]]
name = "recaptcha-verify"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71e3be7b2e46e24637ac96b0c9f70070f188652018573f36f4e511dcad09738a"
dependencies = [
"reqwest",
"serde",
"serde_json",
]
[[package]]
name = "redox_syscall"
version = "0.5.13"
@ -3891,12 +3795,6 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "roff"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88f8660c1ff60292143c98d08fc6e2f654d722db50410e3f3797d40baaf9d8f3"
[[package]]
name = "ruma"
version = "0.10.1"
@@ -4738,12 +4636,6 @@ dependencies = [
"quote",
]
[[package]]
name = "strsim"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "subslice"
version = "0.2.3"
@@ -5286,17 +5178,6 @@ dependencies = [
"tracing-subscriber",
]
[[package]]
name = "tracing-journald"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657"
dependencies = [
"libc",
"tracing-core",
"tracing-subscriber",
]
[[package]]
name = "tracing-log"
version = "0.2.0"
@@ -5474,12 +5355,6 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
[[package]]
name = "utf8parse"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
version = "1.17.0"
@@ -6131,26 +6006,6 @@ dependencies = [
"markup5ever",
]
[[package]]
name = "xtask"
version = "0.5.0-rc.6"
dependencies = [
"clap",
"serde",
"serde_json",
]
[[package]]
name = "xtask-generate-commands"
version = "0.5.0-rc.6"
dependencies = [
"clap-markdown",
"clap_builder",
"clap_mangen",
"conduwuit",
"conduwuit_admin",
]
[[package]]
name = "yansi"
version = "1.0.1"

View file

@@ -2,7 +2,7 @@
[workspace]
resolver = "2"
members = ["src/*", "xtask/*"]
members = ["src/*"]
default-members = ["src/*"]
[workspace.package]
@@ -213,8 +213,6 @@ default-features = false
version = "0.3.19"
default-features = false
features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"]
[workspace.dependencies.tracing-journald]
version = "0.3.1"
[workspace.dependencies.tracing-core]
version = "0.1.33"
default-features = false
@@ -638,11 +636,6 @@ package = "conduwuit_build_metadata"
path = "src/build_metadata"
default-features = false
[workspace.dependencies.conduwuit]
package = "conduwuit"
path = "src/main"
###############################################################################
#
# Release profiles
@@ -768,8 +761,7 @@ inherits = "dev"
# '-Clink-arg=-Wl,-z,nodlopen',
# '-Clink-arg=-Wl,-z,nodelete',
#]
[profile.dev.package.xtask-generate-commands]
inherits = "dev"
[profile.dev.package.conduwuit]
inherits = "dev"
#rustflags = [

View file

@@ -17,10 +17,6 @@ DeviceAllow=char-tty
StandardInput=tty-force
StandardOutput=tty
StandardError=journal+console
Environment="CONTINUWUITY_LOG_TO_JOURNALD=1"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
TTYReset=yes
# uncomment to allow buffer to be cleared every restart
TTYVTDisallocate=no

View file

@@ -407,11 +407,6 @@
# invites, or create/join or otherwise modify rooms.
# They are effectively read-only.
#
# If you want to use this to screen people who register on your server,
# you should add a room to `auto_join_rooms` that is public, and contains
# information that new users can read (since they won't be able to DM
# anyone, or send a message, and may be confused).
#
#suspend_on_register = false
# Enabling this setting opens registration to anyone without restrictions.
@@ -441,26 +436,6 @@
#
#registration_token_file =
# The public site key for reCaptcha. If this is provided, reCaptcha
# becomes required during registration. If both captcha *and*
# registration token are enabled, both will be required during
# registration.
#
# IMPORTANT: "Verify the origin of reCAPTCHA solutions" **MUST** BE
# DISABLED IF YOU WANT THE CAPTCHA TO WORK IN 3RD PARTY CLIENTS, OR
# CLIENTS HOSTED ON DOMAINS OTHER THAN YOUR OWN!
#
# Registration must be enabled (`allow_registration` must be true) for
# this to have any effect.
#
#recaptcha_site_key =
# The private site key for reCaptcha.
# If this is omitted, captcha registration will not work,
# even if `recaptcha_site_key` is set.
#
#recaptcha_private_site_key =
# Controls whether encrypted rooms and events are allowed.
#
#allow_encryption = true
@@ -696,21 +671,6 @@
#
#log_thread_ids = false
# Enable journald logging on Unix platforms
#
# When enabled, log output will be sent to the systemd journal
# This is only supported on Unix platforms
#
#log_to_journald = false
# The syslog identifier to use with journald logging
#
# Only used when journald logging is enabled
#
# Defaults to the binary name
#
#journald_identifier =
# OpenID token expiration/TTL in seconds.
#
# These are the OpenID tokens that are primarily used for Matrix account
@@ -1104,13 +1064,6 @@
#
#presence_timeout_remote_users = true
# Allow local read receipts.
#
# Disabling this will effectively also disable outgoing federated read
# receipts.
#
#allow_local_read_receipts = true
# Allow receiving incoming read receipts from remote servers.
#
#allow_incoming_read_receipts = true
@@ -1119,13 +1072,6 @@
#
#allow_outgoing_read_receipts = true
# Allow local typing updates.
#
# Disabling this will effectively also disable outgoing federated typing
# updates.
#
#allow_local_typing = true
# Allow outgoing typing updates to federation.
#
#allow_outgoing_typing = true

View file

@@ -14,9 +14,6 @@ Type=notify
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
Environment="CONTINUWUITY_LOG_TO_JOURNALD=1"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
ExecStart=/usr/sbin/conduwuit
ReadWritePaths=/var/lib/conduwuit /etc/conduwuit

View file

@@ -15,7 +15,6 @@
- [Appservices](appservices.md)
- [Maintenance](maintenance.md)
- [Troubleshooting](troubleshooting.md)
- [Admin Command Reference](admin_reference.md)
- [Development](development.md)
- [Contributing](contributing.md)
- [Testing](development/testing.md)

File diff suppressed because it is too large.

View file

@@ -1,21 +0,0 @@
# Command-Line Help for `continuwuity`
This document contains the help content for the `continuwuity` command-line program.
**Command Overview:**
* [`continuwuity`↴](#continuwuity)
## `continuwuity`
a very cool Matrix chat homeserver written in Rust
**Usage:** `continuwuity [OPTIONS]`
###### **Options:**
* `-c`, `--config <CONFIG>` — Path to the config TOML file (optional)
* `-O`, `--option <OPTION>` — Override a configuration variable using TOML 'key=value' syntax
* `--read-only` — Run in a stricter read-only --maintenance mode
* `--maintenance` — Run in maintenance mode while refusing connections
* `--execute <EXECUTE>` — Execute console command automatically after startup

flake.lock (generated, 153 changed lines)
View file

@@ -10,11 +10,11 @@
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1751403276,
"narHash": "sha256-V0EPQNsQko1a8OqIWc2lLviLnMpR1m08Ej00z5RVTfs=",
"lastModified": 1738524606,
"narHash": "sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "896ad88fa57ad5dbcd267c0ac51f1b71ccfcb4dd",
"rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e",
"type": "github"
},
"original": {
@@ -32,11 +32,11 @@
"nixpkgs": "nixpkgs_4"
},
"locked": {
"lastModified": 1748883665,
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
"lastModified": 1737621947,
"narHash": "sha256-8HFvG7fvIFbgtaYAY2628Tb89fA55nPm2jSiNs0/Cws=",
"owner": "cachix",
"repo": "cachix",
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
"rev": "f65a3cd5e339c223471e64c051434616e18cc4f5",
"type": "github"
},
"original": {
@@ -63,11 +63,11 @@
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1744206633,
"narHash": "sha256-pb5aYkE8FOoa4n123slgHiOf1UbNSnKe5pEZC+xXD5g=",
"lastModified": 1728672398,
"narHash": "sha256-KxuGSoVUFnQLB2ZcYODW7AVPAh9JqRlD5BrfsC/Q4qs=",
"owner": "cachix",
"repo": "cachix",
"rev": "8a60090640b96f9df95d1ab99e5763a586be1404",
"rev": "aac51f698309fd0f381149214b7eee213c66ef0a",
"type": "github"
},
"original": {
@@ -77,6 +77,23 @@
"type": "github"
}
},
"complement": {
"flake": false,
"locked": {
"lastModified": 1741891349,
"narHash": "sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=",
"owner": "girlbossceo",
"repo": "complement",
"rev": "e587b3df569cba411aeac7c20b6366d03c143745",
"type": "github"
},
"original": {
"owner": "girlbossceo",
"ref": "main",
"repo": "complement",
"type": "github"
}
},
"crane": {
"inputs": {
"nixpkgs": [
@@ -100,11 +117,11 @@
},
"crane_2": {
"locked": {
"lastModified": 1750266157,
"narHash": "sha256-tL42YoNg9y30u7zAqtoGDNdTyXTi8EALDeCB13FtbQA=",
"lastModified": 1739936662,
"narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=",
"owner": "ipetkov",
"repo": "crane",
"rev": "e37c943371b73ed87faf33f7583860f81f1d5a48",
"rev": "19de14aaeb869287647d9461cbd389187d8ecdb7",
"type": "github"
},
"original": {
@@ -132,11 +149,11 @@
]
},
"locked": {
"lastModified": 1748273445,
"narHash": "sha256-5V0dzpNgQM0CHDsMzh+ludYeu1S+Y+IMjbaskSSdFh0=",
"lastModified": 1733323168,
"narHash": "sha256-d5DwB4MZvlaQpN6OQ4SLYxb5jA4UH5EtV5t5WOtjLPU=",
"owner": "cachix",
"repo": "devenv",
"rev": "668a50d8b7bdb19a0131f53c9f6c25c9071e1ffb",
"rev": "efa9010b8b1cfd5dd3c7ed1e172a470c3b84a064",
"type": "github"
},
"original": {
@@ -153,11 +170,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1751525020,
"narHash": "sha256-oDO6lCYS5Bf4jUITChj9XV7k3TP38DE0Ckz5n5ORCME=",
"lastModified": 1740724364,
"narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=",
"owner": "nix-community",
"repo": "fenix",
"rev": "a1a5f92f47787e7df9f30e5e5ac13e679215aa1e",
"rev": "edf7d9e431cda8782e729253835f178a356d3aab",
"type": "github"
},
"original": {
@@ -186,11 +203,11 @@
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
@@ -202,11 +219,11 @@
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
@@ -289,14 +306,15 @@
"nixpkgs": [
"cachix",
"nixpkgs"
]
],
"nixpkgs-stable": "nixpkgs-stable_2"
},
"locked": {
"lastModified": 1747372754,
"narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=",
"lastModified": 1733318908,
"narHash": "sha256-SVQVsbafSM1dJ4fpgyBqLZ+Lft+jcQuMtEL3lQWx2Sk=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "80479b6ec16fefd9c1db3ea13aeb038c60530f46",
"rev": "6f4e2a2112050951a314d2733a994fbab94864c6",
"type": "github"
},
"original": {
@@ -343,6 +361,23 @@
"type": "github"
}
},
"liburing": {
"flake": false,
"locked": {
"lastModified": 1740613216,
"narHash": "sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=",
"owner": "axboe",
"repo": "liburing",
"rev": "e1003e496e66f9b0ae06674869795edf772d5500",
"type": "github"
},
"original": {
"owner": "axboe",
"ref": "master",
"repo": "liburing",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": [
@@ -366,11 +401,11 @@
]
},
"locked": {
"lastModified": 1745930071,
"narHash": "sha256-bYyjarS3qSNqxfgc89IoVz8cAFDkF9yPE63EJr+h50s=",
"lastModified": 1727438425,
"narHash": "sha256-X8ES7I1cfNhR9oKp06F6ir4Np70WGZU5sfCOuNBEwMg=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b455edf3505f1bf0172b39a735caef94687d0d9c",
"rev": "f6c5ae4c1b2e411e6b1e6a8181cc84363d6a7546",
"type": "github"
},
"original": {
@@ -449,13 +484,29 @@
"type": "github"
}
},
"nixpkgs_2": {
"nixpkgs-stable_2": {
"locked": {
"lastModified": 1733212471,
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
"lastModified": 1730741070,
"narHash": "sha256-edm8WG19kWozJ/GqyYx2VjW99EdhjKwbY3ZwdlPAAlo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
"rev": "d063c1dd113c91ab27959ba540c0d9753409edf3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1730531603,
"narHash": "sha256-Dqg6si5CqIzm87sp57j5nTaeBbWhHFaVyG7V6L8k3lY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "7ffd9ae656aec493492b44d0ddfb28e79a1ea25d",
"type": "github"
},
"original": {
@@ -483,11 +534,11 @@
},
"nixpkgs_4": {
"locked": {
"lastModified": 1748190013,
"narHash": "sha256-R5HJFflOfsP5FBtk+zE8FpL8uqE7n62jqOsADvVshhE=",
"lastModified": 1733212471,
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "62b852f6c6742134ade1abdd2a21685fd617a291",
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
"type": "github"
},
"original": {
@@ -499,11 +550,11 @@
},
"nixpkgs_5": {
"locked": {
"lastModified": 1751498133,
"narHash": "sha256-QWJ+NQbMU+NcU2xiyo7SNox1fAuwksGlQhpzBl76g1I=",
"lastModified": 1740547748,
"narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d55716bb59b91ae9d1ced4b1ccdea7a442ecbfdb",
"rev": "3a05eebede89661660945da1f151959900903b6a",
"type": "github"
},
"original": {
@@ -518,26 +569,28 @@
"locked": {
"lastModified": 1741308171,
"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
"ref": "v9.11.1",
"owner": "girlbossceo",
"repo": "rocksdb",
"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
"revCount": 13177,
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
"type": "github"
},
"original": {
"owner": "girlbossceo",
"ref": "v9.11.1",
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
"repo": "rocksdb",
"type": "github"
}
},
"root": {
"inputs": {
"attic": "attic",
"cachix": "cachix",
"complement": "complement",
"crane": "crane_2",
"fenix": "fenix",
"flake-compat": "flake-compat_3",
"flake-utils": "flake-utils",
"liburing": "liburing",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_5",
"rocksdb": "rocksdb"
@@ -546,11 +599,11 @@
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1751433876,
"narHash": "sha256-IsdwOcvLLDDlkFNwhdD5BZy20okIQL01+UQ7Kxbqh8s=",
"lastModified": 1740691488,
"narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "11d45c881389dae90b0da5a94cde52c79d0fc7ef",
"rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5",
"type": "github"
},
"original": {

flake.nix (823 changed lines)
View file

@@ -2,344 +2,577 @@
inputs = {
attic.url = "github:zhaofengli/attic?ref=main";
cachix.url = "github:cachix/cachix?ref=master";
crane = {
url = "github:ipetkov/crane?ref=master";
};
fenix = {
url = "github:nix-community/fenix?ref=main";
inputs.nixpkgs.follows = "nixpkgs";
};
flake-compat = {
url = "github:edolstra/flake-compat?ref=master";
flake = false;
};
complement = { url = "github:girlbossceo/complement?ref=main"; flake = false; };
crane = { url = "github:ipetkov/crane?ref=master"; };
fenix = { url = "github:nix-community/fenix?ref=main"; inputs.nixpkgs.follows = "nixpkgs"; };
flake-compat = { url = "github:edolstra/flake-compat?ref=master"; flake = false; };
flake-utils.url = "github:numtide/flake-utils?ref=main";
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
rocksdb = {
url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=v9.11.1";
flake = false;
};
rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; };
liburing = { url = "github:axboe/liburing?ref=master"; flake = false; };
};
outputs =
inputs:
inputs.flake-utils.lib.eachDefaultSystem (
system:
let
pkgsHost = import inputs.nixpkgs {
outputs = inputs:
inputs.flake-utils.lib.eachDefaultSystem (system:
let
pkgsHost = import inputs.nixpkgs{
inherit system;
};
pkgsHostStatic = pkgsHost.pkgsStatic;
# The Rust toolchain to use
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI=";
};
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
inherit pkgs;
book = self.callPackage ./nix/pkgs/book {};
complement = self.callPackage ./nix/pkgs/complement {};
craneLib = ((inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain));
inherit inputs;
main = self.callPackage ./nix/pkgs/main {};
oci-image = self.callPackage ./nix/pkgs/oci-image {};
tini = pkgs.tini.overrideAttrs {
# newer clang/gcc is unhappy with tini-static: <https://3.dog/~strawberry/pb/c8y4>
patches = [ (pkgs.fetchpatch {
url = "https://patch-diff.githubusercontent.com/raw/krallin/tini/pull/224.patch";
hash = "sha256-4bTfAhRyIT71VALhHY13hUgbjLEUyvgkIJMt3w9ag3k=";
})
];
};
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [ "out" "dev" "man" ];
buildFlags = [ "library" ];
src = inputs.liburing;
};
rocksdb = (pkgs.rocksdb.override {
liburing = self.liburing;
}).overrideAttrs (old: {
src = inputs.rocksdb;
version = pkgs.lib.removePrefix
"v"
(builtins.fromJSON (builtins.readFile ./flake.lock))
.nodes.rocksdb.original.ref;
# we have this already at https://github.com/girlbossceo/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# unsetting this so i don't have to revert it and make this nix exclusive
patches = [];
cmakeFlags = pkgs.lib.subtractLists
[
# no real reason to have snappy or zlib, no one uses this
"-DWITH_SNAPPY=1"
"-DZLIB=1"
"-DWITH_ZLIB=1"
# we dont need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=1"
# we dont need to build rocksdb tests
"-DWITH_TESTS=1"
# we use rust-rocksdb via C interface and dont need C++ RTTI
"-DUSE_RTTI=1"
# this doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"-DFORCE_SSE42=1"
# PORTABLE will get set in main/default.nix
"-DPORTABLE=1"
]
old.cmakeFlags
++ [
# no real reason to have snappy, no one uses this
"-DWITH_SNAPPY=0"
"-DZLIB=0"
"-DWITH_ZLIB=0"
# we dont need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=0"
# we dont need trace tools
"-DWITH_TRACE_TOOLS=0"
# we dont need to build rocksdb tests
"-DWITH_TESTS=0"
# we use rust-rocksdb via C interface and dont need C++ RTTI
"-DUSE_RTTI=0"
];
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
});
scopeHost = mkScope pkgsHost;
scopeHostStatic = mkScope pkgsHostStatic;
scopeCrossLinux = mkScope pkgsHost.pkgsLinux.pkgsStatic;
mkCrossScope = crossSystem:
let pkgsCrossStatic = (import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
mkScope pkgsCrossStatic;
mkDevShell = scope: scope.pkgs.mkShell {
env = scope.main.env // {
# Rust Analyzer needs to be able to find the path to default crate
# sources, and it can read this environment variable to do so. The
# `rust-src` component is required in order for this to work.
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
# Convenient way to access a pinned version of Complement's source
# code.
COMPLEMENT_SRC = inputs.complement.outPath;
# Needed for Complement: <https://github.com/golang/go/issues/52690>
CGO_CFLAGS = "-Wl,--no-gc-sections";
CGO_LDFLAGS = "-Wl,--no-gc-sections";
};
# The Rust toolchain to use
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
# Development tools
packages = [
# Always use nightly rustfmt because most of its options are unstable
#
# This needs to come before `toolchain` in this list, otherwise
# `$PATH` will have stable rustfmt instead.
inputs.fenix.packages.${system}.latest.rustfmt
# See also `rust-toolchain.toml`
sha256 = "sha256-KUm16pHj+cRedf8vxs/Hd2YWxpOrWZ7UOrwhILdSJBU=";
};
toolchain
]
++ (with pkgsHost.pkgs; [
# Required by hardened-malloc.rs dep
binutils
mkScope =
pkgs:
pkgs.lib.makeScope pkgs.newScope (self: {
inherit pkgs inputs;
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
main = self.callPackage ./nix/pkgs/main { };
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [
"out"
"dev"
"man"
];
buildFlags = [ "library" ];
};
rocksdb =
(pkgs.rocksdb_9_10.override {
# Override the liburing input for the build with our own so
# we have it built with the library flag
inherit (self) liburing;
}).overrideAttrs
(old: {
src = inputs.rocksdb;
version = "v9.11.1";
cmakeFlags =
pkgs.lib.subtractLists [
# No real reason to have snappy or zlib, no one uses this
"-DWITH_SNAPPY=1"
"-DZLIB=1"
"-DWITH_ZLIB=1"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=1"
# We don't need to build rocksdb tests
"-DWITH_TESTS=1"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=1"
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"-DFORCE_SSE42=1"
# PORTABLE will get set in main/default.nix
"-DPORTABLE=1"
] old.cmakeFlags
++ [
# No real reason to have snappy, no one uses this
"-DWITH_SNAPPY=0"
"-DZLIB=0"
"-DWITH_ZLIB=0"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=0"
# We don't need trace tools
"-DWITH_TRACE_TOOLS=0"
# We don't need to build rocksdb tests
"-DWITH_TESTS=0"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=0"
];
cargo-audit
cargo-auditable
# outputs has "tools" which we don't need or use
outputs = [ "out" ];
# Needed for producing Debian packages
cargo-deb
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
preInstall = "";
# Needed for CI to check validity of produced Debian packages (dpkg-deb)
dpkg
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# Unsetting this so we don't have to revert it and make this nix exclusive
patches = [ ];
engage
postPatch = ''
# Fix gcc-13 build failures due to missing <cstdint> and
# <system_error> includes, fixed upstream since 8.x
sed -e '1i #include <cstdint>' -i db/compaction/compaction_iteration_stats.h
sed -e '1i #include <cstdint>' -i table/block_based/data_block_hash_index.h
sed -e '1i #include <cstdint>' -i util/string_util.h
sed -e '1i #include <cstdint>' -i include/rocksdb/utilities/checkpoint.h
'';
});
});
# Needed for Complement
go
scopeHost = mkScope pkgsHost;
mkCrossScope =
crossSystem:
let
pkgsCrossStatic =
(import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
mkScope pkgsCrossStatic;
# Needed for our script for Complement
jq
gotestfmt
in
{
packages =
{
default = scopeHost.main.override {
disable_features = [
# Don't include experimental features
# Needed for finding broken markdown links
lychee
# Needed for linting markdown files
markdownlint-cli
# Useful for editing the book locally
mdbook
# used for rust caching in CI to speed it up
sccache
]
# liburing is Linux-exclusive
++ lib.optional stdenv.hostPlatform.isLinux liburing
++ lib.optional stdenv.hostPlatform.isLinux numactl)
++ scope.main.buildInputs
++ scope.main.propagatedBuildInputs
++ scope.main.nativeBuildInputs;
};
in
{
packages = {
default = scopeHost.main.override {
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
default-debug = scopeHost.main.override {
profile = "dev";
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
# Just a test profile used for things like CI and complement
default-test = scopeHost.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
];
};
default-debug = scopeHost.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features = scopeHost.main.override {
all_features = true;
disable_features = [
# Don't include experimental features
];
};
# just a test profile used for things like CI and complement
default-test = scopeHost.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features = scopeHost.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features-debug = scopeHost.main.override {
profile = "dev";
all_features = true;
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
];
};
all-features-debug = scopeHost.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# This is non-functional on nix for some reason
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
hmalloc = scopeHost.main.override { features = [ "hardened_malloc" ]; };
}
// builtins.listToAttrs (
builtins.concatLists (
builtins.map
(
crossSystem:
let
binaryName = "static-${crossSystem}";
scopeCrossStatic = mkCrossScope crossSystem;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = scopeCrossStatic.main;
}
];
};
hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; };
# An output for a statically-linked binary with x86_64 haswell
# target optimisations
{
name = "${binaryName}-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
oci-image = scopeHost.oci-image;
oci-image-all-features = scopeHost.oci-image.override {
main = scopeHost.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
};
oci-image-all-features-debug = scopeHost.oci-image.override {
main = scopeHost.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
};
oci-image-hmalloc = scopeHost.oci-image.override {
main = scopeHost.main.override {
features = ["hardened_malloc"];
};
};
# An output for a statically-linked unstripped debug ("dev") binary
{
name = "${binaryName}-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
};
}
book = scopeHost.book;
# An output for a statically-linked unstripped debug binary with the
# "test" profile (for CI usage only)
{
name = "${binaryName}-test";
value = scopeCrossStatic.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features`
{
name = "${binaryName}-all-features";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features` and with x86_64 haswell
# target optimisations
{
name = "${binaryName}-all-features-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
# An output for a statically-linked unstripped debug ("dev") binary with `--all-features`
{
name = "${binaryName}-all-features-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with hardened_malloc
{
name = "${binaryName}-hmalloc";
value = scopeCrossStatic.main.override {
features = [ "hardened_malloc" ];
};
}
]
)
[
#"x86_64-apple-darwin"
#"aarch64-apple-darwin"
"x86_64-linux-gnu"
"x86_64-linux-musl"
"aarch64-linux-musl"
]
)
);
complement = scopeHost.complement;
static-complement = scopeHostStatic.complement;
# macOS containers don't exist, so the complement images must be forced to linux
linux-complement = (mkCrossScope "${pkgsHost.hostPlatform.qemuArch}-linux-musl").complement;
}
);
//
builtins.listToAttrs
(builtins.concatLists
(builtins.map
(crossSystem:
let
binaryName = "static-${crossSystem}";
scopeCrossStatic = mkCrossScope crossSystem;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = scopeCrossStatic.main;
}
# An output for a statically-linked binary with x86_64 haswell
# target optimisations
{
name = "${binaryName}-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
};
}
# An output for a statically-linked unstripped debug ("dev") binary
{
name = "${binaryName}-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
};
}
# An output for a statically-linked unstripped debug binary with the
# "test" profile (for CI usage only)
{
name = "${binaryName}-test";
value = scopeCrossStatic.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features`
{
name = "${binaryName}-all-features";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features` and with x86_64 haswell
# target optimisations
{
name = "${binaryName}-all-features-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
};
}
# An output for a statically-linked unstripped debug ("dev") binary with `--all-features`
{
name = "${binaryName}-all-features-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with hardened_malloc
{
name = "${binaryName}-hmalloc";
value = scopeCrossStatic.main.override {
features = ["hardened_malloc"];
};
}
# An output for an OCI image based on that binary
{
name = "oci-image-${crossSystem}";
value = scopeCrossStatic.oci-image;
}
# An output for an OCI image based on that binary with x86_64 haswell
# target optimisations
{
name = "oci-image-${crossSystem}-x86_64-haswell-optimised";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
};
};
}
# An output for an OCI image based on that unstripped debug ("dev") binary
{
name = "oci-image-${crossSystem}-debug";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
};
};
}
# An output for an OCI image based on that binary with `--all-features`
{
name = "oci-image-${crossSystem}-all-features";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
};
}
# An output for an OCI image based on that binary with `--all-features` and with x86_64 haswell
# target optimisations
{
name = "oci-image-${crossSystem}-all-features-x86_64-haswell-optimised";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false);
};
};
}
# An output for an OCI image based on that unstripped debug ("dev") binary with `--all-features`
{
name = "oci-image-${crossSystem}-all-features-debug";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
};
}
# An output for an OCI image based on that binary with hardened_malloc
{
name = "oci-image-${crossSystem}-hmalloc";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
features = ["hardened_malloc"];
};
};
}
# An output for a complement OCI image for the specified platform
{
name = "complement-${crossSystem}";
value = scopeCrossStatic.complement;
}
]
)
[
#"x86_64-apple-darwin"
#"aarch64-apple-darwin"
"x86_64-linux-gnu"
"x86_64-linux-musl"
"aarch64-linux-musl"
]
)
);
devShells.default = mkDevShell scopeHostStatic;
devShells.all-features = mkDevShell
(scopeHostStatic.overrideScope (final: prev: {
main = prev.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}));
devShells.no-features = mkDevShell
(scopeHostStatic.overrideScope (final: prev: {
main = prev.main.override { default_features = false; };
}));
devShells.dynamic = mkDevShell scopeHost;
});
}

nix/pkgs/book/default.nix (new file, 36 changed lines)
View file

@@ -0,0 +1,36 @@
{ inputs
# Dependencies
, main
, mdbook
, stdenv
}:
stdenv.mkDerivation {
inherit (main) pname version;
src = inputs.nix-filter {
root = inputs.self;
include = [
"book.toml"
"conduwuit-example.toml"
"CODE_OF_CONDUCT.md"
"CONTRIBUTING.md"
"README.md"
"development.md"
"debian/conduwuit.service"
"debian/README.md"
"arch/conduwuit.service"
"docs"
"theme"
];
};
nativeBuildInputs = [
mdbook
];
buildPhase = ''
mdbook build -d $out
'';
}

View file

@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL
BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz
IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy
NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m
ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt
/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88
awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp
L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK
K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl
8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV
HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy
ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw
DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ
irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+
HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e
VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3
y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d
jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4=
-----END CERTIFICATE-----

View file

@@ -0,0 +1,50 @@
[global]
address = "0.0.0.0"
allow_device_name_federation = true
allow_guest_registration = true
allow_public_room_directory_over_federation = true
allow_public_room_directory_without_auth = true
allow_registration = true
database_path = "/database"
log = "trace,h2=debug,hyper=debug"
port = [8008, 8448]
trusted_servers = []
only_query_trusted_key_servers = false
query_trusted_key_servers_first = false
query_trusted_key_servers_first_on_join = false
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true
ip_range_denylist = []
url_preview_domain_contains_allowlist = ["*"]
url_preview_domain_explicit_denylist = ["*"]
media_compat_file_link = false
media_startup_check = true
prune_missing_media = true
log_colors = true
admin_room_notices = false
allow_check_for_updates = false
intentionally_unknown_config_option_for_testing = true
rocksdb_log_level = "info"
rocksdb_max_log_files = 1
rocksdb_recovery_mode = 0
rocksdb_paranoid_file_checks = true
log_guest_registrations = false
allow_legacy_media = true
startup_netburst = true
startup_netburst_keep = -1
allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true
# valgrind makes things so slow
dns_timeout = 60
dns_attempts = 20
request_conn_timeout = 60
request_timeout = 120
well_known_conn_timeout = 60
well_known_timeout = 60
federation_idle_timeout = 300
sender_timeout = 300
sender_idle_timeout = 300
sender_retry_backoff_limit = 300
[global.tls]
dual_protocol = true

View file

@@ -0,0 +1,89 @@
# Dependencies
{ bashInteractive
, buildEnv
, coreutils
, dockerTools
, lib
, main
, stdenv
, tini
, writeShellScriptBin
}:
let
main' = main.override {
profile = "test";
all_features = true;
disable_release_max_log_level = true;
disable_features = [
# console/CLI stuff isn't used or relevant for complement
"console"
"tokio_console"
# sentry telemetry isn't useful for complement, disabled by default anyways
"sentry_telemetry"
"perf_measurements"
# this is non-functional on nix for some reason
"hardened_malloc"
# dont include experimental features
"experimental"
# compression isn't needed for complement
"brotli_compression"
"gzip_compression"
"zstd_compression"
# complement doesn't need hot reloading
"conduwuit_mods"
# complement doesn't have URL preview media tests
"url_preview"
];
};
start = writeShellScriptBin "start" ''
set -euxo pipefail
${lib.getExe' coreutils "env"} \
CONDUWUIT_SERVER_NAME="$SERVER_NAME" \
${lib.getExe main'}
'';
in
dockerTools.buildImage {
name = "complement-conduwuit";
tag = "main";
copyToRoot = buildEnv {
name = "root";
pathsToLink = [
"/bin"
];
paths = [
bashInteractive
coreutils
main'
start
];
};
config = {
Cmd = [
"${lib.getExe start}"
];
Entrypoint = if !stdenv.hostPlatform.isDarwin
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
then [ "${lib.getExe' tini "tini"}" "--" ]
else [];
Env = [
"CONTINUWUITY_TLS__KEY=${./private_key.key}"
"CONTINUWUITY_TLS__CERTS=${./certificate.crt}"
"CONTINUWUITY_CONFIG=${./config.toml}"
"RUST_BACKTRACE=full"
];
ExposedPorts = {
"8008/tcp" = {};
"8448/tcp" = {};
};
};
}

View file

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb
iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT
LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a
09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc
ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga
Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO
/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu
WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB
DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb
piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN
D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ
8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+
3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq
/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90
FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q
td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M
Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A
91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV
8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh
VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW
UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K
kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz
KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7
IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh
tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM
9MVtdgSkuh2gwkD/mMoAJXM=
-----END PRIVATE KEY-----

View file

@ -0,0 +1,16 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK
DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH
uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR
xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb
o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B
hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe
vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB
CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79
ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3
r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb
XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK
MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76
U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/
-----END CERTIFICATE REQUEST-----

View file

@ -0,0 +1,12 @@
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.docker.internal
DNS.2 = hs1
DNS.3 = hs2
DNS.4 = hs3
DNS.5 = hs4
IP.1 = 127.0.0.1

View file

@ -4,47 +4,51 @@
, stdenv
}:
lib.optionalAttrs stdenv.hostPlatform.isStatic
{
ROCKSDB_STATIC = "";
}
lib.optionalAttrs stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = "";
}
//
{
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
(lib.optionals
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
[
"-l"
"c"
([]
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
++ lib.optionals
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
[
"-l"
"c"
"-l"
"stdc++"
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/nixpkgs-unstable/pkgs/build-support/rust/lib/default.nix#L48-L68
//
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/nixpkgs-unstable/pkgs/build-support/rust/lib/default.nix#L48-L68
//
(
let
inherit (rust.lib) envVars;
in
lib.optionalAttrs
(stdenv.targetPlatform.rust.rustcTarget
!= stdenv.hostPlatform.rust.rustcTarget)
!= stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;

View file

@ -12,146 +12,144 @@
, rust-jemalloc-sys
, stdenv
# Options (keep sorted)
# Options (keep sorted)
, all_features ? false
, default_features ? true
# default list of disabled features
# default list of disabled features
, disable_features ? [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
]
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
]
, disable_release_max_log_level ? false
, features ? [ ]
, features ? []
, profile ? "release"
# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag
# haswell is pretty much any x86 cpu made in the last 12 years, and
# supports modern CPU extensions that rocksdb can make use of.
# disable if trying to make a portable x86_64 build for very old hardware
# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag
# haswell is pretty much any x86 cpu made in the last 12 years, and
# supports modern CPU extensions that rocksdb can make use of.
# disable if trying to make a portable x86_64 build for very old hardware
, x86_64_haswell_target_optimised ? false
}:
let
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
lib.optionals all_features allFeatures);
disable_features' = disable_features ++ lib.optionals disable_release_max_log_level [ "release_max_log_level" ];
features'' = lib.subtractLists disable_features' features';
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
lib.optionals all_features allFeatures);
disable_features' = disable_features ++ lib.optionals disable_release_max_log_level ["release_max_log_level"];
features'' = lib.subtractLists disable_features' features';
featureEnabled = feature: builtins.elem feature features'';
featureEnabled = feature : builtins.elem feature features'';
enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;
enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;
# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building its
# own. In order for this to work, we need to set flags on the build that match
# whatever flags tikv-jemalloc-sys was going to use. These are dependent on
# which features we enable in tikv-jemalloc-sys.
rust-jemalloc-sys' = (rust-jemalloc-sys.override {
# tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature
unprefixed = true;
}).overrideAttrs (old: {
configureFlags = old.configureFlags ++
# we dont need docs
[ "--disable-doc" ] ++
# we dont need cxx/C++ integration
[ "--disable-cxx" ] ++
# tikv-jemalloc-sys/profiling feature
lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++
# tikv-jemalloc-sys/stats feature
(if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]);
# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building its
# own. In order for this to work, we need to set flags on the build that match
# whatever flags tikv-jemalloc-sys was going to use. These are dependent on
# which features we enable in tikv-jemalloc-sys.
rust-jemalloc-sys' = (rust-jemalloc-sys.override {
# tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature
unprefixed = true;
}).overrideAttrs (old: {
configureFlags = old.configureFlags ++
# we dont need docs
[ "--disable-doc" ] ++
# we dont need cxx/C++ integration
[ "--disable-cxx" ] ++
# tikv-jemalloc-sys/profiling feature
lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++
# tikv-jemalloc-sys/stats feature
(if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]);
});
buildDepsOnlyEnv =
let
rocksdb' = (rocksdb.override {
jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;
# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
enableLiburing = enableLiburing;
}).overrideAttrs (old: {
enableLiburing = enableLiburing;
cmakeFlags = (if x86_64_haswell_target_optimised then (lib.subtractLists [
# dont make a portable build if x86_64_haswell_target_optimised is enabled
"-DPORTABLE=1"
] old.cmakeFlags
++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ])
)
++ old.cmakeFlags;
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
in
{
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
stdenv;
});
buildDepsOnlyEnv =
let
rocksdb' = (rocksdb.override {
jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;
# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
inherit enableLiburing;
}).overrideAttrs (old: {
inherit enableLiburing;
cmakeFlags = (if x86_64_haswell_target_optimised then
(lib.subtractLists [
# dont make a portable build if x86_64_haswell_target_optimised is enabled
"-DPORTABLE=1"
]
old.cmakeFlags
++ [ "-DPORTABLE=haswell" ]) else [ "-DPORTABLE=1" ]
)
++ old.cmakeFlags;
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
in
{
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
stdenv;
});
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
} // buildDepsOnlyEnv // {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
} // buildDepsOnlyEnv // {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString x86_64_haswell_target_optimised
+ lib.optionalString x86_64_haswell_target_optimised
" -Ctarget-cpu=haswell";
};
};
commonAttrs = {
inherit
(craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
})
pname
version;
commonAttrs = {
inherit
(craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
})
pname
version;
src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;
@ -162,7 +160,6 @@ let
"Cargo.lock"
"Cargo.toml"
"src"
"xtask"
];
};
@ -170,22 +167,22 @@ let
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
(features'' != [])
"--features " + (builtins.concatStringsSep "," features'');
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
@ -198,11 +195,11 @@ let
# differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
# rebuilds of bindgen and its dependents.
jq
];
};
];
};
in
craneLib.buildPackage (commonAttrs // {
craneLib.buildPackage ( commonAttrs // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
env = buildDepsOnlyEnv;
});
@ -211,8 +208,8 @@ craneLib.buildPackage (commonAttrs // {
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
(features'' != [])
"--features " + (builtins.concatStringsSep "," features'');
env = buildPackageEnv;

View file

@ -0,0 +1,46 @@
{ inputs
# Dependencies
, dockerTools
, lib
, main
, stdenv
, tini
}:
dockerTools.buildLayeredImage {
name = main.pname;
tag = "main";
created = "@${toString inputs.self.lastModified}";
contents = [
dockerTools.caCertificates
main
];
config = {
Entrypoint = if !stdenv.hostPlatform.isDarwin
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
then [ "${lib.getExe' tini "tini"}" "--" ]
else [];
Cmd = [
"${lib.getExe main}"
];
Env = [
"RUST_BACKTRACE=full"
];
Labels = {
"org.opencontainers.image.authors" = "June Clementine Strawberry <june@girlboss.ceo> and Jason Volk
<jason@zemos.net>";
"org.opencontainers.image.created" ="@${toString inputs.self.lastModified}";
"org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust";
"org.opencontainers.image.documentation" = "https://continuwuity.org/";
"org.opencontainers.image.licenses" = "Apache-2.0";
"org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or "";
"org.opencontainers.image.source" = "https://forgejo.ellis.link/continuwuation/continuwuity";
"org.opencontainers.image.title" = main.pname;
"org.opencontainers.image.url" = "https://continuwuity.org/";
"org.opencontainers.image.vendor" = "continuwuation";
"org.opencontainers.image.version" = main.version;
};
};
}

View file

@ -10,7 +10,7 @@ use crate::{
#[derive(Debug, Parser)]
#[command(name = conduwuit_core::name(), version = conduwuit_core::version())]
pub enum AdminCommand {
pub(super) enum AdminCommand {
#[command(subcommand)]
/// - Commands for managing appservices
Appservices(AppserviceCommand),

View file

@ -7,7 +7,7 @@ use crate::admin_command_dispatch;
#[derive(Debug, Subcommand)]
#[admin_command_dispatch]
pub enum AppserviceCommand {
pub(super) enum AppserviceCommand {
/// - Register an appservice using its registration YAML
///
/// This command needs a YAML generated by an appservice (such as a bridge),

View file

@ -7,6 +7,6 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum CheckCommand {
pub(super) enum CheckCommand {
CheckAllUsers,
}

View file

@ -11,7 +11,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum DebugCommand {
pub(super) enum DebugCommand {
/// - Echo input of admin command
Echo {
message: Vec<String>,
@ -32,13 +32,13 @@ pub enum DebugCommand {
/// the command.
ParsePdu,
/// - Retrieve and print a PDU by EventID from the Continuwuity database
/// - Retrieve and print a PDU by EventID from the conduwuit database
GetPdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: OwnedEventId,
},
/// - Retrieve and print a PDU by PduId from the Continuwuity database
/// - Retrieve and print a PDU by PduId from the conduwuit database
GetShortPdu {
/// Shortroomid integer
shortroomid: ShortRoomId,
@ -182,7 +182,7 @@ pub enum DebugCommand {
event_id: Option<OwnedEventId>,
},
/// - Runs a server name through Continuwuity's true destination resolution
/// - Runs a server name through conduwuit's true destination resolution
/// process
///
/// Useful for debugging well-known issues

View file

@ -4,7 +4,7 @@ use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, clap::Subcommand)]
pub enum TesterCommand {
pub(crate) enum TesterCommand {
Panic,
Failure,
Tester,

View file

@ -8,7 +8,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum FederationCommand {
pub(super) enum FederationCommand {
/// - List all rooms we are currently handling an incoming pdu from
IncomingFederation,

View file

@ -9,7 +9,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum MediaCommand {
pub(super) enum MediaCommand {
/// - Deletes a single media file from our database and on the filesystem
/// via a single MXC URL or event ID (not redacted)
Delete {
@ -90,10 +90,10 @@ pub enum MediaCommand {
#[arg(short, long, default_value("10000"))]
timeout: u32,
#[arg(long, default_value("800"))]
#[arg(short, long, default_value("800"))]
width: u32,
#[arg(long, default_value("800"))]
#[arg(short, long, default_value("800"))]
height: u32,
},
}

View file

@ -33,8 +33,6 @@ conduwuit::mod_ctor! {}
conduwuit::mod_dtor! {}
conduwuit::rustc_flags_capture! {}
pub use crate::admin::AdminCommand;
/// Install the admin command processor
pub async fn init(admin_service: &service::admin::Service) {
_ = admin_service

View file

@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/account_data.rs
pub enum AccountDataCommand {
pub(crate) enum AccountDataCommand {
/// - Returns all changes to the account data that happened after `since`.
ChangesSince {
/// Full user ID

View file

@ -6,7 +6,7 @@ use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/appservice.rs
pub enum AppserviceCommand {
pub(crate) enum AppserviceCommand {
/// - Gets the appservice registration info/details from the ID as a string
GetRegistration {
/// Appservice registration ID

View file

@ -6,7 +6,7 @@ use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/globals.rs
pub enum GlobalsCommand {
pub(crate) enum GlobalsCommand {
DatabaseVersion,
CurrentCount,

View file

@ -27,7 +27,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
/// Query tables from database
pub enum QueryCommand {
pub(super) enum QueryCommand {
/// - account_data.rs iterators and getters
#[command(subcommand)]
AccountData(AccountDataCommand),

View file

@ -7,7 +7,7 @@ use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/presence.rs
pub enum PresenceCommand {
pub(crate) enum PresenceCommand {
/// - Returns the latest presence event for the given user.
GetPresence {
/// Full user ID

View file

@ -5,7 +5,7 @@ use ruma::OwnedUserId;
use crate::Context;
#[derive(Debug, Subcommand)]
pub enum PusherCommand {
pub(crate) enum PusherCommand {
/// - Returns all the pushers for the user.
GetPushers {
/// Full user ID

View file

@ -19,7 +19,7 @@ use crate::{admin_command, admin_command_dispatch};
#[derive(Debug, Subcommand)]
#[allow(clippy::enum_variant_names)]
/// Query tables from database
pub enum RawCommand {
pub(crate) enum RawCommand {
/// - List database maps
RawMaps,

View file

@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
/// Resolver service and caches
pub enum ResolverCommand {
pub(crate) enum ResolverCommand {
/// Query the destinations cache
DestinationsCache {
server_name: Option<OwnedServerName>,

View file

@ -7,7 +7,7 @@ use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/rooms/alias.rs
pub enum RoomAliasCommand {
pub(crate) enum RoomAliasCommand {
ResolveLocalAlias {
/// Full room alias
alias: OwnedRoomAliasId,

View file

@ -6,7 +6,7 @@ use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
use crate::Context;
#[derive(Debug, Subcommand)]
pub enum RoomStateCacheCommand {
pub(crate) enum RoomStateCacheCommand {
ServerInRoom {
server: OwnedServerName,
room_id: OwnedRoomId,

View file

@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
/// Query tables from database
pub enum RoomTimelineCommand {
pub(crate) enum RoomTimelineCommand {
Pdus {
room_id: OwnedRoomOrAliasId,

View file

@ -8,7 +8,7 @@ use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/sending.rs
pub enum SendingCommand {
pub(crate) enum SendingCommand {
/// - Queries database for all `servercurrentevent_data`
ActiveRequests,

View file

@ -7,7 +7,7 @@ use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
/// Query tables from database
pub enum ShortCommand {
pub(crate) enum ShortCommand {
ShortEventId {
event_id: OwnedEventId,
},

View file

@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/users.rs
pub enum UsersCommand {
pub(crate) enum UsersCommand {
CountUsers,
IterUsers,

View file

@ -8,7 +8,7 @@ use ruma::{OwnedRoomAliasId, OwnedRoomId};
use crate::Context;
#[derive(Debug, Subcommand)]
pub enum RoomAliasCommand {
pub(crate) enum RoomAliasCommand {
/// - Make an alias point to a room.
Set {
#[arg(short, long)]

View file

@ -6,7 +6,7 @@ use ruma::OwnedRoomId;
use crate::{Context, PAGE_SIZE, get_room_info};
#[derive(Debug, Subcommand)]
pub enum RoomDirectoryCommand {
pub(crate) enum RoomDirectoryCommand {
/// - Publish a room to the room directory
Publish {
/// The room id of the room to publish

View file

@ -7,7 +7,7 @@ use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum RoomInfoCommand {
pub(crate) enum RoomInfoCommand {
/// - List joined members in a room
ListJoinedMembers {
room_id: OwnedRoomId,

View file

@ -16,7 +16,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum RoomCommand {
pub(super) enum RoomCommand {
/// - List all rooms the server knows about
#[clap(alias = "list")]
ListRooms {

View file

@ -12,7 +12,7 @@ use crate::{admin_command, admin_command_dispatch, get_room_info};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum RoomModerationCommand {
pub(crate) enum RoomModerationCommand {
/// - Bans a room from local users joining and evicts all our local users
/// (including server
/// admins)

View file

@ -9,7 +9,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum ServerCommand {
pub(super) enum ServerCommand {
/// - Time elapsed since startup
Uptime,

View file

@ -8,7 +8,7 @@ use crate::admin_command_dispatch;
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub enum UserCommand {
pub(super) enum UserCommand {
/// - Create a new user
#[clap(alias = "create")]
CreateUser {

View file

@ -291,34 +291,19 @@ pub(crate) async fn register_route(
}
// UIAA
let mut uiaainfo = UiaaInfo {
flows: Vec::new(),
completed: Vec::new(),
params: Box::default(),
session: None,
auth_error: None,
};
let mut skip_auth = body.appservice_info.is_some();
if services.globals.registration_token.is_some() {
let mut uiaainfo;
let skip_auth = if services.globals.registration_token.is_some() {
// Registration token required
uiaainfo.flows.push(AuthFlow {
stages: vec![AuthType::RegistrationToken],
});
}
if services.config.recaptcha_private_site_key.is_some() {
if let Some(pubkey) = &services.config.recaptcha_site_key {
// ReCaptcha required
uiaainfo
.flows
.push(AuthFlow { stages: vec![AuthType::ReCaptcha] });
uiaainfo.params = serde_json::value::to_raw_value(&serde_json::json!({
"m.login.recaptcha": {
"public_key": pubkey,
},
}))
.expect("Failed to serialize recaptcha params");
skip_auth = skip_auth || is_guest;
}
uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::RegistrationToken],
}],
completed: Vec::new(),
params: Box::default(),
session: None,
auth_error: None,
};
body.appservice_info.is_some()
} else {
// No registration token necessary, but clients must still go through the flow
uiaainfo = UiaaInfo {
@ -328,8 +313,8 @@ pub(crate) async fn register_route(
session: None,
auth_error: None,
};
skip_auth = skip_auth || is_guest;
}
body.appservice_info.is_some() || is_guest
};
if !skip_auth {
match &body.auth {

View file

@ -58,9 +58,7 @@ pub(crate) async fn set_read_marker_route(
}
if let Some(event) = &body.read_receipt {
if services.config.allow_local_read_receipts
&& !services.users.is_suspended(sender_user).await?
{
if !services.users.is_suspended(sender_user).await? {
let receipt_content = BTreeMap::from_iter([(
event.to_owned(),
BTreeMap::from_iter([(

View file

@ -26,7 +26,7 @@ pub(crate) async fn create_typing_event_route(
{
return Err!(Request(Forbidden("You are not in this room.")));
}
if services.config.allow_local_typing && !services.users.is_suspended(sender_user).await? {
if !services.users.is_suspended(sender_user).await? {
match body.state {
| Typing::Yes(duration) => {
let duration = utils::clamp(

View file

@ -180,28 +180,19 @@ pub fn check(config: &Config) -> Result {
}
}
if config.recaptcha_site_key.is_some() && config.recaptcha_private_site_key.is_none() {
return Err!(Config(
"recaptcha_private_site_key",
"reCAPTCHA private site key is required when reCAPTCHA site key is set."
));
}
if config.allow_registration
&& !config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse
&& config.registration_token.is_none()
&& config.registration_token_file.is_none()
&& config.recaptcha_site_key.is_none()
{
return Err!(Config(
"registration_token",
"!! You have `allow_registration` enabled without a token or captcha configured \
which means you are allowing ANYONE to register on your continuwuity instance \
without any 2nd-step (e.g. registration token, captcha), which is FREQUENTLY \
abused by malicious actors. If this is not the intended behaviour, please set a \
registration token. For security and safety reasons, continuwuity will shut down. \
If you are extra sure this is the desired behaviour you want, please set the \
following config option to true:
"!! You have `allow_registration` enabled without a token configured in your config \
which means you are allowing ANYONE to register on your conduwuit instance without \
any 2nd-step (e.g. registration token). If this is not the intended behaviour, \
please set a registration token. For security and safety reasons, conduwuit will \
shut down. If you are extra sure this is the desired behaviour you want, please \
set the following config option to true:
`yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`"
));
}
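For reference only (not part of this diff), a minimal sketch of a registration section that satisfies the check above by configuring a shared token instead of the explicit opt-out flag; all key names appear elsewhere in this compare, and the token value is a placeholder:

[global]
allow_registration = true
registration_token = "replace-with-a-long-random-secret"
# alternatively, read the token from a file:
# registration_token_file = "/etc/continuwuity/.reg_token"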

View file

@ -556,24 +556,6 @@ pub struct Config {
/// example: "/etc/continuwuity/.reg_token"
pub registration_token_file: Option<PathBuf>,
/// The public site key for reCaptcha. If this is provided, reCaptcha
/// becomes required during registration. If both captcha *and*
/// registration token are enabled, both will be required during
/// registration.
///
/// IMPORTANT: "Verify the origin of reCAPTCHA solutions" **MUST** BE
/// DISABLED IF YOU WANT THE CAPTCHA TO WORK IN 3RD PARTY CLIENTS, OR
/// CLIENTS HOSTED ON DOMAINS OTHER THAN YOUR OWN!
///
/// Registration must be enabled (`allow_registration` must be true) for
/// this to have any effect.
pub recaptcha_site_key: Option<String>,
/// The private site key for reCaptcha.
/// If this is omitted, captcha registration will not work,
/// even if `recaptcha_site_key` is set.
pub recaptcha_private_site_key: Option<String>,
/// Controls whether encrypted rooms and events are allowed.
#[serde(default = "true_fn")]
pub allow_encryption: bool,
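The doc comments above describe the reCAPTCHA keys as an optional second registration step. A hedged sketch of how they would be set in a config file, on a build where these options exist; both key names are taken from the struct above and the values are placeholders:

[global]
recaptcha_site_key = "your-public-site-key"
recaptcha_private_site_key = "your-private-site-key"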
@ -829,24 +811,6 @@ pub struct Config {
#[serde(default)]
pub log_thread_ids: bool,
/// Enable journald logging on Unix platforms
///
/// When enabled, log output will be sent to the systemd journal
/// This is only supported on Unix platforms
///
/// default: false
#[cfg(target_family = "unix")]
#[serde(default)]
pub log_to_journald: bool,
/// The syslog identifier to use with journald logging
///
/// Only used when journald logging is enabled
///
/// Defaults to the binary name
#[cfg(target_family = "unix")]
pub journald_identifier: Option<String>,
/// OpenID token expiration/TTL in seconds.
///
/// These are the OpenID tokens that are primarily used for Matrix account
@ -1277,13 +1241,6 @@ pub struct Config {
#[serde(default = "true_fn")]
pub presence_timeout_remote_users: bool,
/// Allow local read receipts.
///
/// Disabling this will effectively also disable outgoing federated read
/// receipts.
#[serde(default = "true_fn")]
pub allow_local_read_receipts: bool,
/// Allow receiving incoming read receipts from remote servers.
#[serde(default = "true_fn")]
pub allow_incoming_read_receipts: bool,
@ -1292,13 +1249,6 @@ pub struct Config {
#[serde(default = "true_fn")]
pub allow_outgoing_read_receipts: bool,
/// Allow local typing updates.
///
/// Disabling this will effectively also disable outgoing federated typing
/// updates.
#[serde(default = "true_fn")]
pub allow_local_typing: bool,
/// Allow outgoing typing updates to federation.
#[serde(default = "true_fn")]
pub allow_outgoing_typing: bool,
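For orientation, a sketch (not part of this diff) of the remaining federation-facing toggles as they would appear in a config file, using only option names visible in this hunk and their `true_fn` defaults:

[global]
allow_incoming_read_receipts = true
allow_outgoing_read_receipts = true
allow_outgoing_typing = true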

View file

@ -281,18 +281,6 @@ where
return Ok(false);
}
if incoming_event.room_id() != room_create_event.room_id() {
warn!("room_id of incoming event does not match room_id of m.room.create event");
return Ok(false);
}
if let Some(ref pe) = power_levels_event {
if pe.room_id() != room_create_event.room_id() {
warn!("room_id of power levels event does not match room_id of m.room.create event");
return Ok(false);
}
}
// 3. If event does not have m.room.create in auth_events reject
if !incoming_event
.auth_events()
@ -418,11 +406,6 @@ where
},
};
if sender_member_event.room_id() != room_create_event.room_id() {
warn!("room_id of incoming event does not match room_id of m.room.create event");
return Ok(false);
}
let sender_membership_event_content: RoomMemberContentFields =
from_json_str(sender_member_event.content().get())?;
let Some(membership_state) = sender_membership_event_content.membership else {

View file

@ -14,13 +14,6 @@ rust-version.workspace = true
version.workspace = true
metadata.crane.workspace = true
[lib]
path = "mod.rs"
crate-type = [
"rlib",
# "dylib",
]
[package.metadata.deb]
name = "conduwuit"
maintainer = "strawberry <strawberry@puppygock.gay>"
@ -50,7 +43,6 @@ default = [
"io_uring",
"jemalloc",
"jemalloc_conf",
"journald",
"media_thumbnail",
"release_max_log_level",
"systemd",
@ -138,11 +130,6 @@ sentry_telemetry = [
systemd = [
"conduwuit-router/systemd",
]
journald = [ # This is a stub on non-unix platforms
"dep:tracing-journald",
]
# enable the tokio_console server; incompatible with release_max_log_level
tokio_console = [
"dep:console-subscriber",
@ -196,7 +183,6 @@ tracing-opentelemetry.optional = true
tracing-opentelemetry.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
tracing-journald = { workspace = true, optional = true }
[target.'cfg(all(not(target_env = "msvc"), target_os = "linux"))'.dependencies]
hardened_malloc-rs.workspace = true

View file

@ -18,35 +18,35 @@ use conduwuit_core::{
name = conduwuit_core::name(),
version = conduwuit_core::version(),
)]
pub struct Args {
pub(crate) struct Args {
#[arg(short, long)]
/// Path to the config TOML file (optional)
pub config: Option<Vec<PathBuf>>,
pub(crate) config: Option<Vec<PathBuf>>,
/// Override a configuration variable using TOML 'key=value' syntax
#[arg(long, short('O'))]
pub option: Vec<String>,
pub(crate) option: Vec<String>,
/// Run in a stricter read-only --maintenance mode.
#[arg(long)]
pub read_only: bool,
pub(crate) read_only: bool,
/// Run in maintenance mode while refusing connections.
#[arg(long)]
pub maintenance: bool,
pub(crate) maintenance: bool,
#[cfg(feature = "console")]
/// Activate admin command console automatically after startup.
#[arg(long, num_args(0))]
pub console: bool,
pub(crate) console: bool,
/// Execute console command automatically after startup.
#[arg(long)]
pub execute: Vec<String>,
pub(crate) execute: Vec<String>,
/// Set functional testing modes if available. Ex '--test=smoke'
#[arg(long, hide(true))]
pub test: Vec<String>,
pub(crate) test: Vec<String>,
/// Override the tokio worker_thread count.
#[arg(
@ -55,19 +55,19 @@ pub struct Args {
env = "TOKIO_WORKER_THREADS",
default_value = available_parallelism().to_string(),
)]
pub worker_threads: usize,
pub(crate) worker_threads: usize,
/// Override the tokio global_queue_interval.
#[arg(long, hide(true), env = "TOKIO_GLOBAL_QUEUE_INTERVAL", default_value = "192")]
pub global_event_interval: u32,
pub(crate) global_event_interval: u32,
/// Override the tokio event_interval.
#[arg(long, hide(true), env = "TOKIO_EVENT_INTERVAL", default_value = "512")]
pub kernel_event_interval: u32,
pub(crate) kernel_event_interval: u32,
/// Override the tokio max_io_events_per_tick.
#[arg(long, hide(true), env = "TOKIO_MAX_IO_EVENTS_PER_TICK", default_value = "512")]
pub kernel_events_per_tick: usize,
pub(crate) kernel_events_per_tick: usize,
/// Set the histogram bucket size, in microseconds (tokio_unstable). Default
/// is 25 microseconds. If the values of the histogram don't approach zero
@ -81,7 +81,7 @@ pub struct Args {
env = "CONDUWUIT_RUNTIME_HISTOGRAM_INTERVAL",
default_value = "25"
)]
pub worker_histogram_interval: u64,
pub(crate) worker_histogram_interval: u64,
/// Set the histogram bucket count (tokio_unstable). Default is 20.
#[arg(
@ -91,7 +91,7 @@ pub struct Args {
env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS",
default_value = "20"
)]
pub worker_histogram_buckets: usize,
pub(crate) worker_histogram_buckets: usize,
/// Toggles worker affinity feature.
#[arg(
@ -105,7 +105,7 @@ pub struct Args {
default_value = "true",
default_missing_value = "true",
)]
pub worker_affinity: bool,
pub(crate) worker_affinity: bool,
/// Toggles feature to promote memory reclamation by the operating system
/// when tokio worker runs out of work.
@ -118,7 +118,7 @@ pub struct Args {
num_args = 0..=1,
require_equals(false),
)]
pub gc_on_park: Option<bool>,
pub(crate) gc_on_park: Option<bool>,
/// Toggles muzzy decay for jemalloc arenas associated with a tokio
/// worker (when worker-affinity is enabled). Setting to false releases
@ -134,12 +134,12 @@ pub struct Args {
num_args = 0..=1,
require_equals(false),
)]
pub gc_muzzy: Option<bool>,
pub(crate) gc_muzzy: Option<bool>,
}
/// Parse commandline arguments into structured data
#[must_use]
pub(crate) fn parse() -> Args { Args::parse() }
pub(super) fn parse() -> Args { Args::parse() }
/// Synthesize any command line options with configuration file options.
pub(crate) fn update(mut config: Figment, args: &Args) -> Result<Figment> {

View file

@ -46,16 +46,6 @@ pub(crate) fn init(
.with(console_layer.with_filter(console_reload_filter))
.with(cap_layer);
// If journald logging is enabled on Unix platforms, create a separate
// subscriber for it
#[cfg(all(target_family = "unix", feature = "journald"))]
if config.log_to_journald {
println!("Initialising journald logging");
if let Err(e) = init_journald_logging(config) {
eprintln!("Failed to initialize journald logging: {e}");
}
}
#[cfg(feature = "sentry_telemetry")]
let subscriber = {
let sentry_filter = EnvFilter::try_new(&config.sentry_filter)
@ -145,28 +135,6 @@ pub(crate) fn init(
Ok(ret)
}
#[cfg(all(target_family = "unix", feature = "journald"))]
fn init_journald_logging(config: &Config) -> Result<()> {
use tracing_journald::Layer as JournaldLayer;
let journald_filter =
EnvFilter::try_new(&config.log).map_err(|e| err!(Config("log", "{e}.")))?;
let mut journald_layer = JournaldLayer::new()
.map_err(|e| err!(Config("journald", "Failed to initialize journald layer: {e}.")))?;
if let Some(ref identifier) = config.journald_identifier {
journald_layer = journald_layer.with_syslog_identifier(identifier.to_owned());
}
let journald_subscriber =
Registry::default().with(journald_layer.with_filter(journald_filter));
let _guard = tracing::subscriber::set_default(journald_subscriber);
Ok(())
}
fn tokio_console_enabled(config: &Config) -> (bool, &'static str) {
if !cfg!(all(feature = "tokio_console", tokio_unstable)) {
return (false, "");
@ -186,10 +154,7 @@ fn tokio_console_enabled(config: &Config) -> (bool, &'static str) {
(true, "")
}
fn set_global_default<S>(subscriber: S)
where
S: tracing::Subscriber + Send + Sync + 'static,
{
fn set_global_default<S: SubscriberExt + Send + Sync>(subscriber: S) {
tracing::subscriber::set_global_default(subscriber)
.expect("the global default tracing subscriber failed to be initialized");
}

View file

@ -1,3 +1,120 @@
use conduwuit::Result;
#![type_length_limit = "49152"] //TODO: reduce me
fn main() -> Result<()> { conduwuit::run() }
pub(crate) mod clap;
mod logging;
mod mods;
mod restart;
mod runtime;
mod sentry;
mod server;
mod signal;
use std::sync::{Arc, atomic::Ordering};
use conduwuit_core::{Error, Result, debug_info, error, rustc_flags_capture};
use server::Server;
rustc_flags_capture! {}
fn main() -> Result {
let args = clap::parse();
let runtime = runtime::new(&args)?;
let server = Server::new(&args, Some(runtime.handle()))?;
runtime.spawn(signal::signal(server.clone()));
runtime.block_on(async_main(&server))?;
runtime::shutdown(&server, runtime);
#[cfg(unix)]
if server.server.restarting.load(Ordering::Acquire) {
restart::restart();
}
debug_info!("Exit");
Ok(())
}
/// Operate the server normally in release-mode static builds. This will start,
/// run and stop the server within the asynchronous runtime.
#[cfg(any(not(conduwuit_mods), not(feature = "conduwuit_mods")))]
#[tracing::instrument(
name = "main",
parent = None,
skip_all
)]
async fn async_main(server: &Arc<Server>) -> Result<(), Error> {
extern crate conduwuit_router as router;
match router::start(&server.server).await {
| Ok(services) => server.services.lock().await.insert(services),
| Err(error) => {
error!("Critical error starting server: {error}");
return Err(error);
},
};
if let Err(error) = router::run(
server
.services
.lock()
.await
.as_ref()
.expect("services initialized"),
)
.await
{
error!("Critical error running server: {error}");
return Err(error);
}
if let Err(error) = router::stop(
server
.services
.lock()
.await
.take()
.expect("services initialized"),
)
.await
{
error!("Critical error stopping server: {error}");
return Err(error);
}
debug_info!("Exit runtime");
Ok(())
}
/// Operate the server in developer-mode dynamic builds. This will start, run,
/// and hot-reload portions of the server as-needed before returning for an
/// actual shutdown. This is not available in release-mode or static builds.
#[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))]
async fn async_main(server: &Arc<Server>) -> Result<(), Error> {
let mut starts = true;
let mut reloads = true;
while reloads {
if let Err(error) = mods::open(server).await {
error!("Loading router: {error}");
return Err(error);
}
let result = mods::run(server, starts).await;
if let Ok(result) = result {
(starts, reloads) = result;
}
let force = !reloads || result.is_err();
if let Err(error) = mods::close(server, force).await {
error!("Unloading router: {error}");
return Err(error);
}
if let Err(error) = result {
error!("{error}");
return Err(error);
}
}
debug_info!("Exit runtime");
Ok(())
}

View file

@ -1,129 +0,0 @@
#![type_length_limit = "49152"] //TODO: reduce me
use std::sync::{Arc, atomic::Ordering};
use conduwuit_core::{debug_info, error, rustc_flags_capture};
mod clap;
mod logging;
mod mods;
mod restart;
mod runtime;
mod sentry;
mod server;
mod signal;
use server::Server;
rustc_flags_capture! {}
pub use conduwuit_core::{Error, Result};
pub use crate::clap::Args;
pub fn run() -> Result<()> {
let args = clap::parse();
run_with_args(&args)
}
pub fn run_with_args(args: &Args) -> Result<()> {
let runtime = runtime::new(args)?;
let server = Server::new(args, Some(runtime.handle()))?;
runtime.spawn(signal::signal(server.clone()));
runtime.block_on(async_main(&server))?;
runtime::shutdown(&server, runtime);
#[cfg(unix)]
if server.server.restarting.load(Ordering::Acquire) {
restart::restart();
}
debug_info!("Exit");
Ok(())
}
/// Operate the server normally in release-mode static builds. This will start,
/// run and stop the server within the asynchronous runtime.
#[cfg(any(not(conduwuit_mods), not(feature = "conduwuit_mods")))]
#[tracing::instrument(
name = "main",
parent = None,
skip_all
)]
async fn async_main(server: &Arc<Server>) -> Result<(), Error> {
extern crate conduwuit_router as router;
match router::start(&server.server).await {
| Ok(services) => server.services.lock().await.insert(services),
| Err(error) => {
error!("Critical error starting server: {error}");
return Err(error);
},
};
if let Err(error) = router::run(
server
.services
.lock()
.await
.as_ref()
.expect("services initialized"),
)
.await
{
error!("Critical error running server: {error}");
return Err(error);
}
if let Err(error) = router::stop(
server
.services
.lock()
.await
.take()
.expect("services initialized"),
)
.await
{
error!("Critical error stopping server: {error}");
return Err(error);
}
debug_info!("Exit runtime");
Ok(())
}
/// Operate the server in developer-mode dynamic builds. This will start, run,
/// and hot-reload portions of the server as-needed before returning for an
/// actual shutdown. This is not available in release-mode or static builds.
#[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))]
async fn async_main(server: &Arc<Server>) -> Result<(), Error> {
let mut starts = true;
let mut reloads = true;
while reloads {
if let Err(error) = mods::open(server).await {
error!("Loading router: {error}");
return Err(error);
}
let result = mods::run(server, starts).await;
if let Ok(result) = result {
(starts, reloads) = result;
}
let force = !reloads || result.is_err();
if let Err(error) = mods::close(server, force).await {
error!("Unloading router: {error}");
return Err(error);
}
if let Err(error) = result {
error!("{error}");
return Err(error);
}
}
debug_info!("Exit runtime");
Ok(())
}

View file

@ -9,10 +9,7 @@ use conduwuit_core::{
};
use tokio::{runtime, sync::Mutex};
use crate::{
clap::{Args, update},
logging::TracingFlameGuard,
};
use crate::{clap::Args, logging::TracingFlameGuard};
/// Server runtime state; complete
pub(crate) struct Server {
@ -46,7 +43,7 @@ impl Server {
.map(PathBuf::as_path);
let config = Config::load(config_paths)
.and_then(|raw| update(raw, args))
.and_then(|raw| crate::clap::update(raw, args))
.and_then(|raw| Config::new(&raw))?;
let (tracing_reload_handle, tracing_flame_guard, capture) =

View file

@ -111,7 +111,6 @@ webpage.workspace = true
webpage.optional = true
blurhash.workspace = true
blurhash.optional = true
recaptcha-verify = { version = "0.1.5", default-features = false }
[lints]
workspace = true

View file

@ -177,34 +177,6 @@ pub async fn try_auth(
// Password was correct! Let's add it to `completed`
uiaainfo.completed.push(AuthType::Password);
},
| AuthData::ReCaptcha(r) => {
if self.services.config.recaptcha_private_site_key.is_none() {
return Err!(Request(Forbidden("ReCaptcha is not configured.")));
}
match recaptcha_verify::verify(
self.services
.config
.recaptcha_private_site_key
.as_ref()
.unwrap(),
r.response.as_str(),
None,
)
.await
{
| Ok(()) => {
uiaainfo.completed.push(AuthType::ReCaptcha);
},
| Err(e) => {
error!("ReCaptcha verification failed: {e:?}");
uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody {
kind: ErrorKind::forbidden(),
message: "ReCaptcha verification failed.".to_owned(),
});
return Ok((false, uiaainfo));
},
}
},
| AuthData::RegistrationToken(t) => {
let tokens = self.read_tokens().await?;
if tokens.contains(t.token.trim()) {

View file

@ -1,26 +0,0 @@
[package]
name = "xtask-generate-commands"
authors.workspace = true
categories.workspace = true
description.workspace = true
edition.workspace = true
homepage.workspace = true
keywords.workspace = true
license.workspace = true
readme.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
[dependencies]
clap-markdown = "0.1.5"
clap_builder = { version = "4.5.38", default-features = false }
clap_mangen = "0.2"
conduwuit-admin.workspace = true
# Hack to prevent rebuilds
conduwuit.workspace = true
[lints]
workspace = true

View file

@ -1,113 +0,0 @@
use std::{
fs::{self, File},
io::{self, Write},
path::Path,
};
use clap_builder::{Command, CommandFactory};
use conduwuit_admin::AdminCommand;
enum CommandType {
Admin,
Server,
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut args = std::env::args().skip(1);
let command_type = args.next();
let task = args.next();
match (command_type, task) {
| (None, _) => {
return Err("Missing command type (admin or server)".into());
},
| (Some(cmd_type), None) => {
return Err(format!("Missing task for {cmd_type} command").into());
},
| (Some(cmd_type), Some(task)) => {
let command_type = match cmd_type.as_str() {
| "admin" => CommandType::Admin,
| "server" => CommandType::Server,
| _ => return Err(format!("Invalid command type: {cmd_type}").into()),
};
match task.as_str() {
| "man" => match command_type {
| CommandType::Admin => {
let dir = Path::new("./admin-man");
gen_admin_manpages(dir)?;
},
| CommandType::Server => {
let dir = Path::new("./server-man");
gen_server_manpages(dir)?;
},
},
| "md" => {
match command_type {
| CommandType::Admin => {
let command = AdminCommand::command().name("admin");
let res = clap_markdown::help_markdown_command_custom(
&command,
&clap_markdown::MarkdownOptions::default().show_footer(false),
)
.replace("\n\r", "\n")
.replace("\r\n", "\n")
.replace(" \n", "\n");
let mut file = File::create(Path::new("./docs/admin_reference.md"))?;
file.write_all(res.trim_end().as_bytes())?;
file.write_all(b"\n")?;
},
| CommandType::Server => {
// Get the server command from the conduwuit crate
let command = conduwuit::Args::command();
let res = clap_markdown::help_markdown_command_custom(
&command,
&clap_markdown::MarkdownOptions::default().show_footer(false),
)
.replace("\n\r", "\n")
.replace("\r\n", "\n")
.replace(" \n", "\n");
let mut file = File::create(Path::new("./docs/server_reference.md"))?;
file.write_all(res.trim_end().as_bytes())?;
file.write_all(b"\n")?;
},
}
},
| invalid => return Err(format!("Invalid task name: {invalid}").into()),
}
},
}
Ok(())
}
fn gen_manpage_common(dir: &Path, c: &Command, prefix: Option<&str>) -> Result<(), io::Error> {
fs::create_dir_all(dir)?;
let sub_name = c.get_display_name().unwrap_or_else(|| c.get_name());
let name = if let Some(prefix) = prefix {
format!("{prefix}-{sub_name}")
} else {
sub_name.to_owned()
};
let mut out = File::create(dir.join(format!("{name}.1")))?;
let clap_mangen = clap_mangen::Man::new(c.to_owned().disable_help_flag(true));
clap_mangen.render(&mut out)?;
for sub in c.get_subcommands() {
gen_manpage_common(&dir.join(sub_name), sub, Some(&name))?;
}
Ok(())
}
fn gen_admin_manpages(dir: &Path) -> Result<(), io::Error> {
gen_manpage_common(dir, &AdminCommand::command().name("admin"), None)
}
fn gen_server_manpages(dir: &Path) -> Result<(), io::Error> {
gen_manpage_common(dir, &conduwuit::Args::command(), None)
}

View file

@ -1,22 +0,0 @@
[package]
name = "xtask"
authors.workspace = true
categories.workspace = true
description.workspace = true
edition.workspace = true
homepage.workspace = true
keywords.workspace = true
license.workspace = true
readme.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
[dependencies]
clap.workspace = true
# Required for working with JSON output from cargo metadata
serde.workspace = true
serde_json = "1.0"
[lints]
workspace = true

View file

@ -1,11 +0,0 @@
use std::{env, process::Command};
fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut child = Command::new("cargo").args(["run", "--package", "xtask-generate-commands", "--"].into_iter().map(ToOwned::to_owned).chain(env::args().skip(2)))
// .stdout(Stdio::piped())
// .stderr(Stdio::piped())
.spawn()
.expect("failed to execute child");
child.wait()?;
Ok(())
}