Compare commits

...

44 commits

Author SHA1 Message Date
nexy7574
56d869c941
refactor(hydra): Merge branch 'main' into hydra/public
# Conflicts:
#	src/core/info/room_version.rs
#	src/service/rooms/timeline/create.rs
2025-08-26 01:24:06 +01:00
nexy7574
3183210459
fix: Post-merge compile issues 2025-08-23 21:28:31 +01:00
RatCornu
57d7743037 feat: add ldap_only config option 2025-08-23 19:59:36 +00:00
Jade Ellis
cb09bfa4e7 fix: Correctly pass ldap feature from the default crate 2025-08-23 19:59:36 +00:00
Jade Ellis
0ed691edef fix: Make builds without LDAP work correctly 2025-08-23 19:59:36 +00:00
Jade Ellis
c58b9f05ed chore: Fix default attributes for config 2025-08-23 19:59:36 +00:00
RatCornu
fb7e739b72 chore: remove unused LDAP mail attribute 2025-08-23 19:59:36 +00:00
RatCornu
c7adbae03f feat: ldap login 2025-08-23 19:59:36 +00:00
Jade Ellis
8b35de6a43
chore: Fix clippy lints with minimal diff 2025-08-22 00:51:54 +01:00
aviac
d191494f18
chore(nix): update fenix input
This is required, since we're now installing `rustfmt` from the latest
state of the fenix repo. The previous input, locked at 2025-07-02, wasn't
recent enough for the latest Rust version. Now it's up to date.
2025-08-22 00:37:16 +01:00
aviac
6d1f12b22d
chore(nix): make rustfmt-nightly available to default dev shell
I verified this by running `rustfmt --version` on my system. Note that I
don't have a system-wide install of Rust and rely only on dev shells, so
this can't possibly come from anywhere else.

```
$ rustfmt --version
rustfmt 1.8.0-nightly (6677875279 2025-07-02)
```
2025-08-22 00:37:16 +01:00
aviac
ca3ee9224b
chore(rust): drop rustfmt from rust-toolchain.toml
This just installs regular rustfmt, which is not needed in this project.
One could say "it doesn't hurt", but in the NixOS dev shell it actually
does, since it shadows nightly rustfmt and we don't have the
`cargo +nightly fmt` syntax on NixOS that is available on other distros.

Deleting it also doesn't hurt non-NixOS users.
2025-08-22 00:37:16 +01:00
aviac
427b973b67
chore(rust): bump version 1.87 -> 1.89
- bump version in rust-toolchain.toml
- update sha in flake.nix
2025-08-22 00:32:04 +01:00
Tom Foster
aacaf5a2a0 fix(ci): Downgrade setup-uv action from v6 to v5
The setup-uv@v6 action has deprecated Node 18 support mid-version by
using the File API, causing workflow failures. Temporarily downgrading
to v5 until we migrate to a better runner image with Node 20+ support.
2025-08-21 21:10:15 +01:00
aviac
256bed992e
chore(nix): exec 'use flake' with direnv on NixOS systems 2025-08-21 13:40:11 +02:00
aviac
ecb87ccd1c
chore(nix): bump rocksdb version in flake.nix to 10.4.fb
This works without any further changes. Multiple people in the Matrix
room (including myself) have reported that the built executable runs
fine with this. Nevertheless, there might be room for improvement in
future commits.
2025-08-21 13:39:36 +02:00
Tom Foster
14a4b24fc5 fix(ci): Configure Renovate for Forgejo platform
- Set platform to 'forgejo' with proper API endpoint
- Use environment variables for all Renovate configuration
- Add git timeout and disable GitHub token warnings
- Move PR limit configuration to workflow
2025-08-17 17:37:24 +01:00
Tom Foster
731761f0fc Merge branch 'main' into tom/prek-was-prefligit 2025-08-17 15:08:44 +00:00
Tom Foster
4524a00fc6 chore(ci): Remove obsolete prefligit action
Now using prek directly via uvx, this custom action is no longer needed.
2025-08-17 16:00:42 +01:00
Tom Foster
9db750e97c fix(ci): Add full GitHub URL to renovate action
Forgejo's runner doesn't automatically assume actions are on github.com,
so we need to specify the full URL.
2025-08-17 15:51:29 +01:00
Tom Foster
b14a4d470b Merge branch 'main' into tom/prek-was-prefligit 2025-08-17 14:16:35 +00:00
Tom Foster
5d1f141882 ci: Rename prefligit-checks.yml to prek-checks.yml
Rename workflow file to match the updated tool name.
2025-08-17 15:13:02 +01:00
Tom Foster
b447cfff56 ci: Update prefligit to prek
The prefligit project has been renamed to prek due to typosquatting
concerns. This updates our CI to use the new name and recommended
installation method via uv, which significantly reduces setup time
compared to cargo install and includes automatic caching.

- Replace outdated static prefligit action with direct prek invocation
- Use uv as recommended by upstream: https://github.com/j178/prek
- Update check-byte-order-marker to fix-byte-order-marker (deprecated)
- Simplify workflow by removing unused ref calculations

The same .pre-commit-config.yaml works unchanged. Developers can
install locally with 'uvx prek install' or other methods from the repo.
2025-08-17 15:11:38 +01:00
Tom Foster
283888e788 Merge branch 'main' into tom/renovate 2025-08-17 13:27:33 +00:00
Tom Foster
f54e59a068 ci: Add Renovate for automated dependency management
Configures Renovate bot to create PRs for outdated dependencies.
Runs daily at 5am UTC with manual trigger via workflow_dispatch.

Configuration:
- Ignores custom forks (jemalloc, telemetry packages)
- Groups: GHA minor/patch, Rust toolchain, lockfile, Rust patches
- Limits: 3 concurrent PRs, 2 PRs per hour
- Supports: Cargo, GitHub Actions, Nix
2025-08-17 14:20:20 +01:00
Tom Foster
2a183cc5a4 fix(build): Remove hardened_malloc from full feature set
The hardened_malloc feature conflicts with jemalloc, preventing successful
builds with the --features full flag. Commenting out hardened_malloc allows
the full profile to build correctly while maintaining all other features.
2025-08-17 13:44:32 +01:00
nexy7574
54acd07555
fix: Drop fake room v2 support 2025-08-16 16:22:24 +01:00
Tom Foster
583cb924f1 refactor: address code review feedback for auth and pagination improvements
- Extract duplicated thread/message pagination functions to shared utils module
- Refactor pagination token parsing to use Option combinators instead of defaults
- Split access token generation from assignment for clearer error handling
- Add appservice token collision detection at startup and registration
- Allow appservice re-registration with same token (for config updates)
- Simplify thread relation chunk building using iterator chaining
- Fix saturating_inc edge case in relation queries with explicit filtering
- Add concise comments explaining non-obvious behaviour choices
2025-08-12 05:29:41 +01:00
Tom Foster
9286838d23 fix(relations): improve thread pagination and include root event
Replace unreliable PduCount pagination tokens with ShortEventId throughout
the relations and messages endpoints. ShortEventId provides stable, unique
identifiers that persist across server restarts and database operations.

Key improvements:
- Add token parsing helpers that try ShortEventId first, fall back to
  PduCount for backwards compatibility
- Include thread root event when paginating backwards to thread start
- Fix off-by-one error in get_relations that was returning the starting
  event in results
- Only return next_batch/prev_batch tokens when more events are available,
  preventing clients from making unnecessary requests at thread boundaries
- Ensure consistent token format between /relations, /messages, and /sync
  endpoints for interoperability

This fixes duplicate events when scrolling at thread boundaries and ensures
the thread root message is visible when viewing a thread, matching expected
client behaviour.
2025-08-10 19:12:56 +01:00
Tom Foster
d1ebcfaf0b fix(auth): prevent token collisions and optimise lookups
Ensures access tokens are unique across both user and appservice tables to
prevent authentication ambiguity and potential security issues.

Changes:
- On startup, automatically logout any user devices using tokens that
  conflict with appservice tokens (resolves in favour of appservices)
  and log a warning with affected user/device details
- When creating new user tokens, check for conflicts with appservice tokens
  and generate a new token if a collision would occur
- When registering new appservices, reject registration if the token is
  already in use by a user device
- Use futures::select_ok to race token lookups concurrently for better
  performance (adapted from tuwunel commit 066097a8)

This fix-forward approach resolves existing token collisions on startup
whilst preventing new ones from being created, without breaking existing
valid authentications.

The find_token optimisation is adapted from tuwunel (matrix-construct/tuwunel)
commit 066097a8: "Optimize user and appservice token queries" by Jason Volk.
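
For illustration, a minimal sketch of the collision-avoiding token generation
described above (assumed method and service names; the actual
`generate_unique_token` implementation is only called, not shown, in this diff):

```
// Hypothetical sketch: retry random generation until the token is free.
async fn generate_unique_token(&self) -> String {
	loop {
		let token = utils::random_string(TOKEN_LENGTH);

		// If an appservice already holds this token, generate a new one.
		if self.services.appservice.find_from_token(&token).await.is_err() {
			return token;
		}
	}
}
```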
2025-08-10 17:10:06 +01:00
Tom Foster
e820551f62 fix(appservice): create sender_localpart user during appservice startup
Fixes #813: Application services were unable to work because their sender_localpart
user was never created in the database, preventing authentication.

This fix ensures the appservice user account is created when:
- The server starts up and loads existing appservices from the database
- A new appservice is registered via the admin command

Additionally, if an appservice user has been accidentally deactivated, it will be
automatically reactivated when the appservice starts.

The solution centralises all appservice startup logic into a single `start_appservice`
helper method, eliminating code duplication between the registration and startup paths.
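
A hedged sketch of that helper (assumed names and signatures; only its
behaviour is described in this commit):

```
// Hypothetical sketch: ensure the sender_localpart user exists and is
// active before the appservice starts handling traffic.
async fn start_appservice(&self, registration: Registration) -> Result<()> {
	let appservice_user_id = UserId::parse_with_server_name(
		registration.sender_localpart.as_str(),
		self.services.globals.server_name(),
	)?;

	if !self.services.users.exists(&appservice_user_id).await {
		// Create the user so the appservice can authenticate (fixes #813)
		self.services.users.create(&appservice_user_id, None, None).await?;
	} else if self.services.users.is_deactivated(&appservice_user_id).await? {
		// Reactivate an accidentally deactivated appservice user
		self.services.users.reactivate_account(&appservice_user_id).await?;
	}

	Ok(())
}
```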
2025-08-10 17:10:06 +01:00
Yonatan Sidler
bd3db65cb2
fix(arch): fix config.toml not being loaded from LoadCredentials directory 2025-08-06 20:01:36 +03:00
nexy7574
e4a43b1a5b
fix(policy-server): Call the PS later in the PDU creation process
This avoids accidentally sending partially built PDUs to the policy server,
which may cause issues with some implementations.
2025-08-02 00:19:33 +01:00
Jade Ellis
5775e0ad9d
docs: Make traefik router names consistent 2025-07-30 19:55:48 +01:00
Jade Ellis
238cc627e3
docs: Set traefik labels 2025-07-30 19:33:53 +01:00
Jade Ellis
b1516209c4
chore: Update funding file 2025-07-30 19:23:38 +01:00
Jade Ellis
0589884109
docs: Fix documentation link in README
Closes https://forgejo.ellis.link/continuwuation/continuwuity/issues/913
2025-07-28 19:28:34 +01:00
Jade Ellis
4a83df5b57
chore: Fix link 2025-07-25 17:35:18 +01:00
Jade Ellis
aa08edc55f
chore: Release announcement 2025-07-25 17:30:31 +01:00
Jade Ellis
00c7e220bb
chore: Release
2025-07-25 14:10:06 +01:00
Jade Ellis
87be4d1a52
feat: Almost-functional musl builds on Alpine
Lots of fiddling; still can't get it to work.

Next step is a Debian builder copying the static libs from Alpine.
2025-07-24 23:22:27 +01:00
Jade Ellis
205506f206
chore: Update deps 2025-07-24 22:18:10 +01:00
Jade Ellis
66181c61af
chore: Update rocksdb, feature flag changes
Most of the way to static musl builds; just zlib left, I think.
2025-07-24 21:51:52 +01:00
Jade Ellis
b7a0442298
feat: Musl images in docker
Not working at the moment; we need to upgrade rust-rocksdb and possibly
zstd to stop them force-enabling dynamic libclang.
2025-07-24 19:00:41 +01:00
53 changed files with 1842 additions and 453 deletions

.envrc
View file

@@ -2,6 +2,8 @@
dotenv_if_exists
# use flake ".#${DIRENV_DEVSHELL:-default}"
if [ -f /etc/os-release ] && grep -q '^ID=nixos' /etc/os-release; then
use flake ".#${DIRENV_DEVSHELL:-default}"
fi
PATH_add bin

.forgejo/actions/prefligit/action.yml
View file

@@ -1,27 +0,0 @@
name: prefligit
description: |
Runs prefligit, pre-commit reimplemented in Rust.
inputs:
extra_args:
description: options to pass to pre-commit run
required: false
default: '--all-files'
runs:
using: composite
steps:
- name: Install uv
uses: https://github.com/astral-sh/setup-uv@v6
with:
enable-cache: true
ignore-nothing-to-cache: true
- name: Install Prefligit
shell: bash
run: |
curl --proto '=https' --tlsv1.2 -LsSf https://github.com/j178/prefligit/releases/download/v0.0.10/prefligit-installer.sh | sh
- uses: actions/cache@v3
with:
path: ~/.cache/prefligit
key: prefligit-0|${{ hashFiles('.pre-commit-config.yaml') }}
- run: prefligit run --show-diff-on-failure --color=always -v ${{ inputs.extra_args }}
shell: bash

.forgejo/workflows/prefligit-checks.yml
View file

@@ -1,22 +0,0 @@
name: Checks / Prefligit
on:
push:
pull_request:
permissions:
contents: read
jobs:
prefligit:
runs-on: ubuntu-latest
env:
FROM_REF: ${{ github.event.pull_request.base.sha || (!github.event.forced && ( github.event.before != '0000000000000000000000000000000000000000' && github.event.before || github.sha )) || format('{0}~', github.sha) }}
TO_REF: ${{ github.sha }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.forgejo/actions/prefligit
with:
extra_args: --all-files --hook-stage manual

.forgejo/workflows/prek-checks.yml
View file

@@ -0,0 +1,34 @@
name: Checks / Prek
on:
push:
pull_request:
permissions:
contents: read
jobs:
fast-checks:
name: Pre-commit & Formatting
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install uv
uses: https://github.com/astral-sh/setup-uv@v5
with:
enable-cache: true
ignore-nothing-to-cache: true
cache-dependency-glob: ''
- name: Run prek
run: |
uvx prek run \
--all-files \
--hook-stage manual \
--show-diff-on-failure \
--color=always \
-v

.forgejo/workflows/renovate.yml
View file

@@ -0,0 +1,62 @@
name: Maintenance / Renovate
on:
schedule:
# Run at 5am UTC daily to avoid late-night dev
- cron: '0 5 * * *'
workflow_dispatch:
inputs:
dryRun:
description: 'Dry run mode'
required: false
default: null
type: choice
options:
- null
- 'extract'
- 'lookup'
- 'full'
logLevel:
description: 'Log level'
required: false
default: 'info'
type: choice
options:
- 'info'
- 'warning'
- 'critical'
push:
branches:
- main
paths:
# Re-run when config changes
- '.forgejo/workflows/renovate.yml'
- 'renovate.json'
jobs:
renovate:
name: Renovate
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Self-hosted Renovate
uses: https://github.com/renovatebot/github-action@v40.1.0
env:
LOG_LEVEL: ${{ inputs.logLevel || 'info' }}
RENOVATE_AUTODISCOVER: 'false'
RENOVATE_BINARY_SOURCE: 'install'
RENOVATE_DRY_RUN: ${{ inputs.dryRun || 'false' }}
RENOVATE_ENDPOINT: ${{ github.server_url }}/api/v1
RENOVATE_GIT_TIMEOUT: 60000
RENOVATE_GIT_URL: 'endpoint'
RENOVATE_GITHUB_TOKEN_WARN: 'false'
RENOVATE_ONBOARDING: 'false'
RENOVATE_PLATFORM: 'forgejo'
RENOVATE_PR_COMMITS_PER_RUN_LIMIT: 3
RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'
RENOVATE_REQUIRE_CONFIG: 'required'
RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}

View file

@@ -73,7 +73,7 @@ jobs:
run: |
cargo clippy \
--workspace \
--all-features \
--features full \
--locked \
--no-deps \
--profile test \
@@ -133,7 +133,7 @@ jobs:
run: |
cargo test \
--workspace \
--all-features \
--features full \
--locked \
--profile test \
--all-targets \

.github/FUNDING.yml vendored
View file

@@ -1,5 +1,4 @@
github: [JadedBlueEyes]
# Doesn't support an array, so we can only list nex
ko_fi: nexy7574
github: [JadedBlueEyes, nexy7574]
custom:
- https://ko-fi.com/nexy7574
- https://ko-fi.com/JadedBlueEyes

.pre-commit-config.yaml
View file

@@ -9,7 +9,7 @@ repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-byte-order-marker
- id: fix-byte-order-marker
- id: check-case-conflict
- id: check-symlinks
- id: destroyed-symlinks

View file

@@ -65,11 +65,11 @@ Tests, compilation, and linting can be run with standard Cargo commands:
cargo test
# Check compilation
cargo check --workspace --all-features
cargo check --workspace --features full
# Run lints
cargo clippy --workspace --all-features
# Auto-fix: cargo clippy --workspace --all-features --fix --allow-staged;
cargo clippy --workspace --features full
# Auto-fix: cargo clippy --workspace --features full --fix --allow-staged;
# Format code (must use nightly)
cargo +nightly fmt

Cargo.lock generated

File diff suppressed because it is too large Load diff

Cargo.toml
View file

@@ -21,7 +21,7 @@ license = "Apache-2.0"
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
rust-version = "1.86.0"
version = "0.5.0-rc.6"
version = "0.5.0-rc.7"
[workspace.metadata.crane]
name = "conduwuit"
@@ -392,7 +392,7 @@ features = [
[workspace.dependencies.rust-rocksdb]
git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1"
rev = "fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd"
rev = "99b0319416b64830dd6f8943e1f65e15aeef18bc"
default-features = false
features = [
"multi-threaded-cf",
@@ -547,6 +547,11 @@ features = ["std"]
[workspace.dependencies.maplit]
version = "1.0.2"
[workspace.dependencies.ldap3]
version = "0.11.5"
default-features = false
features = ["sync", "tls-rustls"]
#
# Patches
#
@@ -868,7 +873,7 @@ unused-qualifications = "warn"
#unused-results = "warn" # TODO
## some sadness
elided_named_lifetimes = "allow" # TODO!
mismatched_lifetime_syntaxes = "allow" # TODO!
let_underscore_drop = "allow"
missing_docs = "allow"
# cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g.
@@ -1007,3 +1012,6 @@ literal_string_with_formatting_args = { level = "allow", priority = 1 }
needless_raw_string_hashes = "allow"
# TODO: Enable this lint & fix all instances
collapsible_if = "allow"

View file

@@ -57,7 +57,7 @@ Continuwuity aims to:
### Can I try it out?
Check out the [documentation](introduction) for installation instructions.
Check out the [documentation](https://continuwuity.org) for installation instructions.
There are currently no open registration Continuwuity instances available.

View file

@@ -64,7 +64,7 @@ StateDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750
Environment=CONTINUWUITY_CONFIG=${CREDENTIALS_DIRECTORY}/config.toml
Environment=CONTINUWUITY_CONFIG=%d/config.toml
LoadCredential=config.toml:/etc/conduwuit/conduwuit.toml
BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit

conduwuit-example.toml
View file

@@ -1696,6 +1696,10 @@
#
#config_reload_signal = true
# This item is undocumented. Please contribute documentation for it.
#
#ldap = false
[global.tls]
# Path to a valid TLS certificate file.
@@ -1774,3 +1778,91 @@
# is 33.55MB. Setting it to 0 disables blurhashing.
#
#blurhash_max_raw_size = 33554432
[global.ldap]
# Whether to enable LDAP login.
#
# example: "true"
#
#enable = false
# Whether to force LDAP authentication or authorize classical password
# login.
#
# example: "true"
#
#ldap_only = false
# URI of the LDAP server.
#
# example: "ldap://ldap.example.com:389"
#
#uri = ""
# Root of the searches.
#
# example: "ou=users,dc=example,dc=org"
#
#base_dn = ""
# Bind DN if anonymous search is not enabled.
#
# You can use the variable `{username}` that will be replaced by the
# entered username. In that case, the password used to bind will be the
# one provided for the login and not the one given by
# `bind_password_file`. Beware: automatically granting admin rights will
# not work if you use this direct bind instead of an LDAP search.
#
# example: "cn=ldap-reader,dc=example,dc=org" or
# "cn={username},ou=users,dc=example,dc=org"
#
#bind_dn = ""
# Path to a file on the system that contains the password for the
# `bind_dn`.
#
# The server must be able to access the file, and it must not be empty.
#
#bind_password_file = ""
# Search filter to limit user searches.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(&(objectClass=person)(memberOf=matrix))"
#
#filter = "(objectClass=*)"
# Attribute to use to uniquely identify the user.
#
# example: "uid" or "cn"
#
#uid_attribute = "uid"
# Attribute containing the display name of the user.
#
# example: "givenName" or "sn"
#
#name_attribute = "givenName"
# Root of the searches for admin users.
#
# Defaults to `base_dn` if empty.
#
# example: "ou=admins,dc=example,dc=org"
#
#admin_base_dn = ""
# The LDAP search filter to find administrative users for continuwuity.
#
# If left blank, administrative state must be configured manually for each
# user.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
#
#admin_filter = ""

View file

@@ -78,7 +78,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
WORKDIR /app
COPY ./rust-toolchain.toml .
RUN rustc --version \
&& rustup target add $(xx-cargo --print-target-triple)
&& xx-cargo --setup-target-triple
# Build binary
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
@@ -87,8 +87,10 @@ RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
# Configure pkg-config
RUN <<EOF
set -o xtrace
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
fi
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
EOF
@@ -109,16 +111,17 @@ RUN <<EOF
EOF
# Apply CPU-specific optimizations if TARGET_CPU is provided
ARG TARGET_CPU=
ARG TARGET_CPU
RUN <<EOF
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
EOF
# Prepare output directories
@@ -136,12 +139,12 @@ ARG TARGETPLATFORM
RUN xx-cargo --print-target-triple
# Conduwuit version info
ARG GIT_COMMIT_HASH=
ARG GIT_COMMIT_HASH_SHORT=
ARG GIT_REMOTE_URL=
ARG GIT_REMOTE_COMMIT_URL=
ARG CONDUWUIT_VERSION_EXTRA=
ARG CONTINUWUITY_VERSION_EXTRA=
ARG GIT_COMMIT_HASH
ARG GIT_COMMIT_HASH_SHORT
ARG GIT_REMOTE_URL
ARG GIT_REMOTE_COMMIT_URL
ARG CONDUWUIT_VERSION_EXTRA
ARG CONTINUWUITY_VERSION_EXTRA
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
@@ -169,7 +172,7 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
for BINARY in "${BINARIES[@]}"; do
echo $BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
done
EOF

docker/musl.Dockerfile (new file)
View file

@@ -0,0 +1,200 @@
# Why does this exist?
# Debian doesn't provide prebuilt musl packages
# rocksdb requires a prebuilt liburing, and linking fails if a gnu one is provided
ARG RUST_VERSION=1
ARG ALPINE_VERSION=3.22
FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS base
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS toolchain
# Install repo tools and dependencies
RUN --mount=type=cache,target=/etc/apk/cache apk add \
build-base pkgconfig make jq bash \
curl git file \
llvm-dev clang clang-static lld
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.13.0
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
ENV LDDTREE_VERSION=0.3.7
# Install unpackaged tools
RUN <<EOF
set -o xtrace
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
EOF
# Set up xx (cross-compilation scripts)
COPY --from=xx / /
ARG TARGETPLATFORM
# Install libraries linked by the binary
RUN --mount=type=cache,target=/etc/apk/cache xx-apk add musl-dev gcc g++ liburing-dev
# Set up Rust toolchain
WORKDIR /app
COPY ./rust-toolchain.toml .
RUN rustc --version \
&& xx-cargo --setup-target-triple
# Build binary
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
# Configure pkg-config
RUN <<EOF
set -o xtrace
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
fi
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
EOF
# Configure cc to use clang version
RUN <<EOF
set -o xtrace
echo "CC=clang" >> /etc/environment
echo "CXX=clang++" >> /etc/environment
EOF
# Cross-language LTO
RUN <<EOF
set -o xtrace
echo "CFLAGS=-flto" >> /etc/environment
echo "CXXFLAGS=-flto" >> /etc/environment
# Linker is set to target-compatible clang by xx
echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment
EOF
# Apply CPU-specific optimizations if TARGET_CPU is provided
ARG TARGET_CPU
RUN <<EOF
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
EOF
# Prepare output directories
RUN mkdir /out
FROM toolchain AS builder
# Get source
COPY . .
ARG TARGETPLATFORM
# Verify environment configuration
RUN xx-cargo --print-target-triple
# Conduwuit version info
ARG GIT_COMMIT_HASH
ARG GIT_COMMIT_HASH_SHORT
ARG GIT_REMOTE_URL
ARG GIT_REMOTE_COMMIT_URL
ARG CONDUWUIT_VERSION_EXTRA
ARG CONTINUWUITY_VERSION_EXTRA
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA
ARG RUST_PROFILE=release
# Build the binary
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
--mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-musl-${RUST_PROFILE} \
bash <<'EOF'
set -o allexport
set -o xtrace
. /etc/environment
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".target_directory"))
mkdir /out/sbin
PACKAGE=conduwuit
xx-cargo build --locked --profile ${RUST_PROFILE} \
-p $PACKAGE --no-default-features --features bindgen-static,release_max_log_level,standard;
BINARIES=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
for BINARY in "${BINARIES[@]}"; do
echo $BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
done
EOF
# Generate Software Bill of Materials (SBOM)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
bash <<'EOF'
set -o xtrace
mkdir /out/sbom
typeset -A PACKAGES
for BINARY in /out/sbin/*; do
BINARY_BASE=$(basename ${BINARY})
package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. == \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name")
if [ -z "$package" ]; then
continue
fi
PACKAGES[$package]=1
done
for PACKAGE in $(echo ${!PACKAGES[@]}); do
echo $PACKAGE
cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json
done
EOF
# Extract dynamically linked dependencies
RUN <<EOF
set -o xtrace
mkdir /out/libs
mkdir /out/libs-root
for BINARY in /out/sbin/*; do
lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
done
EOF
FROM scratch
WORKDIR /
# Copy root certs for tls into image
# You can also mount the certs from the host
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
COPY --from=base /etc/ssl/certs /etc/ssl/certs
# Copy our build
COPY --from=builder /out/sbin/ /sbin/
# Copy SBOM
COPY --from=builder /out/sbom/ /sbom/
# Copy dynamic libraries to root
COPY --from=builder /out/libs-root/ /
COPY --from=builder /out/libs/ /usr/lib/
# Inform linker where to find libraries
ENV LD_LIBRARY_PATH=/usr/lib
# Continuwuity default port
EXPOSE 8008
CMD ["/sbin/conduwuit"]

View file

@@ -12,6 +12,15 @@ services:
#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure" # your HTTPS entry point
- "traefik.http.routers.continuwuity.tls=true"
- "traefik.http.routers.continuwuity.service=continuwuity"
- "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
# possibly, depending on your config:
# - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
environment:
CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity

View file

@@ -12,6 +12,14 @@ services:
#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure"
- "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
- "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
# Uncomment and adjust the following if you want to use middleware
# - "traefik.http.routers.continuwuity.middlewares=secureHeaders@file"
environment:
CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'

View file

@@ -44,7 +44,7 @@ If wanting to build using standard Rust toolchains, make sure you install:
- (On linux) `pkg-config` on the compiling machine to allow finding `liburing`
- A C++ compiler and (on linux) `libclang` for RocksDB
You can build Continuwuity using `cargo build --release --all-features`.
You can build Continuwuity using `cargo build --release`.
### Building with Nix

View file

@@ -6,8 +6,8 @@
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
},
{
"id": 2,
"message": "🎉 Continuwuity v0.5.0-rc.6 is now available! This release includes improved knock-restricted room handling, automatic support contact configuration, and a new HTML landing page. Check [the release notes for full details](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.6) and upgrade instructions."
"id": 3,
"message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
}
]
}

View file

@@ -83,7 +83,7 @@ env DIRENV_DEVSHELL=all-features \
--workspace \
--locked \
--profile test \
--all-features \
--features full \
--no-deps \
--document-private-items \
--color always
@@ -96,7 +96,7 @@
direnv exec . \
cargo clippy \
--workspace \
--all-features \
--features full \
--locked \
--profile test \
--color=always \
@@ -114,7 +114,7 @@ env DIRENV_DEVSHELL=all-features \
--workspace \
--locked \
--profile test \
--all-features \
--features full \
--color=always \
-- \
-D warnings

flake.lock generated
View file

@@ -153,11 +153,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1751525020,
"narHash": "sha256-oDO6lCYS5Bf4jUITChj9XV7k3TP38DE0Ckz5n5ORCME=",
"lastModified": 1755585599,
"narHash": "sha256-tl/0cnsqB/Yt7DbaGMel2RLa7QG5elA8lkaOXli6VdY=",
"owner": "nix-community",
"repo": "fenix",
"rev": "a1a5f92f47787e7df9f30e5e5ac13e679215aa1e",
"rev": "6ed03ef4c8ec36d193c18e06b9ecddde78fb7e42",
"type": "github"
},
"original": {
@@ -516,16 +516,16 @@
"rocksdb": {
"flake": false,
"locked": {
"lastModified": 1741308171,
"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
"ref": "v9.11.1",
"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
"revCount": 13177,
"lastModified": 1753385396,
"narHash": "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=",
"ref": "10.4.fb",
"rev": "28d4b7276c16ed3e28af1bd96162d6442ce25923",
"revCount": 13318,
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
},
"original": {
"ref": "v9.11.1",
"ref": "10.4.fb",
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
}
@@ -546,11 +546,11 @@
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1751433876,
"narHash": "sha256-IsdwOcvLLDDlkFNwhdD5BZy20okIQL01+UQ7Kxbqh8s=",
"lastModified": 1755504847,
"narHash": "sha256-VX0B9hwhJypCGqncVVLC+SmeMVd/GAYbJZ0MiiUn2Pk=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "11d45c881389dae90b0da5a94cde52c79d0fc7ef",
"rev": "a905e3b21b144d77e1b304e49f3264f6f8d4db75",
"type": "github"
},
"original": {

flake.nix
View file

@@ -17,7 +17,7 @@
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
rocksdb = {
url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=v9.11.1";
url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=10.4.fb";
flake = false;
};
};
@@ -31,13 +31,17 @@
inherit system;
};
fnx = inputs.fenix.packages.${system};
# The Rust toolchain to use
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
toolchain = fnx.combine [
(fnx.fromToolchainFile {
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-KUm16pHj+cRedf8vxs/Hd2YWxpOrWZ7UOrwhILdSJBU=";
};
# See also `rust-toolchain.toml`
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
})
fnx.complete.rustfmt
];
mkScope =
pkgs:
@@ -62,7 +66,7 @@
}).overrideAttrs
(old: {
src = inputs.rocksdb;
version = "v9.11.1";
version = "v10.4.fb";
cmakeFlags =
pkgs.lib.subtractLists [
# No real reason to have snappy or zlib, no one uses this

renovate.json
View file

@@ -17,10 +17,46 @@
"github_actions"
],
"ignoreDeps": [
"tikv-jemllocator",
"tikv-jemallocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"opentelemetry-rust",
"opentelemetry",
"opentelemetry_sdk",
"opentelemetry-jaeger",
"tracing-opentelemetry"
],
"github-actions": {
"enabled": true,
"fileMatch": [
"(^|/)\\.forgejo/workflows/[^/]+\\.ya?ml$",
"(^|/)\\.forgejo/actions/[^/]+/action\\.ya?ml$",
"(^|/)\\.github/workflows/[^/]+\\.ya?ml$",
"(^|/)\\.github/actions/[^/]+/action\\.ya?ml$"
]
},
"packageRules": [
{
"description": "Batch minor and patch GitHub Actions updates",
"matchManagers": ["github-actions"],
"matchUpdateTypes": ["minor", "patch"],
"groupName": "github-actions-non-major"
},
{
"description": "Group Rust toolchain updates into a single PR",
"matchManagers": ["regex"],
"matchPackageNames": ["rust", "rustc", "cargo"],
"groupName": "rust-toolchain"
},
{
"description": "Group lockfile updates into a single PR",
"matchUpdateTypes": ["lockFileMaintenance"],
"groupName": "lockfile-maintenance"
},
{
"description": "Batch patch-level Rust dependency updates",
"matchManagers": ["cargo"],
"matchUpdateTypes": ["patch"],
"groupName": "rust-patch-updates"
}
]
}

rust-toolchain.toml
View file

@@ -9,13 +9,16 @@
# If you're having trouble making the relevant changes, bug a maintainer.
[toolchain]
channel = "1.87.0"
profile = "minimal"
channel = "1.89.0"
components = [
# For rust-analyzer
"rust-src",
"rust-analyzer",
# For CI and editors
"rustfmt",
"clippy",
# you have to install rustfmt nightly yourself (if you're not on NixOS)
#
# The rust-toolchain.toml file doesn't provide any syntax for specifying components from different toolchains
# "rustfmt"
]

View file

@@ -68,7 +68,8 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
// Create user
self.services
.users
.create(&user_id, Some(password.as_str()))?;
.create(&user_id, Some(password.as_str()), None)
.await?;
// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
@@ -284,6 +285,7 @@ pub(super) async fn reset_password(&self, username: String, password: Option<Str
.services
.users
.set_password(&user_id, Some(new_password.as_str()))
.await
{
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
| Ok(()) => {

View file

@@ -49,6 +49,9 @@ jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
"conduwuit-service/jemalloc_stats",
]
ldap = [
"conduwuit-service/ldap"
]
release_max_log_level = [
"conduwuit-core/release_max_log_level",
"conduwuit-service/release_max_log_level",

View file

@@ -373,7 +373,7 @@ pub(crate) async fn register_route(
let password = if is_guest { None } else { body.password.as_deref() };
// Create user
services.users.create(&user_id, password)?;
services.users.create(&user_id, password, None).await?;
// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
@@ -659,7 +659,8 @@ pub(crate) async fn change_password_route(
services
.users
.set_password(sender_user, Some(&body.new_password))?;
.set_password(sender_user, Some(&body.new_password))
.await?;
if body.logout_devices {
// Logout all devices except the current one

View file

@@ -8,7 +8,7 @@ use conduwuit::{
ref_at,
utils::{
IterStream, ReadyExt,
result::{FlatOk, LogErr},
result::LogErr,
stream::{BroadbandExt, TryIgnore, WidebandExt},
},
};
@@ -35,6 +35,7 @@ use ruma::{
};
use tracing::warn;
use super::utils::{count_to_token, parse_pagination_token as parse_token};
use crate::Ruma;
/// list of safe and common non-state events to ignore if the user is ignored
@@ -84,14 +85,14 @@ pub(crate) async fn get_message_events_route(
let from: PduCount = body
.from
.as_deref()
.map(str::parse)
.map(parse_token)
.transpose()?
.unwrap_or_else(|| match body.dir {
| Direction::Forward => PduCount::min(),
| Direction::Backward => PduCount::max(),
});
let to: Option<PduCount> = body.to.as_deref().map(str::parse).flat_ok();
let to: Option<PduCount> = body.to.as_deref().map(parse_token).transpose()?;
let limit: usize = body
.limit
@@ -180,8 +181,8 @@
.collect();
Ok(get_message_events::v3::Response {
start: from.to_string(),
end: next_token.as_ref().map(ToString::to_string),
start: count_to_token(from),
end: next_token.map(count_to_token),
chunk,
state,
})

View file

@@ -36,6 +36,7 @@ pub(super) mod typing;
pub(super) mod unstable;
pub(super) mod unversioned;
pub(super) mod user_directory;
pub(super) mod utils;
pub(super) mod voip;
pub(super) mod well_known;

View file

@@ -90,7 +90,7 @@ pub(crate) async fn get_displayname_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services
@@ -189,7 +189,7 @@ pub(crate) async fn get_avatar_url_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services
@@ -248,7 +248,7 @@ pub(crate) async fn get_profile_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services

View file

@@ -18,6 +18,7 @@
events::{TimelineEventType, relation::RelationType},
};
use super::utils::{count_to_token, parse_pagination_token as parse_token};
use crate::Ruma;
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
@@ -110,14 +111,14 @@ async fn paginate_relations_with_filter(
dir: Direction,
) -> Result<get_relating_events::v1::Response> {
let start: PduCount = from
.map(str::parse)
.map(parse_token)
.transpose()?
.unwrap_or_else(|| match dir {
| Direction::Forward => PduCount::min(),
| Direction::Backward => PduCount::max(),
});
let to: Option<PduCount> = to.map(str::parse).flat_ok();
let to: Option<PduCount> = to.map(parse_token).transpose()?;
// Use limit or else 30, with maximum 100
let limit: usize = limit
@@ -129,6 +130,11 @@ async fn paginate_relations_with_filter(
// Spec (v1.10) recommends depth of at least 3
let depth: u8 = if recurse { 3 } else { 1 };
// Check if this is a thread request
let is_thread = filter_rel_type
.as_ref()
.is_some_and(|rel| *rel == RelationType::Thread);
let events: Vec<_> = services
.rooms
.pdu_metadata
@@ -152,23 +158,58 @@ async fn paginate_relations_with_filter(
.collect()
.await;
let next_batch = match dir {
| Direction::Forward => events.last(),
| Direction::Backward => events.first(),
// For threads, check if we should include the root event
let mut root_event = None;
if is_thread && dir == Direction::Backward {
// Check if we've reached the beginning of the thread
// (fewer events than requested means we've exhausted the thread)
if events.len() < limit {
// Try to get the thread root event
if let Ok(root_pdu) = services.rooms.timeline.get_pdu(target).await {
// Check visibility
if services
.rooms
.state_accessor
.user_can_see_event(sender_user, room_id, target)
.await
{
// Store the root event to add to the response
root_event = Some(root_pdu);
}
}
}
}
.map(at!(0))
.as_ref()
.map(ToString::to_string);
// Determine if there are more events to fetch
let has_more = if root_event.is_some() {
false // We've included the root, no more events
} else {
// Check if we got a full page of results (might be more)
events.len() >= limit
};
let next_batch = if has_more {
match dir {
| Direction::Forward => events.last(),
| Direction::Backward => events.first(),
}
.map(|(count, _)| count_to_token(*count))
} else {
None
};
// Build the response chunk with thread root if needed
let chunk: Vec<_> = root_event
.into_iter()
.map(Event::into_format)
.chain(events.into_iter().map(at!(1)).map(Event::into_format))
.collect();
Ok(get_relating_events::v1::Response {
next_batch,
prev_batch: from.map(Into::into),
recursion_depth: recurse.then_some(depth.into()),
chunk: events
.into_iter()
.map(at!(1))
.map(Event::into_format)
.collect(),
chunk,
})
}

View file

@@ -3,13 +3,14 @@ use std::time::Duration;
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Error, Result, debug, err, info, utils,
utils::{ReadyExt, hash},
Err, Error, Result, debug, err, info,
utils::{self, ReadyExt, hash},
};
use conduwuit_service::uiaa::SESSION_ID_LENGTH;
use conduwuit_core::{debug_error, debug_warn};
use conduwuit_service::{Services, uiaa::SESSION_ID_LENGTH};
use futures::StreamExt;
use ruma::{
UserId,
OwnedUserId, UserId,
api::client::{
session::{
get_login_token,
@@ -49,6 +50,154 @@ pub(crate) async fn get_login_types_route(
]))
}
/// Authenticates the given user by its ID and its password.
///
/// Returns the user ID if successful, and an error otherwise.
#[tracing::instrument(skip_all, fields(%user_id), name = "password")]
pub(crate) async fn password_login(
services: &Services,
user_id: &UserId,
lowercased_user_id: &UserId,
password: &str,
) -> Result<OwnedUserId> {
// Restrict login to accounts only of type 'password', including untyped
// legacy accounts which are equivalent to 'password'.
if services
.users
.origin(user_id)
.await
.is_ok_and(|origin| origin != "password")
{
return Err!(Request(Forbidden("Account does not permit password login.")));
}
let (hash, user_id) = match services.users.password_hash(user_id).await {
| Ok(hash) => (hash, user_id),
| Err(_) => services
.users
.password_hash(lowercased_user_id)
.await
.map(|hash| (hash, lowercased_user_id))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?,
};
if hash.is_empty() {
return Err!(Request(UserDeactivated("The user has been deactivated")));
}
hash::verify_password(password, &hash)
.inspect_err(|e| debug_error!("{e}"))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
Ok(user_id.to_owned())
}
/// Authenticates the given user through the configured LDAP server.
///
/// Creates the user if the user is found in the LDAP directory and does not
/// already have an account.
#[tracing::instrument(skip_all, fields(%user_id), name = "ldap")]
pub(super) async fn ldap_login(
services: &Services,
user_id: &UserId,
lowercased_user_id: &UserId,
password: &str,
) -> Result<OwnedUserId> {
let (user_dn, is_ldap_admin) = match services.config.ldap.bind_dn.as_ref() {
| Some(bind_dn) if bind_dn.contains("{username}") =>
(bind_dn.replace("{username}", lowercased_user_id.localpart()), false),
| _ => {
debug!("Searching user in LDAP");
let dns = services.users.search_ldap(user_id).await?;
if dns.len() >= 2 {
return Err!(Ldap("LDAP search returned two or more results"));
}
let Some((user_dn, is_admin)) = dns.first() else {
return password_login(services, user_id, lowercased_user_id, password).await;
};
(user_dn.clone(), *is_admin)
},
};
let user_id = services
.users
.auth_ldap(&user_dn, password)
.await
.map(|()| lowercased_user_id.to_owned())?;
// LDAP users are automatically created on the first login attempt. This is a
// very common feature seen in many services using an LDAP provider for their
// users (Synapse, Nextcloud, Jellyfin, ...).
//
// LDAP users are created with a dummy but non-empty password, because an empty
// password is reserved for deactivated accounts. The conduwuit password field
// will never be read to log in an LDAP user, so this is not an issue.
if !services.users.exists(lowercased_user_id).await {
services
.users
.create(lowercased_user_id, Some("*"), Some("ldap"))
.await?;
}
let is_conduwuit_admin = services.admin.user_is_admin(lowercased_user_id).await;
if is_ldap_admin && !is_conduwuit_admin {
services.admin.make_user_admin(lowercased_user_id).await?;
} else if !is_ldap_admin && is_conduwuit_admin {
services.admin.revoke_admin(lowercased_user_id).await?;
}
Ok(user_id)
}
pub(crate) async fn handle_login(
services: &Services,
body: &Ruma<login::v3::Request>,
identifier: Option<&uiaa::UserIdentifier>,
password: &str,
user: Option<&String>,
) -> Result<OwnedUserId> {
debug!("Got password login type");
let user_id =
if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
UserId::parse_with_server_name(user_id, &services.config.server_name)
} else if let Some(user) = user {
UserId::parse_with_server_name(user, &services.config.server_name)
} else {
return Err!(Request(Unknown(
debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
)));
}
.map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;
let lowercased_user_id = UserId::parse_with_server_name(
user_id.localpart().to_lowercase(),
&services.config.server_name,
)?;
if !services.globals.user_is_local(&user_id)
|| !services.globals.user_is_local(&lowercased_user_id)
{
return Err!(Request(Unknown("User ID does not belong to this homeserver")));
}
if cfg!(feature = "ldap") && services.config.ldap.enable {
match Box::pin(ldap_login(services, &user_id, &lowercased_user_id, password)).await {
| Ok(user_id) => Ok(user_id),
| Err(err) if services.config.ldap.ldap_only => Err(err),
| Err(err) => {
debug_warn!("{err}");
password_login(services, &user_id, &lowercased_user_id, password).await
},
}
} else {
password_login(services, &user_id, &lowercased_user_id, password).await
}
}
/// # `POST /_matrix/client/v3/login`
///
/// Authenticates the user and returns an access token it can use in subsequent
@@ -80,70 +229,7 @@ pub(crate) async fn login_route(
password,
user,
..
}) => {
debug!("Got password login type");
let user_id =
if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
UserId::parse_with_server_name(user_id, &services.config.server_name)
} else if let Some(user) = user {
UserId::parse_with_server_name(user, &services.config.server_name)
} else {
return Err!(Request(Unknown(
debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
)));
}
.map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;
let lowercased_user_id = UserId::parse_with_server_name(
user_id.localpart().to_lowercase(),
&services.config.server_name,
)?;
if !services.globals.user_is_local(&user_id)
|| !services.globals.user_is_local(&lowercased_user_id)
{
return Err!(Request(Unknown("User ID does not belong to this homeserver")));
}
// first try the username as-is
let hash = services
.users
.password_hash(&user_id)
.await
.inspect_err(|e| debug!("{e}"));
match hash {
| Ok(hash) => {
if hash.is_empty() {
return Err!(Request(UserDeactivated("The user has been deactivated")));
}
hash::verify_password(password, &hash)
.inspect_err(|e| debug!("{e}"))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
user_id
},
| Err(_e) => {
let hash_lowercased_user_id = services
.users
.password_hash(&lowercased_user_id)
.await
.inspect_err(|e| debug!("{e}"))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
if hash_lowercased_user_id.is_empty() {
return Err!(Request(UserDeactivated("The user has been deactivated")));
}
hash::verify_password(password, &hash_lowercased_user_id)
.inspect_err(|e| debug!("{e}"))
.map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;
lowercased_user_id
},
}
},
}) => handle_login(&services, &body, identifier.as_ref(), password, user.as_ref()).await?,
| login::v3::LoginInfo::Token(login::v3::Token { token }) => {
debug!("Got token login type");
if !services.server.config.login_via_existing_session {
@@ -198,8 +284,8 @@ pub(crate) async fn login_route(
.clone()
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
// Generate a new token for the device
let token = utils::random_string(TOKEN_LENGTH);
// Generate a new token for the device (ensuring no collisions)
let token = services.users.generate_unique_token().await;
// Determine if device_id was provided and exists in the db for this user
let device_exists = if body.device_id.is_some() {

View file

@@ -45,6 +45,7 @@ use crate::{
type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
#[allow(clippy::cognitive_complexity)]
/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
///
/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)

View file

@@ -292,7 +292,7 @@ pub(crate) async fn get_timezone_key_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services
@@ -352,7 +352,7 @@ pub(crate) async fn get_profile_key_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}
services

src/api/client/utils.rs (new file)
View file

@@ -0,0 +1,28 @@
use conduwuit::{
Result, err,
matrix::pdu::{PduCount, ShortEventId},
};
/// Parse a pagination token, trying ShortEventId first, then falling back to
/// PduCount
pub(crate) fn parse_pagination_token(token: &str) -> Result<PduCount> {
// Try parsing as ShortEventId first
if let Ok(shorteventid) = token.parse::<ShortEventId>() {
// ShortEventId maps directly to a PduCount in our database
Ok(PduCount::Normal(shorteventid))
} else if let Ok(count) = token.parse::<u64>() {
// Fallback to PduCount for backwards compatibility
Ok(PduCount::Normal(count))
} else if let Ok(count) = token.parse::<i64>() {
// Also handle negative counts for backfilled events
Ok(PduCount::from_signed(count))
} else {
Err(err!(Request(InvalidParam("Invalid pagination token"))))
}
}
/// Convert a PduCount to a token string (using the underlying ShortEventId)
pub(crate) fn count_to_token(count: PduCount) -> String {
// The PduCount's unsigned value IS the ShortEventId
count.into_unsigned().to_string()
}
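
A quick round-trip illustration of the two helpers above (a hypothetical test,
not part of this diff, assuming `PduCount` implements `PartialEq` and `Debug`):

```
#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn pagination_token_round_trip() {
		// A ShortEventId-based token is just the unsigned count as a string
		let token = count_to_token(PduCount::Normal(42));
		assert_eq!(token, "42");
		assert_eq!(parse_pagination_token(&token).unwrap(), PduCount::Normal(42));

		// Legacy signed tokens (backfilled events) still parse via the fallback
		let legacy = parse_pagination_token("-5").unwrap();
		assert_eq!(legacy, PduCount::from_signed(-5));
	}
}
```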

View file

@@ -5,6 +5,14 @@ use axum_extra::{
typed_header::TypedHeaderRejectionReason,
};
use conduwuit::{Err, Error, Result, debug_error, err, warn};
use futures::{
TryFutureExt,
future::{
Either::{Left, Right},
select_ok,
},
pin_mut,
};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
api::{
@@ -54,17 +62,7 @@ pub(super) async fn auth(
| None => request.query.access_token.as_deref(),
};
let token = if let Some(token) = token {
match services.appservice.find_from_token(token).await {
| Some(reg_info) => Token::Appservice(Box::new(reg_info)),
| _ => match services.users.find_from_token(token).await {
| Ok((user_id, device_id)) => Token::User((user_id, device_id)),
| _ => Token::Invalid,
},
}
} else {
Token::None
};
let token = find_token(services, token).await?;
if metadata.authentication == AuthScheme::None {
match metadata {
@@ -342,3 +340,25 @@ async fn parse_x_matrix(request: &mut Request) -> Result<XMatrix> {
Ok(x_matrix)
}
async fn find_token(services: &Services, token: Option<&str>) -> Result<Token> {
let Some(token) = token else {
return Ok(Token::None);
};
let user_token = services.users.find_from_token(token).map_ok(Token::User);
let appservice_token = services
.appservice
.find_from_token(token)
.map_ok(Box::new)
.map_ok(Token::Appservice);
pin_mut!(user_token, appservice_token);
// Returns Ok if either token type succeeds, Err only if both fail
match select_ok([Left(user_token), Right(appservice_token)]).await {
| Err(e) if !e.is_not_found() => Err(e),
| Ok((token, _)) => Ok(token),
| _ => Ok(Token::Invalid),
}
}
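
The `select_ok` behaviour relied on above, in a standalone toy example (not
from the codebase): the first future to resolve `Ok` wins, and an error only
surfaces once every candidate has failed.

```
use futures::future::{err, ok, select_ok};

#[tokio::main]
async fn main() {
	// One lookup fails (e.g. not found) while the other succeeds:
	let user_lookup = err::<&str, &str>("not found");
	let appservice_lookup = ok::<&str, &str>("appservice token");

	let (found, _remaining) = select_ok([user_lookup, appservice_lookup])
		.await
		.expect("at least one lookup succeeds");
	assert_eq!(found, "appservice token");
}
```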

View file

@@ -1,3 +1,4 @@
#![allow(clippy::doc_link_with_quotes)]
pub mod check;
pub mod manager;
pub mod proxy;
@@ -1947,6 +1948,10 @@ pub struct Config {
pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure:
bool,
// external structure; separate section
#[serde(default)]
pub ldap: LdapConfig,
// external structure; separate section
#[serde(default)]
pub blurhashing: BlurhashConfig,
@@ -2041,6 +2046,114 @@ pub struct BlurhashConfig {
pub blurhash_max_raw_size: u64,
}
#[derive(Clone, Debug, Default, Deserialize)]
#[config_example_generator(filename = "conduwuit-example.toml", section = "global.ldap")]
pub struct LdapConfig {
/// Whether to enable LDAP login.
///
/// example: "true"
#[serde(default)]
pub enable: bool,
/// Whether to force LDAP authentication, or to also allow classical
/// password login.
///
/// example: "true"
#[serde(default)]
pub ldap_only: bool,
/// URI of the LDAP server.
///
/// example: "ldap://ldap.example.com:389"
///
/// default: ""
#[serde(default)]
pub uri: Option<Url>,
/// Root of the searches.
///
/// example: "ou=users,dc=example,dc=org"
///
/// default: ""
#[serde(default)]
pub base_dn: String,
/// Bind DN if anonymous search is not enabled.
///
/// You can use the variable `{username}`, which will be replaced by the
/// entered username. In that case, the password used to bind will be the
/// one provided at login rather than the one given by
/// `bind_password_file`. Beware: automatically granting admin rights will
/// not work if you use this direct bind instead of an LDAP search.
///
/// example: "cn=ldap-reader,dc=example,dc=org" or
/// "cn={username},ou=users,dc=example,dc=org"
///
/// default: ""
#[serde(default)]
pub bind_dn: Option<String>,
/// Path to a file on the system that contains the password for the
/// `bind_dn`.
///
/// The server must be able to access the file, and it must not be empty.
///
/// default: ""
#[serde(default)]
pub bind_password_file: Option<PathBuf>,
/// Search filter to limit user searches.
///
/// For more complex filters, you can use the variable `{username}`, which
/// will be replaced by the entered username.
///
/// example: "(&(objectClass=person)(memberOf=matrix))"
///
/// default: "(objectClass=*)"
#[serde(default = "default_ldap_search_filter")]
pub filter: String,
/// Attribute to use to uniquely identify the user.
///
/// example: "uid" or "cn"
///
/// default: "uid"
#[serde(default = "default_ldap_uid_attribute")]
pub uid_attribute: String,
/// Attribute containing the display name of the user.
///
/// example: "givenName" or "sn"
///
/// default: "givenName"
#[serde(default = "default_ldap_name_attribute")]
pub name_attribute: String,
/// Root of the searches for admin users.
///
/// Defaults to `base_dn` if empty.
///
/// example: "ou=admins,dc=example,dc=org"
///
/// default: ""
#[serde(default)]
pub admin_base_dn: String,
/// The LDAP search filter to find administrative users for continuwuity.
///
/// If left blank, administrative state must be configured manually for each
/// user.
///
/// For more complex filters, you can use the variable `{username}`, which
/// will be replaced by the entered username.
///
/// example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
///
/// default: ""
#[serde(default)]
pub admin_filter: String,
}
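
Putting the options above together, an illustrative `[global.ldap]` section for conduwuit-example.toml might look like the following (all values invented; omitted keys fall back to the defaults shown in the doc comments):

```
[global.ldap]
enable = true
uri = "ldap://ldap.example.com:389"
base_dn = "ou=users,dc=example,dc=org"
bind_dn = "cn=ldap-reader,dc=example,dc=org"
bind_password_file = "/etc/conduwuit/ldap-password"  # hypothetical path
filter = "(&(objectClass=person)(memberOf=matrix))"
uid_attribute = "uid"
name_attribute = "givenName"
admin_filter = "(objectClass=conduwuitAdmin)"
```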
#[derive(Deserialize, Clone, Debug)]
#[serde(transparent)]
struct ListeningPort {
@ -2430,3 +2543,9 @@ pub(super) fn default_blurhash_x_component() -> u32 { 4 }
pub(super) fn default_blurhash_y_component() -> u32 { 3 }
// end recommended & blurhashing defaults
fn default_ldap_search_filter() -> String { "(objectClass=*)".to_owned() }
fn default_ldap_uid_attribute() -> String { String::from("uid") }
fn default_ldap_name_attribute() -> String { String::from("givenName") }

View file

@ -100,7 +100,7 @@ pub fn trap() {
#[must_use]
pub fn panic_str(p: &Box<dyn Any + Send>) -> &'static str {
p.downcast_ref::<&str>().copied().unwrap_or_default()
(**p).downcast_ref::<&str>().copied().unwrap_or_default()
}
#[inline(always)]

View file

@ -110,6 +110,8 @@ pub enum Error {
InconsistentRoomState(&'static str, ruma::OwnedRoomId),
#[error(transparent)]
IntoHttp(#[from] ruma::api::error::IntoHttpError),
#[error("{0}")]
Ldap(Cow<'static, str>),
#[error(transparent)]
Mxc(#[from] ruma::MxcUriError),
#[error(transparent)]

View file

@ -17,13 +17,8 @@ pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[
];
/// Experimental, partially supported room versions
pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] = &[
RoomVersionId::V2,
RoomVersionId::V3,
RoomVersionId::V4,
RoomVersionId::V5,
RoomVersionId::V12,
];
pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] =
&[RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5, RoomVersionId::V12];
type RoomVersion = (RoomVersionId, RoomVersionStability);

View file

@ -44,6 +44,14 @@ zstd_compression = [
"conduwuit-core/zstd_compression",
"rust-rocksdb/zstd",
]
bindgen-static = [
# "bindgen/static"
# "clang-sys/static"
"rust-rocksdb/bindgen-static"
]
bindgen-runtime = [
"rust-rocksdb/bindgen-runtime"
]
[dependencies]
async-channel.workspace = true

View file

@ -374,6 +374,10 @@ pub(super) static MAPS: &[Descriptor] = &[
name: "userid_masterkeyid",
..descriptor::RANDOM_SMALL
},
Descriptor {
name: "userid_origin",
..descriptor::RANDOM
},
Descriptor {
name: "userid_password",
..descriptor::RANDOM

View file

@ -43,6 +43,11 @@ assets = [
[features]
default = [
"standard",
"release_max_log_level",
"bindgen-runtime", # replace with bindgen-static on alpine
]
standard = [
"blurhashing",
"brotli_compression",
"element_hacks",
@ -51,11 +56,19 @@ default = [
"jemalloc",
"jemalloc_conf",
"journald",
"ldap",
"media_thumbnail",
"release_max_log_level",
"systemd",
"url_preview",
"zstd_compression",
"zstd_compression"
]
full = [
"standard",
# "hardened_malloc", # Conflicts with jemalloc
"jemalloc_prof",
"perf_measurements",
"tokio_console"
# sentry_telemetry
]
blurhashing = [
@ -102,6 +115,9 @@ jemalloc_stats = [
jemalloc_conf = [
"conduwuit-core/jemalloc_conf",
]
ldap = [
"conduwuit-api/ldap",
]
media_thumbnail = [
"conduwuit-service/media_thumbnail",
]
@ -161,6 +177,18 @@ zstd_compression = [
conduwuit_mods = [
"conduwuit-core/conduwuit_mods",
]
bindgen-static = [
# "bindgen/static"
# "clang-sys/static"
"conduwuit-database/bindgen-static"
]
bindgen-runtime = [
"conduwuit-database/bindgen-runtime"
]
[build-dependencies]
# bindgen = {version = "0.71.1", default-features = false}
# clang-sys = {version = "1", default-features = false}
[dependencies]
conduwuit-admin.workspace = true

View file

@ -30,7 +30,7 @@ use tower::{Service, ServiceExt};
type MakeService = IntoMakeServiceWithConnectInfo<Router, net::SocketAddr>;
const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
const FINI_POLL_INTERVAL: Duration = Duration::from_millis(750);
#[tracing::instrument(skip_all, level = "debug")]

View file

@ -53,6 +53,9 @@ jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
"conduwuit-database/jemalloc_stats",
]
ldap = [
"dep:ldap3"
]
media_thumbnail = [
"dep:image",
]
@ -89,6 +92,8 @@ image.workspace = true
image.optional = true
ipaddress.workspace = true
itertools.workspace = true
ldap3.workspace = true
ldap3.optional = true
log.workspace = true
loole.workspace = true
lru-cache.workspace = true

View file

@ -38,7 +38,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
// Create a user for the server
let server_user = services.globals.server_user.as_ref();
services.users.create(server_user, None)?;
services.users.create(server_user, None, None).await?;
let create_content = {
use RoomVersionId::*;

View file

@ -4,14 +4,14 @@ mod registration_info;
use std::{collections::BTreeMap, iter::IntoIterator, sync::Arc};
use async_trait::async_trait;
use conduwuit::{Result, err, utils::stream::IterStream};
use conduwuit::{Err, Result, err, utils::stream::IterStream};
use database::Map;
use futures::{Future, FutureExt, Stream, TryStreamExt};
use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration};
use tokio::sync::{RwLock, RwLockReadGuard};
pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo};
use crate::{Dep, sending};
use crate::{Dep, globals, sending, users};
pub struct Service {
registration_info: RwLock<Registrations>,
@ -20,7 +20,9 @@ pub struct Service {
}
struct Services {
globals: Dep<globals::Service>,
sending: Dep<sending::Service>,
users: Dep<users::Service>,
}
struct Data {
@ -35,7 +37,9 @@ impl crate::Service for Service {
Ok(Arc::new(Self {
registration_info: RwLock::new(BTreeMap::new()),
services: Services {
globals: args.depend::<globals::Service>("globals"),
sending: args.depend::<sending::Service>("sending"),
users: args.depend::<users::Service>("users"),
},
db: Data {
id_appserviceregistrations: args.db["id_appserviceregistrations"].clone(),
@ -44,23 +48,93 @@ impl crate::Service for Service {
}
async fn worker(self: Arc<Self>) -> Result {
// Inserting registrations into cache
self.iter_db_ids()
.try_for_each(async |appservice| {
self.registration_info
.write()
.await
.insert(appservice.0, appservice.1.try_into()?);
// First, collect all appservices to check for token conflicts
let appservices: Vec<(String, Registration)> = self.iter_db_ids().try_collect().await?;
Ok(())
})
.await
// Check for appservice-to-appservice token conflicts
for i in 0..appservices.len() {
for j in i.saturating_add(1)..appservices.len() {
if appservices[i].1.as_token == appservices[j].1.as_token {
return Err!(Database(error!(
"Token collision detected: Appservices '{}' and '{}' have the same token",
appservices[i].0, appservices[j].0
)));
}
}
}
// Process each appservice
for (id, registration) in appservices {
// During startup, resolve any token collisions in favour of appservices
// by logging out conflicting user devices
if let Ok((user_id, device_id)) = self
.services
.users
.find_from_token(&registration.as_token)
.await
{
conduwuit::warn!(
"Token collision detected during startup: Appservice '{}' token was also \
used by user '{}' device '{}'. Logging out the user device to resolve \
conflict.",
id,
user_id.localpart(),
device_id
);
self.services
.users
.remove_device(&user_id, &device_id)
.await;
}
self.start_appservice(id, registration).await?;
}
Ok(())
}
fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
}
impl Service {
/// Starts an appservice, ensuring its sender_localpart user exists and is
/// active. Creates the user if it doesn't exist, or reactivates it if it
/// was deactivated. Then registers the appservice in memory for request
/// handling.
async fn start_appservice(&self, id: String, registration: Registration) -> Result {
let appservice_user_id = UserId::parse_with_server_name(
registration.sender_localpart.as_str(),
self.services.globals.server_name(),
)?;
if !self.services.users.exists(&appservice_user_id).await {
self.services
.users
.create(&appservice_user_id, None, None)
.await?;
} else if self
.services
.users
.is_deactivated(&appservice_user_id)
.await
.unwrap_or(false)
{
// Reactivate the appservice user if it was accidentally deactivated
self.services
.users
.set_password(&appservice_user_id, None)
.await?;
}
self.registration_info
.write()
.await
.insert(id, registration.try_into()?);
Ok(())
}
/// Registers an appservice and returns the ID to the caller
pub async fn register_appservice(
&self,
@ -68,15 +142,40 @@ impl Service {
appservice_config_body: &str,
) -> Result {
//TODO: Check for collisions between exclusive appservice namespaces
self.registration_info
.write()
// Check for token collision with other appservices (allow re-registration of
// same appservice)
if let Ok(existing) = self.find_from_token(&registration.as_token).await {
if existing.registration.id != registration.id {
return Err(err!(Request(InvalidParam(
"Cannot register appservice: Token is already used by appservice '{}'. \
Please generate a different token.",
existing.registration.id
))));
}
}
// Prevent token collision with existing user tokens
if self
.services
.users
.find_from_token(&registration.as_token)
.await
.insert(registration.id.clone(), registration.clone().try_into()?);
.is_ok()
{
return Err(err!(Request(InvalidParam(
"Cannot register appservice: The provided token is already in use by a user \
device. Please generate a different token for the appservice."
))));
}
self.db
.id_appserviceregistrations
.insert(&registration.id, appservice_config_body);
self.start_appservice(registration.id.clone(), registration.clone())
.await?;
Ok(())
}
@ -113,12 +212,14 @@ impl Service {
.map(|info| info.registration)
}
pub async fn find_from_token(&self, token: &str) -> Option<RegistrationInfo> {
/// Returns Result to match users::find_from_token for select_ok usage
pub async fn find_from_token(&self, token: &str) -> Result<RegistrationInfo> {
self.read()
.await
.values()
.find(|info| info.registration.as_token == token)
.cloned()
.ok_or_else(|| err!(Request(NotFound("Appservice token not found"))))
}
/// Checks if a given user id matches any exclusive appservice regex

View file

@ -41,6 +41,11 @@ impl crate::Service for Service {
return Ok(());
}
if self.services.config.ldap.enable {
warn!("emergency password feature not available with LDAP enabled.");
return Ok(());
}
self.set_emergency_access().await.inspect_err(|e| {
error!("Could not set the configured emergency password for the server user: {e}");
})
@ -57,7 +62,8 @@ impl Service {
self.services
.users
.set_password(server_user, self.services.config.emergency_password.as_deref())?;
.set_password(server_user, self.services.config.emergency_password.as_deref())
.await?;
let (ruleset, pwd_set) = match self.services.config.emergency_password {
| Some(_) => (Ruleset::server_default(server_user), true),

View file

@ -61,9 +61,12 @@ impl Data {
from: PduCount,
dir: Direction,
) -> impl Stream<Item = (PduCount, impl Event)> + Send + '_ {
// Query from the exact position, then let the filter below exclude it
// (saturating_inc could skip events at the min/max boundaries)
let from_unsigned = from.into_unsigned();
let mut current = ArrayVec::<u8, 16>::new();
current.extend(target.to_be_bytes());
current.extend(from.saturating_inc(dir).into_unsigned().to_be_bytes());
current.extend(from_unsigned.to_be_bytes());
let current = current.as_slice();
match dir {
| Direction::Forward => self.tofrom_relation.raw_keys_from(current).boxed(),
@ -73,6 +76,17 @@ impl Data {
.ready_take_while(move |key| key.starts_with(&target.to_be_bytes()))
.map(|to_from| u64_from_u8(&to_from[8..16]))
.map(PduCount::from_unsigned)
.ready_filter(move |count| {
if from == PduCount::min() || from == PduCount::max() {
true
} else {
let count_unsigned = count.into_unsigned();
match dir {
| Direction::Forward => count_unsigned > from_unsigned,
| Direction::Backward => count_unsigned < from_unsigned,
}
}
})
.wide_filter_map(move |shorteventid| async move {
let pdu_id: RawPduId = PduId { shortroomid, shorteventid }.into();

View file

@ -49,7 +49,7 @@ pub async fn update_membership(
#[allow(clippy::collapsible_if)]
if !self.services.globals.user_is_local(user_id) {
if !self.services.users.exists(user_id).await {
self.services.users.create(user_id, None)?;
self.services.users.create(user_id, None, None).await?;
}
}

View file

@ -1,11 +1,19 @@
#[cfg(feature = "ldap")]
use std::collections::HashMap;
use std::{collections::BTreeMap, mem, sync::Arc};
#[cfg(feature = "ldap")]
use conduwuit::result::LogErr;
use conduwuit::{
Err, Error, Result, Server, at, debug_warn, err, trace,
Err, Error, Result, Server, at, debug_warn, err, is_equal_to, trace,
utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted},
};
#[cfg(feature = "ldap")]
use conduwuit_core::{debug, error};
use database::{Deserialized, Ignore, Interfix, Json, Map};
use futures::{Stream, StreamExt, TryFutureExt};
#[cfg(feature = "ldap")]
use ldap3::{LdapConnAsync, Scope, SearchEntry};
use ruma::{
DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId,
OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId,
@ -19,7 +27,7 @@ use ruma::{
use serde::{Deserialize, Serialize};
use serde_json::json;
use crate::{Dep, account_data, admin, globals, rooms};
use crate::{Dep, account_data, admin, appservice, globals, rooms};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserSuspension {
@ -40,6 +48,7 @@ struct Services {
server: Arc<Server>,
account_data: Dep<account_data::Service>,
admin: Dep<admin::Service>,
appservice: Dep<appservice::Service>,
globals: Dep<globals::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
state_cache: Dep<rooms::state_cache::Service>,
@ -62,6 +71,7 @@ struct Data {
userid_displayname: Arc<Map>,
userid_lastonetimekeyupdate: Arc<Map>,
userid_masterkeyid: Arc<Map>,
userid_origin: Arc<Map>,
userid_password: Arc<Map>,
userid_suspension: Arc<Map>,
userid_selfsigningkeyid: Arc<Map>,
@ -76,6 +86,7 @@ impl crate::Service for Service {
server: args.server.clone(),
account_data: args.depend::<account_data::Service>("account_data"),
admin: args.depend::<admin::Service>("admin"),
appservice: args.depend::<appservice::Service>("appservice"),
globals: args.depend::<globals::Service>("globals"),
state_accessor: args
.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
@ -98,6 +109,7 @@ impl crate::Service for Service {
userid_displayname: args.db["userid_displayname"].clone(),
userid_lastonetimekeyupdate: args.db["userid_lastonetimekeyupdate"].clone(),
userid_masterkeyid: args.db["userid_masterkeyid"].clone(),
userid_origin: args.db["userid_origin"].clone(),
userid_password: args.db["userid_password"].clone(),
userid_suspension: args.db["userid_suspension"].clone(),
userid_selfsigningkeyid: args.db["userid_selfsigningkeyid"].clone(),
@ -134,9 +146,21 @@ impl Service {
}
/// Create a new user account on this homeserver.
///
/// The user origin defaults to "password" (meaning the user logs in with
/// their user_id and password). Users with other origins (currently only
/// "ldap" is available) have special login processes.
#[inline]
pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> {
self.set_password(user_id, password)
pub async fn create(
&self,
user_id: &UserId,
password: Option<&str>,
origin: Option<&str>,
) -> Result<()> {
self.db
.userid_origin
.insert(user_id, origin.unwrap_or("password"));
self.set_password(user_id, password).await
}
/// Deactivate account
@ -150,7 +174,7 @@ impl Service {
// result in an empty string, so the user will not be able to log in again.
// Systems like changing the password without logging in should check if the
// account is deactivated.
self.set_password(user_id, None)?;
self.set_password(user_id, None).await?;
// TODO: Unhook 3PID
Ok(())
@ -251,13 +275,34 @@ impl Service {
.ready_filter_map(|(u, p): (&UserId, &[u8])| (!p.is_empty()).then_some(u))
}
/// Returns the origin of the user (password/LDAP/...).
pub async fn origin(&self, user_id: &UserId) -> Result<String> {
self.db.userid_origin.get(user_id).await.deserialized()
}
/// Returns the password hash for the given user.
pub async fn password_hash(&self, user_id: &UserId) -> Result<String> {
self.db.userid_password.get(user_id).await.deserialized()
}
/// Hash and set the user's password to the Argon2 hash
pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> {
pub async fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> {
// Cannot change the password of an LDAP user. There are two special cases:
// - a `None` password can be used to deactivate an LDAP user
// - a "*" password is used as the default password of an active LDAP user
if cfg!(feature = "ldap")
&& password.is_some_and(|pwd| pwd != "*")
&& self
.db
.userid_origin
.get(user_id)
.await
.deserialized::<String>()
.is_ok_and(is_equal_to!("ldap"))
{
return Err!(Request(InvalidParam("Cannot change password of an LDAP user")));
}
password
.map(utils::hash::password)
.transpose()
@ -391,6 +436,31 @@ impl Service {
self.db.userdeviceid_token.qry(&key).await.deserialized()
}
/// Generate a unique access token that doesn't collide with existing tokens
pub async fn generate_unique_token(&self) -> String {
loop {
let token = utils::random_string(32);
// Check for collision with appservice tokens
if self
.services
.appservice
.find_from_token(&token)
.await
.is_ok()
{
continue;
}
// Check for collision with user tokens
if self.db.token_userdeviceid.get(&token).await.is_ok() {
continue;
}
return token;
}
}
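
A hedged sketch of how this pairs with `set_token` below (the full signature is not shown in this diff, so the arguments here are assumed):

```
// Hypothetical call site when issuing a fresh device token:
let token = services.users.generate_unique_token().await;
services.users.set_token(&user_id, &device_id, &token).await?;
```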
/// Replaces the access token of one device.
pub async fn set_token(
&self,
@ -407,6 +477,19 @@ impl Service {
)));
}
// Check for token collision with appservices
if self
.services
.appservice
.find_from_token(token)
.await
.is_ok()
{
return Err!(Request(InvalidParam(
"Token conflicts with an existing appservice token"
)));
}
// Remove old token
if let Ok(old_token) = self.db.userdeviceid_token.qry(&key).await {
self.db.token_userdeviceid.remove(&old_token);
@ -1092,6 +1175,154 @@ impl Service {
self.db.useridprofilekey_value.del(key);
}
}
#[cfg(not(feature = "ldap"))]
pub async fn search_ldap(&self, _user_id: &UserId) -> Result<Vec<(String, bool)>> {
Err!(FeatureDisabled("ldap"))
}
#[cfg(feature = "ldap")]
pub async fn search_ldap(&self, user_id: &UserId) -> Result<Vec<(String, bool)>> {
let localpart = user_id.localpart().to_owned();
let lowercased_localpart = localpart.to_lowercase();
let config = &self.services.server.config.ldap;
let uri = config
.uri
.as_ref()
.ok_or_else(|| err!(Ldap(error!("LDAP URI is not configured."))))?;
debug!(?uri, "LDAP creating connection...");
let (conn, mut ldap) = LdapConnAsync::new(uri.as_str())
.await
.map_err(|e| err!(Ldap(error!(?user_id, "LDAP connection setup error: {e}"))))?;
let driver = self.services.server.runtime().spawn(async move {
match conn.drive().await {
| Err(e) => error!("LDAP connection error: {e}"),
| Ok(()) => debug!("LDAP connection completed."),
}
});
match (&config.bind_dn, &config.bind_password_file) {
| (Some(bind_dn), Some(bind_password_file)) => {
let bind_pw = String::from_utf8(std::fs::read(bind_password_file)?)?;
ldap.simple_bind(bind_dn, bind_pw.trim())
.await
.and_then(ldap3::LdapResult::success)
.map_err(|e| err!(Ldap(error!("LDAP bind error: {e}"))))?;
},
| (..) => {},
}
let attr = [&config.uid_attribute, &config.name_attribute];
let user_filter = &config.filter.replace("{username}", &lowercased_localpart);
let (entries, _result) = ldap
.search(&config.base_dn, Scope::Subtree, user_filter, &attr)
.await
.and_then(ldap3::SearchResult::success)
.inspect(|(entries, result)| trace!(?entries, ?result, "LDAP Search"))
.map_err(|e| err!(Ldap(error!(?attr, ?user_filter, "LDAP search error: {e}"))))?;
let mut dns: HashMap<String, bool> = entries
.into_iter()
.filter_map(|entry| {
let search_entry = SearchEntry::construct(entry);
debug!(?search_entry, "LDAP search entry");
search_entry
.attrs
.get(&config.uid_attribute)
.into_iter()
.chain(search_entry.attrs.get(&config.name_attribute))
.any(|ids| ids.contains(&localpart) || ids.contains(&lowercased_localpart))
.then_some((search_entry.dn, false))
})
.collect();
if !config.admin_filter.is_empty() {
let admin_base_dn = if config.admin_base_dn.is_empty() {
&config.base_dn
} else {
&config.admin_base_dn
};
let admin_filter = &config
.admin_filter
.replace("{username}", &lowercased_localpart);
let (admin_entries, _result) = ldap
.search(admin_base_dn, Scope::Subtree, admin_filter, &attr)
.await
.and_then(ldap3::SearchResult::success)
.inspect(|(entries, result)| trace!(?entries, ?result, "LDAP Admin Search"))
.map_err(|e| {
err!(Ldap(error!(?attr, ?admin_filter, "LDAP admin search error: {e}")))
})?;
dns.extend(admin_entries.into_iter().filter_map(|entry| {
let search_entry = SearchEntry::construct(entry);
debug!(?search_entry, "LDAP search entry");
search_entry
.attrs
.get(&config.uid_attribute)
.into_iter()
.chain(search_entry.attrs.get(&config.name_attribute))
.any(|ids| ids.contains(&localpart) || ids.contains(&lowercased_localpart))
.then_some((search_entry.dn, true))
}));
}
ldap.unbind()
.await
.map_err(|e| err!(Ldap(error!("LDAP unbind error: {e}"))))?;
driver.await.log_err().ok();
Ok(dns.drain().collect())
}
#[cfg(not(feature = "ldap"))]
pub async fn auth_ldap(&self, _user_dn: &str, _password: &str) -> Result {
Err!(FeatureDisabled("ldap"))
}
#[cfg(feature = "ldap")]
pub async fn auth_ldap(&self, user_dn: &str, password: &str) -> Result {
let config = &self.services.server.config.ldap;
let uri = config
.uri
.as_ref()
.ok_or_else(|| err!(Ldap(error!("LDAP URI is not configured."))))?;
debug!(?uri, "LDAP creating connection...");
let (conn, mut ldap) = LdapConnAsync::new(uri.as_str())
.await
.map_err(|e| err!(Ldap(error!(?user_dn, "LDAP connection setup error: {e}"))))?;
let driver = self.services.server.runtime().spawn(async move {
match conn.drive().await {
| Err(e) => error!("LDAP connection error: {e}"),
| Ok(()) => debug!("LDAP connection completed."),
}
});
ldap.simple_bind(user_dn, password)
.await
.and_then(ldap3::LdapResult::success)
.map_err(|e| {
err!(Request(Forbidden(debug_error!("LDAP authentication error: {e}"))))
})?;
ldap.unbind()
.await
.map_err(|e| err!(Ldap(error!("LDAP unbind error: {e}"))))?;
driver.await.log_err().ok();
Ok(())
}
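
Taken together, these two methods suggest a search-then-bind login flow: `search_ldap` maps a Matrix localpart to candidate DNs (each paired with an admin flag), and `auth_ldap` then verifies the password with a simple bind as that DN. A hypothetical caller (names and error handling assumed, not taken from this diff):

```
// Somewhere in the login route, after the user submitted credentials:
let candidates = services.users.search_ldap(&user_id).await?;
let (user_dn, is_admin) = candidates
	.first()
	.ok_or_else(|| err!(Request(Forbidden("No matching LDAP user"))))?;
services.users.auth_ldap(user_dn, password).await?;
// `is_admin` could then drive automatic admin-room membership.
```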
}
pub fn parse_master_key(

View file

@ -48,7 +48,7 @@ vector () {
VECTOR_OPTS=$@
element "$TOOLCHAIN" $VECTOR_OPTS --no-default-features
element "$TOOLCHAIN" $VECTOR_OPTS --features=default
element "$TOOLCHAIN" $VECTOR_OPTS --all-features
element "$TOOLCHAIN" $VECTOR_OPTS --features full
}
matrix () {