Compare commits

...

56 commits

Author SHA1 Message Date
nexy7574
dd372f77eb Always calculate state diff IDs in syncv3
seemingly fixes #779
2025-05-26 10:47:04 -07:00
Jacob Taylor
dfd599d07d cache maxxing 2025-05-26 10:47:04 -07:00
Jacob Taylor
e06ed8ef13 upgrade some settings 2025-05-26 10:47:04 -07:00
Jacob Taylor
80f3a07e08 add futures::FutureExt to make cba9ee5240 work 2025-05-26 10:47:04 -07:00
Jason Volk
6aebf8aa72 Mitigate large futures
Signed-off-by: Jason Volk <jason@zemos.net>
2025-05-26 10:47:04 -07:00
Jacob Taylor
64e081dcbe bump the number of allowed immutable memtables by 1, to allow for greater flood protection 2025-05-26 10:47:04 -07:00
Jade Ellis
0232575e7d chore: Add CONTINUWUITY_ environment variables
Also updates some examples to match
2025-05-26 10:47:04 -07:00
Jade Ellis
137282ca3b chore: Fix typos across the codebase 2025-05-26 10:47:04 -07:00
Jade Ellis
461ad346c0 ci: Fix bad comparison 2025-05-26 10:46:33 -07:00
Jade Ellis
c081afeaec chore: Link to Matrix rooms directly 2025-05-26 10:46:33 -07:00
Jade Ellis
b5c38d7c12 feat: Prefill server name in federation test 2025-05-26 10:46:33 -07:00
Jade Ellis
6a2dde6eb8 ci: Cache timelord-cli to avoid unnecessary compilation 2025-05-26 10:46:33 -07:00
Jade Ellis
de4ba83a44 refactor: Move git version info gathering into a build script 2025-05-26 10:46:33 -07:00
Jade Ellis
ece2616395 feat: HTML default page 2025-05-26 10:46:09 -07:00
Glandos
b703ea003a Update debian/conduwuit.service 2025-05-26 10:45:20 -07:00
Kokomo
142fc6ab59 Add maintainer emails 2025-05-26 10:45:15 -07:00
Kokomo
875194f546 Add back space oops 2025-05-26 10:45:15 -07:00
Kokomo
74a9303b2b Remove email and add reference to matrix space 2025-05-26 10:45:15 -07:00
Tom Foster
5411fdcac9 Tidy up publishing restriction check 2025-05-26 10:45:08 -07:00
Tom Foster
33102f886f Make Cloudflare Pages optional in CI 2025-05-26 10:45:08 -07:00
Jade Ellis
a2c293e22e chore: Error on missing ID in messages 2025-05-26 10:44:51 -07:00
Jade Ellis
c3ddb63065 feat: Allow controlling client message filtering 2025-05-26 10:44:43 -07:00
Jacob Taylor
bc53ae01f7 completely strike knowledge of the server from the moderation service 2025-05-26 10:44:21 -07:00
Jacob Taylor
357c9f11b9 probably incorrectly delete support for non-standardized matrix srv record 2025-05-26 10:43:59 -07:00
Jacob Taylor
da7def2bef Fix spaces rooms list load error. rev2 2025-05-26 10:43:59 -07:00
Jade Ellis
2e0bb01595 feat: For knock_restricted rooms, automatically join rooms we meet restrictions for rather than knocking
2025-05-26 16:23:20 +00:00
Jade Ellis
d9d8e0acfd fix: Allow joining via invite for knock_restricted rooms 2025-05-26 16:23:20 +00:00
Jade Ellis
e8d823a653
docs: Apply feedback on security policy
2025-05-26 15:01:58 +01:00
Jade Ellis
0ba77674c7
docs: Security policy 2025-05-25 00:36:28 +01:00
Jade Ellis
2ccbd7d60b
fix: Reference config directly 2025-05-21 21:06:44 +01:00
Jade Ellis
60960c6e09
feat: Automatically set well-known support contacts 2025-05-21 20:32:53 +01:00
Jade Ellis
ce40304667
chore: Upgrade deps 2025-05-21 15:28:46 +01:00
Jade Ellis
dcbc4b54c5
ci: Always show sccache stats 2025-05-21 12:45:25 +01:00
Jade Ellis
fce024b30b
chore: Add must_use annotation 2025-05-21 12:45:14 +01:00
Jade Ellis
3e4e696761
fix: Make sure empty VERSION_EXTRA strings are ignored
Also updates built & removes unused optional features
2025-05-21 12:35:36 +01:00
Jason Volk
f605913ea9
Eliminate associated Id type from trait Event.
Co-authored-by: Jade Ellis <jade@ellis.link>
Signed-off-by: Jason Volk <jason@zemos.net>
2025-05-21 11:36:15 +01:00
Jason Volk
44302ce732
Eliminate explicit parallel_fetches argument.
Signed-off-by: Jason Volk <jason@zemos.net>
2025-05-21 11:36:15 +01:00
Jason Volk
bfb0a2b76a
Remove unused Pdu::into_any_event().
Signed-off-by: Jason Volk <jason@zemos.net>
2025-05-21 11:36:14 +01:00
Jason Volk
fcd5669aa1
Join jemalloc background threads prior to exit.
Co-authored-by: Jade Ellis <jade@ellis.link>
Signed-off-by: Jason Volk <jason@zemos.net>
2025-05-21 11:36:13 +01:00
Jade Ellis
9b8b37f162
docs: Badges for mirrors 2025-05-21 02:51:09 +01:00
Jade Ellis
7a46563f23
ci: Cache docker image build mounts 2025-05-21 01:48:25 +01:00
Jade Ellis
1bf6537319
build: Split docker target cache by target platform 2025-05-20 22:47:55 +01:00
Jade Ellis
4ed04b343a
build: Use xtrace in bash scripts in Dockerfile 2025-05-20 22:13:13 +01:00
Jade Ellis
a4ad72e11d
ci: Run cargo test 2025-05-20 21:48:40 +01:00
Jade Ellis
1f57508879
ci: Don't clippy check dependencies 2025-05-20 21:47:35 +01:00
Jade Ellis
a325dfa56a
ci: Use timelord in clippy check 2025-05-20 21:47:27 +01:00
Jade Ellis
b5d2ef9a4a
ci: Refactor timelord to its own action 2025-05-20 21:36:01 +01:00
Jade Ellis
e200a7d991
ci: Cache Rust registry 2025-05-20 21:36:01 +01:00
Jade Ellis
034762c619
chore: Allow raw string hashes for metadata crate 2025-05-20 21:36:00 +01:00
Jade Ellis
e31d261e66
ci: Run clippy check 2025-05-20 21:36:00 +01:00
Jade Ellis
c5db43ba9a
chore: Docker ignore forgejo files 2025-05-20 21:31:41 +01:00
Jade Ellis
ec08e16b9f
build: Allow builder to decide on incremental or not 2025-05-20 21:31:41 +01:00
Jade Ellis
f14725a51b
ci: Check formatting
Also moves rustup installation to a separate workflow and
enables caching.
The sccache action required a github.com api token, so we set
all that up too.
2025-05-20 21:31:41 +01:00
Jade Ellis
d03325c65a
chore: Set editorconfig for workflows 2025-05-20 21:31:40 +01:00
Jade Ellis
066794fe90
ci: Don't try build images on PR 2025-05-20 21:31:40 +01:00
Jade Ellis
beee996f72
docs: Rename conduwuit to continuwuity in more places 2025-05-10 20:37:08 +01:00
45 changed files with 770 additions and 376 deletions


@@ -15,6 +15,7 @@ docker/
.gitea
.gitlab
.github
.forgejo
# Dot files
.env


@@ -22,3 +22,7 @@ indent_size = 2
[*.rs]
indent_style = tab
max_line_length = 98
[{.forgejo/**/*.yml,.github/**/*.yml}]
indent_size = 2
indent_style = space


@@ -0,0 +1,53 @@
name: rust-toolchain
description: |
Install a Rust toolchain using rustup.
See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification
for more information about toolchains.
inputs:
toolchain:
description: |
Rust toolchain name.
See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification
required: false
target:
description: Target triple to install for this toolchain
required: false
components:
description: Space-separated list of components to be additionally installed for a new toolchain
required: false
outputs:
rustc_version:
description: The rustc version installed
value: ${{ steps.rustc-version.outputs.version }}
runs:
using: composite
steps:
- name: Cache rustup toolchains
uses: actions/cache@v3
with:
path: |
~/.rustup
!~/.rustup/tmp
!~/.rustup/downloads
# Requires repo to be cloned if toolchain is not specified
key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
- name: Install Rust toolchain
shell: bash
run: |
if ! command -v rustup &> /dev/null ; then
curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y
echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH
fi
- shell: bash
run: |
set -x
${{ inputs.toolchain && format('rustup override set {0}', inputs.toolchain) }}
${{ inputs.target && format('rustup target add {0}', inputs.target) }}
${{ inputs.components && format('rustup component add {0}', inputs.components) }}
cargo --version
rustc --version
- id: rustc-version
shell: bash
run: |
echo "version=$(rustc --version)" >> $GITHUB_OUTPUT


@@ -0,0 +1,29 @@
name: sccache
description: |
Install sccache for caching builds in GitHub Actions.
inputs:
token:
description: 'A Github PAT'
required: false
runs:
using: composite
steps:
- name: Install sccache
uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
with:
token: ${{ inputs.token }}
- name: Configure sccache
uses: https://github.com/actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
- shell: bash
run: |
echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV


@@ -0,0 +1,46 @@
name: timelord
description: |
Use timelord to set file timestamps
inputs:
key:
description: |
The key to use for caching the timelord data.
This should be unique to the repository and the runner.
required: true
default: timelord-v0
path:
description: |
The path to the directory to be timestamped.
This should be the root of the repository.
required: true
default: .
runs:
using: composite
steps:
- name: Cache timelord-cli installation
id: cache-timelord-bin
uses: actions/cache@v3
with:
path: ~/.cargo/bin/timelord
key: timelord-cli-v3.0.1
- name: Install timelord-cli
uses: https://github.com/cargo-bins/cargo-binstall@main
if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
- run: cargo binstall timelord-cli@3.0.1
shell: bash
if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
- name: Load timelord files
uses: actions/cache/restore@v3
with:
path: /timelord/
key: ${{ inputs.key }}
- name: Run timelord to set timestamps
shell: bash
run: timelord sync --source-dir ${{ inputs.path }} --cache-dir /timelord/
- name: Save timelord
uses: actions/cache/save@v3
with:
path: /timelord/
key: ${{ inputs.key }}


@@ -0,0 +1,142 @@
name: Rust Checks
on:
push:
jobs:
format:
name: Format
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
with:
toolchain: "nightly"
components: "rustfmt"
- name: Check formatting
run: |
cargo +nightly fmt --all -- --check
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Clippy
run: |
cargo clippy \
--workspace \
--locked \
--no-deps \
--profile test \
-- \
-D warnings
- name: Show sccache stats
if: always()
run: sccache --show-stats
cargo-test:
name: Cargo Test
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install rust
uses: ./.forgejo/actions/rust-toolchain
- uses: https://github.com/actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.GH_APP_ID }}
private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
github-api-url: https://api.github.com
owner: ${{ vars.GH_APP_OWNER }}
repositories: ""
- name: Install sccache
uses: ./.forgejo/actions/sccache
with:
token: ${{ steps.app-token.outputs.token }}
- run: sudo apt-get update
- name: Install system dependencies
uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
with:
packages: clang liburing-dev
version: 1
- name: Cache Rust registry
uses: actions/cache@v3
with:
path: |
~/.cargo/git
!~/.cargo/git/checkouts
~/.cargo/registry
!~/.cargo/registry/src
key: rust-registry-${{hashFiles('**/Cargo.lock') }}
- name: Timelord
uses: ./.forgejo/actions/timelord
with:
key: sccache-v0
path: .
- name: Cargo Test
run: |
cargo test \
--workspace \
--locked \
--profile test \
--all-targets \
--no-fail-fast
- name: Show sccache stats
if: always()
run: sccache --show-stats


@@ -1,6 +1,3 @@
[files]
extend-exclude = ["*.csr"]
[default.extend-words]
"allocatedp" = "allocatedp"
"conduwuit" = "conduwuit"


@@ -1,6 +1,6 @@
# Contributing guide
This page is for about contributing to conduwuit. The
This page is for about contributing to Continuwuity. The
[development](./development.md) page may be of interest for you as well.
If you would like to work on an [issue][issues] that is not assigned, preferably
@@ -73,7 +73,7 @@ If you'd like to run Complement locally using Nix, see the
### Writing documentation
conduwuit's website uses [`mdbook`][mdbook] and deployed via CI using GitHub
Continuwuity's website uses [`mdbook`][mdbook] and deployed via CI using GitHub
Pages in the [`documentation.yml`][documentation.yml] workflow file with Nix's
mdbook in the devshell. All documentation is in the `docs/` directory at the top
level. The compiled mdbook website is also uploaded as an artifact.


@@ -7,10 +7,15 @@
<!-- ANCHOR_END: catchphrase -->
[continuwuity] is a Matrix homeserver written in Rust.
It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver.
It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver.
<!-- ANCHOR: body -->
[![forgejo.ellis.link](https://img.shields.io/badge/Ellis%20Git-main+packages-green?style=flat&logo=forgejo&labelColor=fff)](https://forgejo.ellis.link/continuwuation/continuwuity) ![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/stars.svg?style=flat) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/issues/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/pulls/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open)
[![GitHub](https://img.shields.io/badge/GitHub-mirror-blue?style=flat&logo=github&labelColor=fff&logoColor=24292f)](https://github.com/continuwuity/continuwuity) ![](https://img.shields.io/github/stars/continuwuity/continuwuity?style=flat)
[![Codeberg](https://img.shields.io/badge/Codeberg-mirror-2185D0?style=flat&logo=codeberg&labelColor=fff)](https://codeberg.org/nexy7574/continuwuity) ![](https://codeberg.org/nexy7574/continuwuity/badges/stars.svg?style=flat)
### Why does this exist?
@@ -112,4 +117,3 @@ Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [
[continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity

SECURITY.md (new file)

@@ -0,0 +1,63 @@
# Security Policy for Continuwuity
This document outlines the security policy for Continuwuity. Our goal is to maintain a secure platform for all users, and we take security matters seriously.
## Supported Versions
We provide security updates for the following versions of Continuwuity:
| Version | Supported |
| -------------- |:----------------:|
| Latest release | ✅ |
| Main branch | ✅ |
| Older releases | ❌ |
We may backport fixes to the previous release at our discretion, but we don't guarantee this.
## Reporting a Vulnerability
### Responsible Disclosure
We appreciate the efforts of security researchers and the community in identifying and reporting vulnerabilities. To ensure that potential vulnerabilities are addressed properly, please follow these guidelines:
1. Contact members of the team over E2EE private message.
- [@jade:ellis.link](https://matrix.to/#/@jade:ellis.link)
- [@nex:nexy7574.co.uk](https://matrix.to/#/@nex:nexy7574.co.uk) <!-- ? -->
2. **Email the security team** directly at [security@continuwuity.org](mailto:security@continuwuity.org). This is not E2EE, so don't include sensitive details.
3. **Do not disclose the vulnerability publicly** until it has been addressed
4. **Provide detailed information** about the vulnerability, including:
- A clear description of the issue
- Steps to reproduce
- Potential impact
- Any possible mitigations
- Version(s) affected, including specific commits if possible
If you have any doubts about a potential security vulnerability, contact us via private channels first! We'd prefer that you bother us, instead of having a vulnerability disclosed without a fix.
### What to Expect
When you report a security vulnerability:
1. **Acknowledgment**: We will acknowledge receipt of your report.
2. **Assessment**: We will assess the vulnerability and determine its impact on our users
3. **Updates**: We will provide updates on our progress in addressing the vulnerability, and may request you help test mitigations
4. **Resolution**: Once resolved, we will notify you and discuss coordinated disclosure
5. **Credit**: We will recognize your contribution (unless you prefer to remain anonymous)
## Security Update Process
When security vulnerabilities are identified:
1. We will develop and test fixes in a private branch
2. Security updates will be released as soon as possible
3. Release notes will include information about the vulnerabilities, avoiding details that could facilitate exploitation where possible
4. Critical security updates may be backported to the previous stable release
## Additional Resources
- [Matrix Security Disclosure Policy](https://matrix.org/security-disclosure-policy/)
- [Continuwuity Documentation](https://continuwuity.org/introduction)
---
This security policy was last updated on May 25, 2025.

debian/README.md (vendored)

@@ -1,4 +1,4 @@
# conduwuit for Debian
# Continuwuity for Debian
Information about downloading and deploying the Debian package. This may also be
referenced for other `apt`-based distros such as Ubuntu.
@@ -22,7 +22,7 @@ options in `/etc/conduwuit/conduwuit.toml`.
### Running
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop conduwuit. The binary is installed at `/usr/sbin/conduwuit`.
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary is installed at `/usr/sbin/conduwuit`.
This package assumes by default that conduwuit will be placed behind a reverse proxy. The default config options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS, so you will need to set up TLS certificates and renewal for it to work properly if you intend to federate.


@@ -20,3 +20,4 @@
- [Testing](development/testing.md)
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
- [Community (and Guidelines)](community.md)
- [Security](security.md)


@@ -6,11 +6,11 @@ services:
- "traefik.enable=true"
- "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
- "traefik.http.routers.to-conduwuit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Continuwuity is hosted
- "traefik.http.routers.to-conduwuit.tls=true"
- "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt"
- "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker"
- "traefik.http.services.to_conduwuit.loadbalancer.server.port=6167"
- "traefik.http.routers.to-continuwuity.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Continuwuity is hosted
- "traefik.http.routers.to-continuwuity.tls=true"
- "traefik.http.routers.to-continuwuity.tls.certresolver=letsencrypt"
- "traefik.http.routers.to-continuwuity.middlewares=cors-headers@docker"
- "traefik.http.services.to_continuwuity.loadbalancer.server.port=6167"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"


@@ -25,23 +25,23 @@ services:
image: forgejo.ellis.link/continuwuation/continuwuity:latest
restart: unless-stopped
volumes:
- db:/var/lib/conduwuit
- db:/var/lib/continuwuity
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
#- ./conduwuit.toml:/etc/conduwuit.toml
#- ./continuwuity.toml:/etc/continuwuity.toml
environment:
CONDUWUIT_SERVER_NAME: example.com # EDIT THIS
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
CONDUWUIT_PORT: 6167
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONDUWUIT_ALLOW_REGISTRATION: 'true'
CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
#CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
CONDUWUIT_ALLOW_FEDERATION: 'true'
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
#CONDUWUIT_LOG: warn,state_res=warn
CONDUWUIT_ADDRESS: 0.0.0.0
#CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
CONTINUWUITY_PORT: 6167
CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONTINUWUITY_ALLOW_REGISTRATION: 'true'
CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
#CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
CONTINUWUITY_ALLOW_FEDERATION: 'true'
CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
#CONTINUWUITY_LOG: warn,state_res=warn
CONTINUWUITY_ADDRESS: 0.0.0.0
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
networks:
- caddy
labels:


@@ -9,22 +9,22 @@ services:
ports:
- 8448:6167
volumes:
- db:/var/lib/conduwuit
#- ./conduwuit.toml:/etc/conduwuit.toml
- db:/var/lib/continuwuity
#- ./continuwuity.toml:/etc/continuwuity.toml
environment:
CONDUWUIT_SERVER_NAME: your.server.name # EDIT THIS
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
CONDUWUIT_PORT: 6167
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONDUWUIT_ALLOW_REGISTRATION: 'true'
CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
#CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
CONDUWUIT_ALLOW_FEDERATION: 'true'
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
#CONDUWUIT_LOG: warn,state_res=warn
CONDUWUIT_ADDRESS: 0.0.0.0
#CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
CONTINUWUITY_SERVER_NAME: your.server.name # EDIT THIS
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
CONTINUWUITY_PORT: 6167
CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
CONTINUWUITY_ALLOW_REGISTRATION: 'true'
CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
#CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
CONTINUWUITY_ALLOW_FEDERATION: 'true'
CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
#CONTINUWUITY_LOG: warn,state_res=warn
CONTINUWUITY_ADDRESS: 0.0.0.0
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
#
### Uncomment if you want to use your own Element-Web App.
### Note: You need to provide a config.json for Element and you also need a second


@@ -30,16 +30,16 @@ When you have the image you can simply run it with
```bash
docker run -d -p 8448:6167 \
-v db:/var/lib/conduwuit/ \
-e CONDUWUIT_SERVER_NAME="your.server.name" \
-e CONDUWUIT_ALLOW_REGISTRATION=false \
--name conduwuit $LINK
-v db:/var/lib/continuwuity/ \
-e CONTINUWUITY_SERVER_NAME="your.server.name" \
-e CONTINUWUITY_ALLOW_REGISTRATION=false \
--name continuwuity $LINK
```
or you can use [docker compose](#docker-compose).
The `-d` flag lets the container run in detached mode. You may supply an
optional `conduwuit.toml` config file, the example config can be found
optional `continuwuity.toml` config file, the example config can be found
[here](../configuration/examples.md). You can pass in different env vars to
change config values on the fly. You can even configure Continuwuity completely by
using env vars. For an overview of possible values, please take a look at the


@@ -115,7 +115,7 @@ ReadWritePaths=/path/to/custom/database/path
## Creating the Continuwuity configuration file
Now we need to create the Continuwuity's config file in
`/etc/conduwuit/conduwuit.toml`. The example config can be found at
`/etc/continuwuity/continuwuity.toml`. The example config can be found at
[conduwuit-example.toml](../configuration/examples.md).
**Please take a moment to read the config. You need to change at least the


@@ -190,7 +190,7 @@ The initial implementation PR is available [here][1].
- [Workspace-level metadata
(cargo-deb)](https://github.com/kornelski/cargo-deb/issues/68)
[1]: https://github.com/girlbossceo/conduwuit/pull/387
[1]: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/387
[2]: https://wiki.musl-libc.org/functional-differences-from-glibc.html#Unloading-libraries
[3]: https://github.com/rust-lang/rust/issues/28794
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049


@@ -24,8 +24,9 @@ and run the script.
If you're on macOS and need to build an image, run `nix build .#linux-complement`.
We have a Complement fork as some tests have needed to be fixed. This can be found
at: <https://github.com/girlbossceo/complement>
at: <https://forgejo.ellis.link/continuwuation/complement>
[ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo
[ci-workflows]:
https://forgejo.ellis.link/continuwuation/continuwuity/actions/?workflow=ci.yml&actor=0&status=1
[complement]: https://github.com/matrix-org/complement
[direnv]: https://direnv.net/docs/hook.html

docs/security.md (new file)

@@ -0,0 +1 @@
{{#include ../SECURITY.md}}


@@ -33,13 +33,13 @@ dockerTools.buildLayeredImage {
<jason@zemos.net>";
"org.opencontainers.image.created" ="@${toString inputs.self.lastModified}";
"org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust";
"org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/";
"org.opencontainers.image.documentation" = "https://continuwuity.org/";
"org.opencontainers.image.licenses" = "Apache-2.0";
"org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or "";
"org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit";
"org.opencontainers.image.source" = "https://forgejo.ellis.link/continuwuation/continuwuity";
"org.opencontainers.image.title" = main.pname;
"org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/";
"org.opencontainers.image.vendor" = "girlbossceo";
"org.opencontainers.image.url" = "https://continuwuity.org/";
"org.opencontainers.image.vendor" = "continuwuation";
"org.opencontainers.image.version" = main.version;
};
};


@@ -94,7 +94,7 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
#[allow(clippy::result_large_err)]
fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
let link =
"Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺";
"Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 🥺";
let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}");
let content = RoomMessageEventContent::notice_markdown(msg);
error!("Panic while processing command: {error:?}");


@@ -6,6 +6,7 @@ use conduwuit::{
warn,
};
use futures::StreamExt;
use futures::FutureExt;
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId};
use crate::{admin_command, admin_command_dispatch, get_room_info};
@@ -155,7 +156,10 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
evicting admins too)",
);
if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
if let Err(e) = leave_room(self.services, user_id, &room_id, None)
.boxed()
.await
{
warn!("Failed to leave room: {e}");
}
@@ -323,7 +327,10 @@ async fn ban_list_of_rooms(&self) -> Result {
evicting admins too)",
);
if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
if let Err(e) = leave_room(self.services, user_id, &room_id, None)
.boxed()
.await
{
warn!("Failed to leave room: {e}");
}


@@ -9,6 +9,7 @@ use conduwuit::{
};
use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
use futures::StreamExt;
use futures::FutureExt;
use ruma::{
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId,
events::{
@@ -655,7 +656,9 @@ pub(super) async fn force_leave_room(
return Err!("{user_id} is not joined in the room");
}
leave_room(self.services, &user_id, &room_id, None).await?;
leave_room(self.services, &user_id, &room_id, None)
.boxed()
.await?;
self.write_str(&format!("{user_id} has left {room_id}.",))
.await


@@ -763,7 +763,9 @@ pub(crate) async fn deactivate_route(
super::update_displayname(&services, sender_user, None, &all_joined_rooms).await;
super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await;
full_user_deactivate(&services, sender_user, &all_joined_rooms).await?;
full_user_deactivate(&services, sender_user, &all_joined_rooms)
.boxed()
.await?;
info!("User {sender_user} deactivated their account.");
@@ -915,7 +917,9 @@ pub async fn full_user_deactivate(
}
}
super::leave_all_rooms(services, user_id).await;
super::leave_all_rooms(services, user_id)
.boxed()
.await;
Ok(())
}


@@ -114,7 +114,9 @@ async fn banned_room_check(
.collect()
.await;
full_user_deactivate(services, user_id, &all_joined_rooms).await?;
full_user_deactivate(services, user_id, &all_joined_rooms)
.boxed()
.await?;
}
return Err!(Request(Forbidden("This room is banned on this homeserver.")));
@@ -153,7 +155,9 @@ async fn banned_room_check(
.collect()
.await;
full_user_deactivate(services, user_id, &all_joined_rooms).await?;
full_user_deactivate(services, user_id, &all_joined_rooms)
.boxed()
.await?;
}
return Err!(Request(Forbidden("This remote server is banned on this homeserver.")));
@@ -259,6 +263,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
room_id.server_name(),
client,
)
.boxed()
.await?;
let mut servers = body.via.clone();
@@ -478,6 +483,7 @@ pub(crate) async fn leave_room_route(
body: Ruma<leave_room::v3::Request>,
) -> Result<leave_room::v3::Response> {
leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone())
.boxed()
.await
.map(|()| leave_room::v3::Response::new())
}
@@ -1792,7 +1798,10 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) {
for room_id in all_rooms {
// ignore errors
if let Err(e) = leave_room(services, user_id, &room_id, None).await {
if let Err(e) = leave_room(services, user_id, &room_id, None)
.boxed()
.await
{
warn!(%user_id, "Failed to leave {room_id} remotely: {e}");
}
@@ -2162,6 +2171,109 @@ async fn knock_room_by_id_helper(
}
}
// For knock_restricted rooms, check if the user meets the restricted conditions
// If they do, attempt to join instead of knock
// This is not mentioned in the spec, but should be allowable (we're allowed to
// auto-join invites to knocked rooms)
let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
if let JoinRule::KnockRestricted(restricted) = &join_rule {
let restriction_rooms: Vec<_> = restricted
.allow
.iter()
.filter_map(|a| match a {
| AllowRule::RoomMembership(r) => Some(&r.room_id),
| _ => None,
})
.collect();
// Check if the user is in any of the allowed rooms
let mut user_meets_restrictions = false;
for restriction_room_id in &restriction_rooms {
if services
.rooms
.state_cache
.is_joined(sender_user, restriction_room_id)
.await
{
user_meets_restrictions = true;
break;
}
}
// If the user meets the restrictions, try joining instead
if user_meets_restrictions {
debug_info!(
"{sender_user} meets the restricted criteria in knock_restricted room \
{room_id}, attempting to join instead of knock"
);
// For this case, we need to drop the state lock and get a new one in
// join_room_by_id_helper We need to release the lock here and let
// join_room_by_id_helper acquire it again
drop(state_lock);
match join_room_by_id_helper(
services,
sender_user,
room_id,
reason.clone(),
servers,
None,
&None,
)
.await
{
| Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())),
| Err(e) => {
debug_warn!(
"Failed to convert knock to join for {sender_user} in {room_id}: {e:?}"
);
// Get a new state lock for the remaining knock logic
let new_state_lock = services.rooms.state.mutex.lock(room_id).await;
let server_in_room = services
.rooms
.state_cache
.server_in_room(services.globals.server_name(), room_id)
.await;
let local_knock = server_in_room
|| servers.is_empty()
|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
if local_knock {
knock_room_helper_local(
services,
sender_user,
room_id,
reason,
servers,
new_state_lock,
)
.boxed()
.await?;
} else {
knock_room_helper_remote(
services,
sender_user,
room_id,
reason,
servers,
new_state_lock,
)
.boxed()
.await?;
}
return Ok(knock_room::v3::Response::new(room_id.to_owned()));
},
}
}
} else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
debug_warn!(
"{sender_user} attempted to knock on room {room_id} but its join rule is \
{join_rule:?}, not knock or knock_restricted"
);
}
let server_in_room = services
.rooms
.state_cache
@@ -2209,6 +2321,12 @@ async fn knock_room_helper_local(
return Err!(Request(Forbidden("This room does not support knocking.")));
}
// Verify that this room has a valid knock or knock_restricted join rule
let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
return Err!(Request(Forbidden("This room's join rule does not allow knocking.")));
}
let content = RoomMemberEventContent {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),


@@ -121,7 +121,9 @@ where
.map(|(key, val)| (key, val.collect()))
.collect();
if !populate {
if populate {
rooms.push(summary_to_chunk(summary.clone()));
} else {
children = children
.iter()
.rev()
@@ -144,10 +146,8 @@ where
.collect();
}
if populate {
rooms.push(summary_to_chunk(summary.clone()));
} else if queue.is_empty() && children.is_empty() {
return Err!(Request(InvalidParam("Room IDs in token were not found.")));
if !populate && queue.is_empty() && children.is_empty() {
break;
}
parents.insert(current_room.clone());


@@ -6,6 +6,7 @@ use conduwuit::{
};
use conduwuit_service::Services;
use futures::TryStreamExt;
use futures::FutureExt;
use ruma::{
OwnedEventId, RoomId, UserId,
api::client::state::{get_state_events, get_state_events_for_key, send_state_event},
@@ -59,6 +60,7 @@ pub(crate) async fn send_state_event_for_empty_key_route(
body: Ruma<send_state_event::v3::Request>,
) -> Result<RumaResponse<send_state_event::v3::Response>> {
send_state_event_for_key_route(State(services), body)
.boxed()
.await
.map(RumaResponse)
}


@@ -1009,8 +1009,6 @@ async fn calculate_state_incremental<'a>(
) -> Result<StateChanges> {
let since_shortstatehash = since_shortstatehash.unwrap_or(current_shortstatehash);
let state_changed = since_shortstatehash != current_shortstatehash;
let encrypted_room = services
.rooms
.state_accessor
@@ -1042,7 +1040,7 @@
})
.into();
let state_diff_ids: OptionFuture<_> = (!full_state && state_changed)
let state_diff_ids: OptionFuture<_> = (!full_state)
.then(|| {
StreamExt::into_future(
services

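Commit dd372f77eb's fix above removes the `state_changed` guard, so the diff-IDs future is now built whenever the client didn't ask for `full_state`. The gating idiom is `OptionFuture`; a small sketch under the assumption that only the `futures` crate is in scope, with a stand-in for the real shortstatehash diff query:

```rust
use futures::future::OptionFuture;

// Stand-in for the state diff query in calculate_state_incremental.
async fn compute_state_diff_ids() -> Vec<u64> {
	vec![]
}

async fn demo(full_state: bool) -> Option<Vec<u64>> {
	// Construct the inner future only when the condition holds; an
	// OptionFuture built from None resolves immediately to None.
	let diff: OptionFuture<_> = (!full_state).then(compute_state_diff_ids).into();
	diff.await
}
```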

@@ -274,6 +274,10 @@ pub fn set_dirty_decay<I: Into<Option<usize>>>(arena: I, decay_ms: isize) -> Res
}
}
pub fn background_thread_enable(enable: bool) -> Result<bool> {
set::<u8>(&mallctl!("background_thread"), enable.into()).map(is_nonzero!())
}
#[inline]
#[must_use]
pub fn is_affine_arena() -> bool { is_percpu_arena() || is_phycpu_arena() }
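The new `background_thread_enable` helper toggles jemalloc's `background_thread` setting through mallctl; commit fcd5669aa1 ("Join jemalloc background threads prior to exit") is its consumer. A hedged sketch of a shutdown call site, with the helper stubbed so the example stands alone; that it reports the prior setting is an assumption read off the `set::<u8>(..).map(is_nonzero!())` body above:

```rust
// Stubbed stand-in for the mallctl wrapper added above.
fn background_thread_enable(enable: bool) -> Result<bool, String> {
	Ok(enable)
}

fn on_shutdown() {
	// Writing `false` to background_thread makes jemalloc stop and join
	// its background threads, so none of them outlive main().
	if let Err(e) = background_thread_enable(false) {
		eprintln!("Failed to stop jemalloc background threads: {e}");
	}
}
```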


@@ -118,7 +118,7 @@ pub fn check(config: &Config) -> Result {
if cfg!(not(debug_assertions)) && config.server_name == "your.server.name" {
return Err!(Config(
"server_name",
"You must specify a valid server name for production usage of conduwuit."
"You must specify a valid server name for production usage of continuwuity."
));
}
@@ -290,7 +290,7 @@ fn warn_deprecated(config: &Config) {
if was_deprecated {
warn!(
"Read conduwuit config documentation at https://conduwuit.puppyirl.gay/configuration.html and check your \
"Read continuwuity config documentation at https://continuwuity.org/configuration.html and check your \
configuration if any new configuration parameters should be adjusted"
);
}


@@ -2042,41 +2042,41 @@ fn default_database_backups_to_keep() -> i16 { 1 }
fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) }
fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) }
fn default_db_cache_capacity_mb() -> f64 { 256.0 + parallelism_scaled_f64(256.0) }
fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(200_000) }
fn default_cache_capacity_modifier() -> f64 { 1.0 }
fn default_auth_chain_cache_capacity() -> u32 {
parallelism_scaled_u32(10_000).saturating_add(100_000)
parallelism_scaled_u32(25_000).saturating_add(200_000)
}
fn default_shorteventid_cache_capacity() -> u32 {
parallelism_scaled_u32(50_000).saturating_add(100_000)
parallelism_scaled_u32(50_000).saturating_add(200_000)
}
fn default_eventidshort_cache_capacity() -> u32 {
parallelism_scaled_u32(25_000).saturating_add(100_000)
parallelism_scaled_u32(25_000).saturating_add(200_000)
}
fn default_eventid_pdu_cache_capacity() -> u32 {
parallelism_scaled_u32(25_000).saturating_add(100_000)
parallelism_scaled_u32(25_000).saturating_add(200_000)
}
fn default_shortstatekey_cache_capacity() -> u32 {
parallelism_scaled_u32(10_000).saturating_add(100_000)
parallelism_scaled_u32(25_000).saturating_add(200_000)
}
fn default_statekeyshort_cache_capacity() -> u32 {
parallelism_scaled_u32(10_000).saturating_add(100_000)
parallelism_scaled_u32(25_000).saturating_add(200_000)
}
fn default_servernameevent_data_cache_capacity() -> u32 {
parallelism_scaled_u32(100_000).saturating_add(500_000)
parallelism_scaled_u32(200_000).saturating_add(500_000)
}
fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(2000) }
fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
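The cache-capacity bumps above (commit dfd599d07d, "cache maxxing") roughly double the parallelism-scaled bases and floors. Assuming `parallelism_scaled_u32(n)` multiplies `n` by the detected CPU parallelism (its definition is not part of this diff), the new PDU cache default works out as follows:

```rust
use std::thread::available_parallelism;

// Assumed shape of the scaling helper used by the defaults above.
fn parallelism_scaled_u32(val: u32) -> u32 {
	let threads = available_parallelism().map_or(1, |n| n.get()) as u32;
	val.saturating_mul(threads)
}

fn default_pdu_cache_capacity() -> u32 {
	parallelism_scaled_u32(25_000).saturating_add(200_000)
}

fn main() {
	// On a 16-thread machine: 25_000 * 16 + 200_000 = 600_000 entries,
	// up from 10_000 * 16 + 100_000 = 260_000 before this change.
	println!("pdu cache capacity: {}", default_pdu_cache_capacity());
}
```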


@@ -1,18 +1,10 @@
use std::{
borrow::Borrow,
fmt::{Debug, Display},
hash::Hash,
};
use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType};
use serde_json::value::RawValue as RawJsonValue;
/// Abstraction of a PDU so users can have their own PDU types.
pub trait Event {
type Id: Clone + Debug + Display + Eq + Ord + Hash + Send + Borrow<EventId>;
/// The `EventId` of this event.
fn event_id(&self) -> &Self::Id;
fn event_id(&self) -> &EventId;
/// The `RoomId` of this event.
fn room_id(&self) -> &RoomId;
@@ -34,20 +26,18 @@ pub trait Event {
/// The events before this event.
// Requires GATs to avoid boxing (and TAIT for making it convenient).
fn prev_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_;
fn prev_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_;
/// All the authenticating events for this event.
// Requires GATs to avoid boxing (and TAIT for making it convenient).
fn auth_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_;
fn auth_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_;
/// If this event is a redaction event this is the event it redacts.
fn redacts(&self) -> Option<&Self::Id>;
fn redacts(&self) -> Option<&EventId>;
}
impl<T: Event> Event for &T {
type Id = T::Id;
fn event_id(&self) -> &Self::Id { (*self).event_id() }
fn event_id(&self) -> &EventId { (*self).event_id() }
fn room_id(&self) -> &RoomId { (*self).room_id() }
@@ -61,13 +51,13 @@ impl<T: Event> Event for &T {
fn state_key(&self) -> Option<&str> { (*self).state_key() }
fn prev_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
fn prev_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_ {
(*self).prev_events()
}
fn auth_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
fn auth_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_ {
(*self).auth_events()
}
fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() }
fn redacts(&self) -> Option<&EventId> { (*self).redacts() }
}


@@ -79,9 +79,7 @@ impl Pdu {
}
impl Event for Pdu {
type Id = OwnedEventId;
fn event_id(&self) -> &Self::Id { &self.event_id }
fn event_id(&self) -> &EventId { &self.event_id }
fn room_id(&self) -> &RoomId { &self.room_id }
@@ -97,15 +95,15 @@ impl Event for Pdu {
fn state_key(&self) -> Option<&str> { self.state_key.as_deref() }
fn prev_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
self.prev_events.iter()
fn prev_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_ {
self.prev_events.iter().map(AsRef::as_ref)
}
fn auth_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
self.auth_events.iter()
fn auth_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_ {
self.auth_events.iter().map(AsRef::as_ref)
}
fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() }
fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() }
}
/// Prevent derived equality which wouldn't limit itself to event_id


@@ -1,8 +1,8 @@
use ruma::{
events::{
AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent,
AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent,
room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent,
AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent,
AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, room::member::RoomMemberEventContent,
space::child::HierarchySpaceChildEvent,
},
serde::Raw,
};
@@ -10,41 +10,6 @@ use serde_json::{json, value::Value as JsonValue};
use crate::implement;
/// This only works for events that are also AnyRoomEvents.
#[must_use]
#[implement(super::Pdu)]
pub fn into_any_event(self) -> Raw<AnyEphemeralRoomEvent> {
serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works")
}
/// This only works for events that are also AnyRoomEvents.
#[implement(super::Pdu)]
#[must_use]
#[inline]
pub fn into_any_event_value(self) -> JsonValue {
let (redacts, content) = self.copy_redacts();
let mut json = json!({
"content": content,
"type": self.kind,
"event_id": self.event_id,
"sender": self.sender,
"origin_server_ts": self.origin_server_ts,
"room_id": self.room_id,
});
if let Some(unsigned) = &self.unsigned {
json["unsigned"] = json!(unsigned);
}
if let Some(state_key) = &self.state_key {
json["state_key"] = json!(state_key);
}
if let Some(redacts) = &redacts {
json["redacts"] = json!(redacts);
}
json
}
#[implement(super::Pdu)]
#[must_use]
#[inline]
@@ -53,7 +18,8 @@ pub fn into_room_event(self) -> Raw<AnyTimelineEvent> { self.to_room_event() }
#[implement(super::Pdu)]
#[must_use]
pub fn to_room_event(&self) -> Raw<AnyTimelineEvent> {
serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works")
let value = self.to_room_event_value();
serde_json::from_value(value).expect("Failed to serialize Event value")
}
#[implement(super::Pdu)]
@@ -91,8 +57,8 @@ pub fn into_message_like_event(self) -> Raw<AnyMessageLikeEvent> { self.to_messa
#[implement(super::Pdu)]
#[must_use]
pub fn to_message_like_event(&self) -> Raw<AnyMessageLikeEvent> {
serde_json::from_value(self.to_message_like_event_value())
.expect("Raw::from_value always works")
let value = self.to_message_like_event_value();
serde_json::from_value(value).expect("Failed to serialize Event value")
}
#[implement(super::Pdu)]
@@ -130,7 +96,8 @@ pub fn into_sync_room_event(self) -> Raw<AnySyncTimelineEvent> { self.to_sync_ro
#[implement(super::Pdu)]
#[must_use]
pub fn to_sync_room_event(&self) -> Raw<AnySyncTimelineEvent> {
serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works")
let value = self.to_sync_room_event_value();
serde_json::from_value(value).expect("Failed to serialize Event value")
}
#[implement(super::Pdu)]
@@ -162,7 +129,8 @@ pub fn to_sync_room_event_value(&self) -> JsonValue {
#[implement(super::Pdu)]
#[must_use]
pub fn into_state_event(self) -> Raw<AnyStateEvent> {
serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works")
let value = self.into_state_event_value();
serde_json::from_value(value).expect("Failed to serialize Event value")
}
#[implement(super::Pdu)]
@@ -189,8 +157,8 @@ pub fn into_state_event_value(self) -> JsonValue {
#[implement(super::Pdu)]
#[must_use]
pub fn into_sync_state_event(self) -> Raw<AnySyncStateEvent> {
serde_json::from_value(self.into_sync_state_event_value())
.expect("Raw::from_value always works")
let value = self.into_sync_state_event_value();
serde_json::from_value(value).expect("Failed to serialize Event value")
}
#[implement(super::Pdu)]
@@ -223,8 +191,8 @@ pub fn into_stripped_state_event(self) -> Raw<AnyStrippedStateEvent> {
#[implement(super::Pdu)]
#[must_use]
pub fn to_stripped_state_event(&self) -> Raw<AnyStrippedStateEvent> {
serde_json::from_value(self.to_stripped_state_event_value())
.expect("Raw::from_value always works")
let value = self.to_stripped_state_event_value();
serde_json::from_value(value).expect("Failed to serialize Event value")
}
#[implement(super::Pdu)]
@@ -242,8 +210,8 @@ pub fn to_stripped_state_event_value(&self) -> JsonValue {
#[implement(super::Pdu)]
#[must_use]
pub fn into_stripped_spacechild_state_event(self) -> Raw<HierarchySpaceChildEvent> {
serde_json::from_value(self.into_stripped_spacechild_state_event_value())
.expect("Raw::from_value always works")
let value = self.into_stripped_spacechild_state_event_value();
serde_json::from_value(value).expect("Failed to serialize Event value")
}
#[implement(super::Pdu)]
@@ -262,7 +230,8 @@ pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue {
#[implement(super::Pdu)]
#[must_use]
pub fn into_member_event(self) -> Raw<StateEvent<RoomMemberEventContent>> {
serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works")
let value = self.into_member_event_value();
serde_json::from_value(value).expect("Failed to serialize Event value")
}
#[implement(super::Pdu)]


@@ -52,7 +52,6 @@ fn lexico_topo_sort(c: &mut test::Bencher) {
#[cfg(conduwuit_bench)]
#[cfg_attr(conduwuit_bench, bench)]
fn resolution_shallow_auth_chain(c: &mut test::Bencher) {
let parallel_fetches = 32;
let mut store = TestStore(hashmap! {});
// build up the DAG
@@ -78,7 +77,6 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) {
&auth_chain_sets,
&fetch,
&exists,
parallel_fetches,
)
.await
{
@@ -91,7 +89,6 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) {
#[cfg(conduwuit_bench)]
#[cfg_attr(conduwuit_bench, bench)]
fn resolve_deeper_event_set(c: &mut test::Bencher) {
let parallel_fetches = 32;
let mut inner = INITIAL_EVENTS();
let ban = BAN_STATE_SET();
@@ -153,7 +150,6 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) {
&auth_chain_sets,
&fetch,
&exists,
parallel_fetches,
)
.await
{
@@ -190,7 +186,11 @@ impl<E: Event + Clone> TestStore<E> {
}
/// Returns a Vec of the related auth events to the given `event`.
fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec<E::Id>) -> Result<HashSet<E::Id>> {
fn auth_event_ids(
&self,
room_id: &RoomId,
event_ids: Vec<OwnedEventId>,
) -> Result<HashSet<OwnedEventId>> {
let mut result = HashSet::new();
let mut stack = event_ids;
@@ -216,8 +216,8 @@ impl<E: Event + Clone> TestStore<E> {
fn auth_chain_diff(
&self,
room_id: &RoomId,
event_ids: Vec<Vec<E::Id>>,
) -> Result<Vec<E::Id>> {
event_ids: Vec<Vec<OwnedEventId>>,
) -> Result<Vec<OwnedEventId>> {
let mut auth_chain_sets = vec![];
for ids in event_ids {
// TODO state store `auth_event_ids` returns self in the event ids list
@@ -238,7 +238,7 @@ impl<E: Event + Clone> TestStore<E> {
Ok(auth_chain_sets
.into_iter()
.flatten()
.filter(|id| !common.contains(id.borrow()))
.filter(|id| !common.contains(id))
.collect())
} else {
Ok(vec![])
@@ -565,7 +565,7 @@ impl EventTypeExt for &TimelineEventType {
mod event {
use ruma::{
MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
events::{TimelineEventType, pdu::Pdu},
};
use serde::{Deserialize, Serialize};
@@ -574,9 +574,7 @@ mod event {
use super::Event;
impl Event for PduEvent {
type Id = OwnedEventId;
fn event_id(&self) -> &Self::Id { &self.event_id }
fn event_id(&self) -> &EventId { &self.event_id }
fn room_id(&self) -> &RoomId {
match &self.rest {
@@ -632,28 +630,30 @@
}
}
fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + Send + '_> {
match &self.rest {
| Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)),
| Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()),
| Pdu::RoomV1Pdu(ev) =>
Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())),
| Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)),
#[cfg(not(feature = "unstable-exhaustive-types"))]
| _ => unreachable!("new PDU version"),
}
}
fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + Send + '_> {
match &self.rest {
| Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)),
| Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()),
| Pdu::RoomV1Pdu(ev) =>
Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())),
| Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)),
#[cfg(not(feature = "unstable-exhaustive-types"))]
| _ => unreachable!("new PDU version"),
}
}
fn redacts(&self) -> Option<&Self::Id> {
fn redacts(&self) -> Option<&EventId> {
match &self.rest {
| Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(),
| Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(),
| Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(),
| Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(),
#[cfg(not(feature = "unstable-exhaustive-types"))]
| _ => unreachable!("new PDU version"),
}


@@ -133,7 +133,7 @@ pub fn auth_types_for_event(
level = "debug",
skip_all,
fields(
event_id = incoming_event.event_id().borrow().as_str()
event_id = incoming_event.event_id().as_str(),
)
)]
pub async fn auth_check<F, Fut, Fetched, Incoming>(
@@ -259,7 +259,7 @@ where
// 3. If event does not have m.room.create in auth_events reject
if !incoming_event
.auth_events()
.any(|id| id.borrow() == room_create_event.event_id().borrow())
.any(|id| id == room_create_event.event_id())
{
warn!("no m.room.create event in auth events");
return Ok(false);
@@ -638,7 +638,7 @@ fn valid_membership_change(
warn!(?target_user_membership_event_id, "Banned user can't join");
false
} else if (join_rules == JoinRule::Invite
|| room_version.allow_knocking && join_rules == JoinRule::Knock)
|| room_version.allow_knocking && (join_rules == JoinRule::Knock || matches!(join_rules, JoinRule::KnockRestricted(_))))
// If the join_rule is invite then allow if membership state is invite or join
&& (target_user_current_membership == MembershipState::Join
|| target_user_current_membership == MembershipState::Invite)
@@ -1021,11 +1021,11 @@ fn check_redaction(
// If the domain of the event_id of the event being redacted is the same as the
// domain of the event_id of the m.room.redaction, allow
if redaction_event.event_id().borrow().server_name()
if redaction_event.event_id().server_name()
== redaction_event
.redacts()
.as_ref()
.and_then(|&id| id.borrow().server_name())
.and_then(|&id| id.server_name())
{
debug!("redaction event allowed via room version 1 rules");
return Ok(true);


@@ -20,7 +20,7 @@ use std::{
use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future};
use ruma::{
EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId,
EventId, Int, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId,
events::{
StateEventType, TimelineEventType,
room::member::{MembershipState, RoomMemberEventContent},
@@ -39,9 +39,7 @@ use crate::{
debug, debug_error,
matrix::{event::Event, pdu::StateKey},
trace,
utils::stream::{
BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryReadyExt, WidebandExt,
},
utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, WidebandExt},
warn,
};
@@ -69,9 +67,6 @@ type Result<T, E = Error> = crate::Result<T, E>;
/// * `event_fetch` - Any event not found in the `event_map` will defer to this
/// closure to find the event.
///
/// * `parallel_fetches` - The number of asynchronous fetch requests in-flight
/// for any given operation.
///
/// ## Invariants
///
/// The caller of `resolve` must ensure that all the events are from the same
@@ -82,21 +77,19 @@ type Result<T, E = Error> = crate::Result<T, E>;
pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>(
room_version: &RoomVersionId,
state_sets: Sets,
auth_chain_sets: &'a [HashSet<E::Id, Hasher>],
auth_chain_sets: &'a [HashSet<OwnedEventId, Hasher>],
event_fetch: &Fetch,
event_exists: &Exists,
parallel_fetches: usize,
) -> Result<StateMap<E::Id>>
) -> Result<StateMap<OwnedEventId>>
where
Fetch: Fn(E::Id) -> FetchFut + Sync,
Fetch: Fn(OwnedEventId) -> FetchFut + Sync,
FetchFut: Future<Output = Option<E>> + Send,
Exists: Fn(E::Id) -> ExistsFut + Sync,
Exists: Fn(OwnedEventId) -> ExistsFut + Sync,
ExistsFut: Future<Output = bool> + Send,
Sets: IntoIterator<IntoIter = SetIter> + Send,
SetIter: Iterator<Item = &'a StateMap<E::Id>> + Clone + Send,
SetIter: Iterator<Item = &'a StateMap<OwnedEventId>> + Clone + Send,
Hasher: BuildHasher + Send + Sync,
E: Event + Clone + Send + Sync,
E::Id: Borrow<EventId> + Send + Sync,
for<'b> &'b E: Send,
{
debug!("State resolution starting");
@@ -147,13 +140,8 @@ where
// Sort the control events based on power_level/clock/event_id and
// outgoing/incoming edges
let sorted_control_levels = reverse_topological_power_sort(
control_events,
&all_conflicted,
&event_fetch,
parallel_fetches,
)
.await?;
let sorted_control_levels =
reverse_topological_power_sort(control_events, &all_conflicted, &event_fetch).await?;
debug!(count = sorted_control_levels.len(), "power events");
trace!(list = ?sorted_control_levels, "sorted power events");
@@ -162,7 +150,7 @@ where
// Sequentially auth check each control event.
let resolved_control = iterative_auth_check(
&room_version,
sorted_control_levels.iter().stream(),
sorted_control_levels.iter().stream().map(AsRef::as_ref),
clean.clone(),
&event_fetch,
)
@@ -179,7 +167,7 @@ where
// that failed auth
let events_to_resolve: Vec<_> = all_conflicted
.iter()
.filter(|&id| !deduped_power_ev.contains(id.borrow()))
.filter(|&id| !deduped_power_ev.contains(id))
.cloned()
.collect();
@@ -199,7 +187,7 @@ where
let mut resolved_state = iterative_auth_check(
&room_version,
sorted_left_events.iter().stream(),
sorted_left_events.iter().stream().map(AsRef::as_ref),
resolved_control, // The control events are added to the final resolved state
&event_fetch,
)
@@ -292,16 +280,14 @@ where
/// earlier (further back in time) origin server timestamp.
#[tracing::instrument(level = "debug", skip_all)]
async fn reverse_topological_power_sort<E, F, Fut>(
events_to_sort: Vec<E::Id>,
auth_diff: &HashSet<E::Id>,
events_to_sort: Vec<OwnedEventId>,
auth_diff: &HashSet<OwnedEventId>,
fetch_event: &F,
parallel_fetches: usize,
) -> Result<Vec<E::Id>>
) -> Result<Vec<OwnedEventId>>
where
F: Fn(E::Id) -> Fut + Sync,
F: Fn(OwnedEventId) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E: Event + Send + Sync,
E::Id: Borrow<EventId> + Send + Sync,
{
debug!("reverse topological sort of power events");
@@ -311,35 +297,36 @@ where
}
// This is used in the `key_fn` passed to the lexico_topo_sort fn
let event_to_pl = graph
let event_to_pl: HashMap<_, _> = graph
.keys()
.cloned()
.stream()
.map(|event_id| {
get_power_level_for_sender(event_id.clone(), fetch_event)
.map(move |res| res.map(|pl| (event_id, pl)))
.broad_filter_map(async |event_id| {
let pl = get_power_level_for_sender(&event_id, fetch_event)
.await
.ok()?;
Some((event_id, pl))
})
.buffer_unordered(parallel_fetches)
.ready_try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| {
.inspect(|(event_id, pl)| {
debug!(
event_id = event_id.borrow().as_str(),
power_level = i64::from(pl),
event_id = event_id.as_str(),
power_level = i64::from(*pl),
"found the power level of an event's sender",
);
event_to_pl.insert(event_id.clone(), pl);
Ok(event_to_pl)
})
.collect()
.boxed()
.await?;
.await;
let event_to_pl = &event_to_pl;
let fetcher = |event_id: E::Id| async move {
let fetcher = async |event_id: OwnedEventId| {
let pl = *event_to_pl
.get(event_id.borrow())
.get(&event_id)
.ok_or_else(|| Error::NotFound(String::new()))?;
let ev = fetch_event(event_id)
.await
.ok_or_else(|| Error::NotFound(String::new()))?;
Ok((pl, ev.origin_server_ts()))
};
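
The rewrite above replaces a manual `buffer_unordered(parallel_fetches)` plus fold with the crate's `broad_filter_map`, which picks its own concurrency width. With stock `futures` combinators the same shape looks roughly like this sketch (the width of 32 is a placeholder):

```rust
use std::collections::HashMap;

use futures::{StreamExt, stream};

// Sketch: build the event-id -> power-level map with bounded concurrency.
let event_to_pl: HashMap<_, _> = stream::iter(graph.keys().cloned())
    .map(|event_id| async move {
        let pl = get_power_level_for_sender(&event_id, fetch_event).await.ok()?;
        Some((event_id, pl))
    })
    .buffer_unordered(32) // hypothetical width; broad_filter_map chooses one itself
    .filter_map(|opt| async move { opt })
    .collect()
    .await;
```
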
@@ -476,18 +463,17 @@ where
/// the eventId at the eventId's generation (we walk backwards to the
/// `EventId`'s most recent previous power level event).
async fn get_power_level_for_sender<E, F, Fut>(
event_id: E::Id,
event_id: &EventId,
fetch_event: &F,
) -> serde_json::Result<Int>
where
F: Fn(E::Id) -> Fut + Sync,
F: Fn(OwnedEventId) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E: Event + Send,
E::Id: Borrow<EventId> + Send,
{
debug!("fetch event ({event_id}) senders power level");
let event = fetch_event(event_id).await;
let event = fetch_event(event_id.to_owned()).await;
let auth_events = event.as_ref().map(Event::auth_events);
@@ -495,7 +481,7 @@ where
.into_iter()
.flatten()
.stream()
.broadn_filter_map(5, |aid| fetch_event(aid.clone()))
.broadn_filter_map(5, |aid| fetch_event(aid.to_owned()))
.ready_find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, ""))
.await;
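
In plain terms, the function fetches the event, then scans its auth events for the sender's most recent `m.room.power_levels` event; `broadn_filter_map(5, ..)` merely caps that scan at five concurrent fetches. A sequential sketch of the same scan, assuming the crate's `Event` trait and `is_type_and_key` helper as used above:

```rust
use futures::Future;
use ruma::{OwnedEventId, events::TimelineEventType};

// Sketch: sequential form of the auth-event scan above.
async fn find_power_levels_event<E, F, Fut>(event: &E, fetch_event: &F) -> Option<E>
where
    E: Event,
    F: Fn(OwnedEventId) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
{
    for aid in event.auth_events() {
        if let Some(aev) = fetch_event(aid.to_owned()).await {
            if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") {
                return Some(aev);
            }
        }
    }
    None
}
```
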
@@ -528,14 +514,13 @@ where
async fn iterative_auth_check<'a, E, F, Fut, S>(
room_version: &RoomVersion,
events_to_check: S,
unconflicted_state: StateMap<E::Id>,
unconflicted_state: StateMap<OwnedEventId>,
fetch_event: &F,
) -> Result<StateMap<E::Id>>
) -> Result<StateMap<OwnedEventId>>
where
F: Fn(E::Id) -> Fut + Sync,
F: Fn(OwnedEventId) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E::Id: Borrow<EventId> + Clone + Eq + Ord + Send + Sync + 'a,
S: Stream<Item = &'a E::Id> + Send + 'a,
S: Stream<Item = &'a EventId> + Send + 'a,
E: Event + Clone + Send + Sync,
{
debug!("starting iterative auth check");
@@ -543,7 +528,7 @@ where
let events_to_check: Vec<_> = events_to_check
.map(Result::Ok)
.broad_and_then(async |event_id| {
fetch_event(event_id.clone())
fetch_event(event_id.to_owned())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}")))
})
@@ -551,16 +536,16 @@ where
.boxed()
.await?;
let auth_event_ids: HashSet<E::Id> = events_to_check
let auth_event_ids: HashSet<OwnedEventId> = events_to_check
.iter()
.flat_map(|event: &E| event.auth_events().map(Clone::clone))
.flat_map(|event: &E| event.auth_events().map(ToOwned::to_owned))
.collect();
let auth_events: HashMap<E::Id, E> = auth_event_ids
let auth_events: HashMap<OwnedEventId, E> = auth_event_ids
.into_iter()
.stream()
.broad_filter_map(fetch_event)
.map(|auth_event| (auth_event.event_id().clone(), auth_event))
.map(|auth_event| (auth_event.event_id().to_owned(), auth_event))
.collect()
.boxed()
.await;
@@ -581,7 +566,7 @@ where
let mut auth_state = StateMap::new();
for aid in event.auth_events() {
if let Some(ev) = auth_events.get(aid.borrow()) {
if let Some(ev) = auth_events.get(aid) {
//TODO: synapse checks "rejected_reason" which is most likely related to
// soft-failing
auth_state.insert(
@@ -592,7 +577,7 @@ where
ev.clone(),
);
} else {
warn!(event_id = aid.borrow().as_str(), "missing auth event");
warn!(event_id = aid.as_str(), "missing auth event");
}
}
@@ -601,7 +586,7 @@ where
.stream()
.ready_filter_map(|key| Some((key, resolved_state.get(key)?)))
.filter_map(|(key, ev_id)| async move {
if let Some(event) = auth_events.get(ev_id.borrow()) {
if let Some(event) = auth_events.get(ev_id) {
Some((key, event.clone()))
} else {
Some((key, fetch_event(ev_id.clone()).await?))
@@ -633,7 +618,7 @@ where
// add event to resolved state map
resolved_state.insert(
event.event_type().with_state_key(state_key),
event.event_id().clone(),
event.event_id().to_owned(),
);
},
| Ok(false) => {
@@ -660,15 +645,14 @@ where
/// level as a parent) will be marked as depth 1. Depth 1 is "older" than depth
/// 0.
async fn mainline_sort<E, F, Fut>(
to_sort: &[E::Id],
resolved_power_level: Option<E::Id>,
to_sort: &[OwnedEventId],
resolved_power_level: Option<OwnedEventId>,
fetch_event: &F,
) -> Result<Vec<E::Id>>
) -> Result<Vec<OwnedEventId>>
where
F: Fn(E::Id) -> Fut + Sync,
F: Fn(OwnedEventId) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E: Event + Clone + Send + Sync,
E::Id: Borrow<EventId> + Clone + Send + Sync,
{
debug!("mainline sort of events");
@@ -688,7 +672,7 @@ where
pl = None;
for aid in event.auth_events() {
let ev = fetch_event(aid.clone())
let ev = fetch_event(aid.to_owned())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?;
@@ -734,26 +718,25 @@ where
/// that has an associated mainline depth.
async fn get_mainline_depth<E, F, Fut>(
mut event: Option<E>,
mainline_map: &HashMap<E::Id, usize>,
mainline_map: &HashMap<OwnedEventId, usize>,
fetch_event: &F,
) -> Result<usize>
where
F: Fn(E::Id) -> Fut + Sync,
F: Fn(OwnedEventId) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E: Event + Send + Sync,
E::Id: Borrow<EventId> + Send + Sync,
{
while let Some(sort_ev) = event {
debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline");
debug!(event_id = sort_ev.event_id().as_str(), "mainline");
let id = sort_ev.event_id();
if let Some(depth) = mainline_map.get(id.borrow()) {
if let Some(depth) = mainline_map.get(id) {
return Ok(*depth);
}
event = None;
for aid in sort_ev.auth_events() {
let aev = fetch_event(aid.clone())
let aev = fetch_event(aid.to_owned())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?;
@@ -768,15 +751,14 @@ where
}
async fn add_event_and_auth_chain_to_graph<E, F, Fut>(
graph: &mut HashMap<E::Id, HashSet<E::Id>>,
event_id: E::Id,
auth_diff: &HashSet<E::Id>,
graph: &mut HashMap<OwnedEventId, HashSet<OwnedEventId>>,
event_id: OwnedEventId,
auth_diff: &HashSet<OwnedEventId>,
fetch_event: &F,
) where
F: Fn(E::Id) -> Fut + Sync,
F: Fn(OwnedEventId) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E: Event + Send + Sync,
E::Id: Borrow<EventId> + Clone + Send + Sync,
{
let mut state = vec![event_id];
while let Some(eid) = state.pop() {
@@ -786,26 +768,27 @@ async fn add_event_and_auth_chain_to_graph<E, F, Fut>(
// Prefer the store to the event, as the store filters and dedups the events
for aid in auth_events {
if auth_diff.contains(aid.borrow()) {
if !graph.contains_key(aid.borrow()) {
if auth_diff.contains(aid) {
if !graph.contains_key(aid) {
state.push(aid.to_owned());
}
// We just inserted this at the start of the while loop
graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned());
graph
.get_mut(&eid)
.expect("We just inserted this at the start of the while loop")
.insert(aid.to_owned());
}
}
}
}
async fn is_power_event_id<E, F, Fut>(event_id: &E::Id, fetch: &F) -> bool
async fn is_power_event_id<E, F, Fut>(event_id: &EventId, fetch: &F) -> bool
where
F: Fn(E::Id) -> Fut + Sync,
F: Fn(OwnedEventId) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E: Event + Send,
E::Id: Borrow<EventId> + Send + Sync,
{
match fetch(event_id.clone()).await.as_ref() {
match fetch(event_id.to_owned()).await.as_ref() {
| Some(state) => is_power_event(state),
| _ => false,
}
@@ -909,13 +892,13 @@ mod tests {
let fetcher = |id| ready(events.get(&id).cloned());
let sorted_power_events =
super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1)
super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher)
.await
.unwrap();
let resolved_power = super::iterative_auth_check(
&RoomVersion::V6,
sorted_power_events.iter().stream(),
sorted_power_events.iter().map(AsRef::as_ref).stream(),
HashMap::new(), // unconflicted events
&fetcher,
)
@@ -1300,7 +1283,7 @@ mod tests {
let ev_map = store.0.clone();
let fetcher = |id| ready(ev_map.get(&id).cloned());
let exists = |id: <PduEvent as Event>::Id| ready(ev_map.get(&*id).is_some());
let exists = |id: OwnedEventId| ready(ev_map.get(&*id).is_some());
let state_sets = [state_at_bob, state_at_charlie];
let auth_chain: Vec<_> = state_sets
@@ -1312,19 +1295,13 @@ mod tests {
})
.collect();
let resolved = match super::resolve(
&RoomVersionId::V2,
&state_sets,
&auth_chain,
&fetcher,
&exists,
1,
)
.await
{
| Ok(state) => state,
| Err(e) => panic!("{e}"),
};
let resolved =
match super::resolve(&RoomVersionId::V2, &state_sets, &auth_chain, &fetcher, &exists)
.await
{
| Ok(state) => state,
| Err(e) => panic!("{e}"),
};
assert_eq!(expected, resolved);
}
@@ -1429,21 +1406,15 @@ mod tests {
})
.collect();
let fetcher = |id: <PduEvent as Event>::Id| ready(ev_map.get(&id).cloned());
let exists = |id: <PduEvent as Event>::Id| ready(ev_map.get(&id).is_some());
let resolved = match super::resolve(
&RoomVersionId::V6,
&state_sets,
&auth_chain,
&fetcher,
&exists,
1,
)
.await
{
| Ok(state) => state,
| Err(e) => panic!("{e}"),
};
let fetcher = |id: OwnedEventId| ready(ev_map.get(&id).cloned());
let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some());
let resolved =
match super::resolve(&RoomVersionId::V6, &state_sets, &auth_chain, &fetcher, &exists)
.await
{
| Ok(state) => state,
| Err(e) => panic!("{e}"),
};
debug!(
resolved = ?resolved

View file

@@ -133,17 +133,11 @@ pub(crate) async fn do_check(
.collect();
let event_map = &event_map;
let fetch = |id: <PduEvent as Event>::Id| ready(event_map.get(&id).cloned());
let exists = |id: <PduEvent as Event>::Id| ready(event_map.get(&id).is_some());
let resolved = super::resolve(
&RoomVersionId::V6,
state_sets,
&auth_chain_sets,
&fetch,
&exists,
1,
)
.await;
let fetch = |id: OwnedEventId| ready(event_map.get(&id).cloned());
let exists = |id: OwnedEventId| ready(event_map.get(&id).is_some());
let resolved =
super::resolve(&RoomVersionId::V6, state_sets, &auth_chain_sets, &fetch, &exists)
.await;
match resolved {
| Ok(state) => state,
@@ -247,8 +241,8 @@ impl<E: Event + Clone> TestStore<E> {
pub(crate) fn auth_event_ids(
&self,
room_id: &RoomId,
event_ids: Vec<E::Id>,
) -> Result<HashSet<E::Id>> {
event_ids: Vec<OwnedEventId>,
) -> Result<HashSet<OwnedEventId>> {
let mut result = HashSet::new();
let mut stack = event_ids;
@@ -584,7 +578,7 @@ pub(crate) fn INITIAL_EDGES() -> Vec<OwnedEventId> {
pub(crate) mod event {
use ruma::{
MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
events::{TimelineEventType, pdu::Pdu},
};
use serde::{Deserialize, Serialize};
@@ -593,9 +587,7 @@ pub(crate) mod event {
use crate::Event;
impl Event for PduEvent {
type Id = OwnedEventId;
fn event_id(&self) -> &Self::Id { &self.event_id }
fn event_id(&self) -> &EventId { &self.event_id }
fn room_id(&self) -> &RoomId {
match &self.rest {
@@ -652,29 +644,31 @@ pub(crate) mod event {
}
#[allow(refining_impl_trait)]
fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + Send + '_> {
match &self.rest {
| Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)),
| Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()),
| Pdu::RoomV1Pdu(ev) =>
Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())),
| Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)),
#[allow(unreachable_patterns)]
| _ => unreachable!("new PDU version"),
}
}
#[allow(refining_impl_trait)]
fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + Send + '_> {
match &self.rest {
| Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)),
| Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()),
| Pdu::RoomV1Pdu(ev) =>
Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())),
| Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)),
#[allow(unreachable_patterns)]
| _ => unreachable!("new PDU version"),
}
}
fn redacts(&self) -> Option<&Self::Id> {
fn redacts(&self) -> Option<&EventId> {
match &self.rest {
| Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(),
| Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(),
| Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(),
| Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(),
#[allow(unreachable_patterns)]
| _ => unreachable!("new PDU version"),
}
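
The `as_deref` change is the standard `Option` borrow adapter: `ev.redacts` is an `Option<OwnedEventId>`, and since `OwnedEventId` derefs to `EventId`, `as_deref()` yields the `Option<&EventId>` that the narrowed trait signature now asks for. A minimal sketch:

```rust
use ruma::{EventId, OwnedEventId};

// Sketch: Option<OwnedEventId> -> Option<&EventId> via Deref.
fn borrow_redaction_target(redacts: &Option<OwnedEventId>) -> Option<&EventId> {
    redacts.as_deref()
}
```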

View file

@@ -29,7 +29,7 @@ fn descriptor_cf_options(
set_table_options(&mut opts, &desc, cache)?;
opts.set_min_write_buffer_number(1);
opts.set_max_write_buffer_number(2);
opts.set_max_write_buffer_number(3);
opts.set_write_buffer_size(desc.write_size);
opts.set_target_file_size_base(desc.file_size);
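
This one-line bump is the "allow one more immutable memtable" change from the commit list: with `max_write_buffer_number = 3`, one mutable memtable keeps absorbing writes while up to two sealed ones await flushing, which buys headroom during write floods. A sketch using the same options calls as above; the buffer size is a placeholder:

```rust
use rocksdb::Options;

// Sketch: column-family write-buffer tuning (values illustrative).
let mut opts = Options::default();
opts.set_min_write_buffer_number(1);
opts.set_max_write_buffer_number(3); // 1 active + up to 2 immutable memtables
opts.set_write_buffer_size(64 * 1024 * 1024); // hypothetical 64 MiB per buffer
```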

View file

@@ -98,12 +98,7 @@ pub(super) fn shutdown(server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
Level::INFO
};
debug!(
timeout = ?SHUTDOWN_TIMEOUT,
"Waiting for runtime..."
);
runtime.shutdown_timeout(SHUTDOWN_TIMEOUT);
wait_shutdown(server, runtime);
let runtime_metrics = server.server.metrics.runtime_interval().unwrap_or_default();
event!(LEVEL, ?runtime_metrics, "Final runtime metrics");
@@ -111,13 +106,23 @@ pub(super) fn shutdown(server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
#[cfg(not(tokio_unstable))]
#[tracing::instrument(name = "stop", level = "info", skip_all)]
pub(super) fn shutdown(_server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
pub(super) fn shutdown(server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
wait_shutdown(server, runtime);
}
fn wait_shutdown(_server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
debug!(
timeout = ?SHUTDOWN_TIMEOUT,
"Waiting for runtime..."
);
runtime.shutdown_timeout(SHUTDOWN_TIMEOUT);
// Join any jemalloc threads so they don't appear in use at exit.
#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))]
conduwuit_core::alloc::je::background_thread_enable(false)
.log_debug_err()
.ok();
}
#[tracing::instrument(
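
The refactor deduplicates the teardown: both cfg variants now funnel through `wait_shutdown`, and only the `tokio_unstable` build reads runtime metrics afterwards. The core of the helper is tokio's `Runtime::shutdown_timeout`, as in this sketch (the timeout value is illustrative, not the crate's constant):

```rust
use std::time::Duration;

// Sketch: bounded runtime teardown; tasks still running after the deadline
// are abandoned rather than awaited forever.
fn wait_shutdown_sketch(runtime: tokio::runtime::Runtime) {
    const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(30); // hypothetical
    runtime.shutdown_timeout(SHUTDOWN_TIMEOUT);
}
```
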

View file

@@ -165,7 +165,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
.timeline
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomTopicEventContent {
topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://conduwuit.puppyirl.gay/", services.config.server_name),
topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://continuwuity.org/", services.config.server_name),
}),
server_user,
&room_id,

View file

@@ -4,7 +4,6 @@ mod execute;
mod grant;
use std::{
future::Future,
pin::Pin,
sync::{Arc, RwLock as StdRwLock, Weak},
};
@@ -14,7 +13,7 @@ use conduwuit::{
Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder,
};
pub use create::create_admin_room;
use futures::{FutureExt, TryFutureExt};
use futures::{Future, FutureExt, TryFutureExt};
use loole::{Receiver, Sender};
use ruma::{
OwnedEventId, OwnedRoomId, RoomId, UserId,

View file

@@ -306,28 +306,25 @@ impl super::Service {
#[tracing::instrument(name = "srv", level = "debug", skip(self))]
async fn query_srv_record(&self, hostname: &'_ str) -> Result<Option<FedDest>> {
let hostnames =
[format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")];
self.services.server.check_running()?;
for hostname in hostnames {
self.services.server.check_running()?;
debug!("querying SRV for {hostname:?}");
debug!("querying SRV for {hostname:?}");
let hostname = hostname.trim_end_matches('.');
match self.resolver.resolver.srv_lookup(hostname).await {
| Err(e) => Self::handle_resolve_error(&e, hostname)?,
| Ok(result) => {
return Ok(result.iter().next().map(|result| {
FedDest::Named(
result.target().to_string().trim_end_matches('.').to_owned(),
format!(":{}", result.port())
.as_str()
.try_into()
.unwrap_or_else(|_| FedDest::default_port()),
)
}));
},
}
let hostname_suffix = format!("_matrix-fed._tcp.{hostname}.");
let hostname = hostname_suffix.trim_end_matches('.');
match self.resolver.resolver.srv_lookup(hostname).await {
| Err(e) => Self::handle_resolve_error(&e, hostname)?,
| Ok(result) => {
return Ok(result.iter().next().map(|result| {
FedDest::Named(
result.target().to_string().trim_end_matches('.').to_owned(),
format!(":{}", result.port())
.as_str()
.try_into()
.unwrap_or_else(|_| FedDest::default_port()),
)
}));
},
}
Ok(None)
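
Per the commit message ("probably incorrectly delete support for non-standardized matrix srv record"), the legacy `_matrix._tcp` lookup is dropped and only the spec's `_matrix-fed._tcp` record is queried, so the loop collapses into a single lookup. A self-contained sketch of the surviving path against a hickory-resolver-style API; the concrete resolver type in the codebase is wrapped, so these names are assumptions:

```rust
use hickory_resolver::TokioAsyncResolver;

// Sketch: resolve the federation SRV record, returning (target, port).
async fn query_matrix_srv(
    resolver: &TokioAsyncResolver,
    hostname: &str,
) -> Option<(String, u16)> {
    let name = format!("_matrix-fed._tcp.{hostname}.");
    let lookup = resolver.srv_lookup(name.trim_end_matches('.')).await.ok()?;
    let srv = lookup.iter().next()?;
    Some((srv.target().to_string().trim_end_matches('.').to_owned(), srv.port()))
}
```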

View file

@@ -8,7 +8,7 @@ use conduwuit::{
Error, Result, err, implement,
state_res::{self, StateMap},
trace,
utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt, automatic_width},
utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt},
};
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join};
use ruma::{OwnedEventId, RoomId, RoomVersionId};
@@ -112,14 +112,7 @@ where
{
let event_fetch = |event_id| self.event_fetch(event_id);
let event_exists = |event_id| self.event_exists(event_id);
state_res::resolve(
room_version,
state_sets,
auth_chain_sets,
&event_fetch,
&event_exists,
automatic_width(),
)
.map_err(|e| err!(error!("State resolution failed: {e:?}")))
.await
state_res::resolve(room_version, state_sets, auth_chain_sets, &event_fetch, &event_exists)
.map_err(|e| err!(error!("State resolution failed: {e:?}")))
.await
}