Compare commits


1 commit

Author: Renovate Bot
SHA1: b94ecea3f7
Message: chore(deps): update https://github.com/actions/setup-node action to v5
Date: 2025-09-07 05:01:20 +00:00
19 changed files with 144 additions and 331 deletions

View file

@ -13,12 +13,6 @@ outputs:
slug: slug:
description: 'Combined OS slug (e.g. Ubuntu-22.04)' description: 'Combined OS slug (e.g. Ubuntu-22.04)'
value: ${{ steps.detect.outputs.slug }} value: ${{ steps.detect.outputs.slug }}
node_major:
description: 'Major version of Node.js if available (e.g. 22)'
value: ${{ steps.detect.outputs.node_major }}
node_version:
description: 'Full Node.js version if available (e.g. 22.19.0)'
value: ${{ steps.detect.outputs.node_version }}
runs: runs:
using: composite using: composite
@ -36,20 +30,7 @@ runs:
# Create combined slug # Create combined slug
OS_SLUG="${OS_NAME}-${OS_VERSION}" OS_SLUG="${OS_NAME}-${OS_VERSION}"
# Detect Node.js version if available # Set outputs
if command -v node >/dev/null 2>&1; then
NODE_VERSION=$(node --version | sed 's/v//')
NODE_MAJOR=$(echo $NODE_VERSION | cut -d. -f1)
echo "node_version=${NODE_VERSION}" >> $GITHUB_OUTPUT
echo "node_major=${NODE_MAJOR}" >> $GITHUB_OUTPUT
echo "🔍 Detected Node.js: v${NODE_VERSION}"
else
echo "node_version=" >> $GITHUB_OUTPUT
echo "node_major=" >> $GITHUB_OUTPUT
echo "🔍 Node.js not found"
fi
# Set OS outputs
echo "name=${OS_NAME}" >> $GITHUB_OUTPUT echo "name=${OS_NAME}" >> $GITHUB_OUTPUT
echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT
echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT

View file

@ -2,12 +2,18 @@ name: sccache
description: | description: |
Install sccache for caching builds in GitHub Actions. Install sccache for caching builds in GitHub Actions.
inputs:
token:
description: 'A Github PAT'
required: false
runs: runs:
using: composite using: composite
steps: steps:
- name: Install sccache - name: Install sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1 uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
with:
token: ${{ inputs.token }}
- name: Configure sccache - name: Configure sccache
uses: https://github.com/actions/github-script@v7 uses: https://github.com/actions/github-script@v7
with: with:

View file

@ -88,9 +88,19 @@ runs:
# Shared toolchain cache across all Rust versions # Shared toolchain cache across all Rust versions
key: toolchain-${{ steps.runner-os.outputs.slug }} key: toolchain-${{ steps.runner-os.outputs.slug }}
- name: Debug GitHub token availability
shell: bash
run: |
if [ -z "${{ inputs.github-token }}" ]; then
echo "⚠️ No GitHub token provided - sccache will use fallback download method"
else
echo "✅ GitHub token provided for sccache"
fi
- name: Setup sccache - name: Setup sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1 uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
with:
token: ${{ inputs.github-token }}
- name: Cache build artifacts - name: Cache build artifacts
id: build-cache id: build-cache

View file

@ -49,23 +49,10 @@ jobs:
cp ./docs/static/_headers ./public/_headers cp ./docs/static/_headers ./public/_headers
echo "Copied .well-known files and _headers to ./public" echo "Copied .well-known files and _headers to ./public"
- name: Detect runner environment
id: runner-env
uses: ./.forgejo/actions/detect-runner-os
- name: Setup Node.js - name: Setup Node.js
if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
uses: https://github.com/actions/setup-node@v5 uses: https://github.com/actions/setup-node@v5
with: with:
node-version: 22 node-version: 20
- name: Cache npm dependencies
uses: actions/cache@v3
with:
path: ~/.npm
key: ${{ steps.runner-env.outputs.slug }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ steps.runner-env.outputs.slug }}-node-
- name: Install dependencies - name: Install dependencies
run: npm install --save-dev wrangler@latest run: npm install --save-dev wrangler@latest

View file

@ -11,6 +11,7 @@ on:
- ".gitignore" - ".gitignore"
- "renovate.json" - "renovate.json"
- "pkg/**" - "pkg/**"
- "docker/**"
- "docs/**" - "docs/**"
push: push:
branches: branches:
@ -22,6 +23,7 @@ on:
- ".gitignore" - ".gitignore"
- "renovate.json" - "renovate.json"
- "pkg/**" - "pkg/**"
- "docker/**"
- "docs/**" - "docs/**"
# Allows you to run this workflow manually from the Actions tab # Allows you to run this workflow manually from the Actions tab
workflow_dispatch: workflow_dispatch:
@ -53,9 +55,6 @@ jobs:
let images = [] let images = []
if (process.env.BUILTIN_REGISTRY_ENABLED === "true") { if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
images.push(builtinImage) images.push(builtinImage)
} else {
// Fallback to official registry for forks/PRs without credentials
images.push('forgejo.ellis.link/continuwuation/continuwuity')
} }
core.setOutput('images', images.join("\n")) core.setOutput('images', images.join("\n"))
core.setOutput('images_list', images.join(",")) core.setOutput('images_list', images.join(","))
@ -101,7 +100,6 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- name: Install rust - name: Install rust
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: rust-toolchain id: rust-toolchain
uses: ./.forgejo/actions/rust-toolchain uses: ./.forgejo/actions/rust-toolchain
@ -112,11 +110,9 @@ jobs:
driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }} driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }} endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
- name: Set up QEMU - name: Set up QEMU
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@v3
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
- name: Login to builtin registry - name: Login to builtin registry
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
registry: ${{ env.BUILTIN_REGISTRY }} registry: ${{ env.BUILTIN_REGISTRY }}
@ -142,21 +138,15 @@ jobs:
run: | run: |
calculatedSha=$(git rev-parse --short ${{ github.sha }}) calculatedSha=$(git rev-parse --short ${{ github.sha }})
echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
echo "Short SHA: $calculatedSha"
- name: Get Git commit timestamps - name: Get Git commit timestamps
run: | run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
timestamp=$(git log -1 --pretty=%ct)
echo "TIMESTAMP=$timestamp" >> $GITHUB_ENV
echo "Commit timestamp: $timestamp"
- uses: ./.forgejo/actions/timelord - uses: ./.forgejo/actions/timelord
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
with: with:
key: timelord-v0 key: timelord-v0
path: . path: .
- name: Cache Rust registry - name: Cache Rust registry
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: actions/cache@v3 uses: actions/cache@v3
with: with:
path: | path: |
@ -166,7 +156,6 @@ jobs:
.cargo/registry/src .cargo/registry/src
key: rust-registry-image-${{hashFiles('**/Cargo.lock') }} key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
- name: Cache cargo target - name: Cache cargo target
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: cache-cargo-target id: cache-cargo-target
uses: actions/cache@v3 uses: actions/cache@v3
with: with:
@ -174,7 +163,6 @@ jobs:
cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }} cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
key: cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}} key: cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
- name: Cache apt cache - name: Cache apt cache
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: cache-apt id: cache-apt
uses: actions/cache@v3 uses: actions/cache@v3
with: with:
@ -182,7 +170,6 @@ jobs:
var-cache-apt-${{ matrix.slug }} var-cache-apt-${{ matrix.slug }}
key: var-cache-apt-${{ matrix.slug }} key: var-cache-apt-${{ matrix.slug }}
- name: Cache apt lib - name: Cache apt lib
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
id: cache-apt-lib id: cache-apt-lib
uses: actions/cache@v3 uses: actions/cache@v3
with: with:
@ -190,7 +177,6 @@ jobs:
var-lib-apt-${{ matrix.slug }} var-lib-apt-${{ matrix.slug }}
key: var-lib-apt-${{ matrix.slug }} key: var-lib-apt-${{ matrix.slug }}
- name: inject cache into docker - name: inject cache into docker
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0 uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
with: with:
cache-map: | cache-map: |
@ -213,7 +199,7 @@ jobs:
context: . context: .
file: "docker/Dockerfile" file: "docker/Dockerfile"
build-args: | build-args: |
GIT_COMMIT_HASH=${{ github.sha }} GIT_COMMIT_HASH=${{ github.sha }})
GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }} GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
GIT_REMOTE_URL=${{github.event.repository.html_url }} GIT_REMOTE_URL=${{github.event.repository.html_url }}
GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }} GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
@ -223,23 +209,27 @@ jobs:
cache-from: type=gha cache-from: type=gha
# cache-to: type=gha,mode=max # cache-to: type=gha,mode=max
sbom: true sbom: true
outputs: | outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', needs.define-variables.outputs.images_list) || format('type=image,"name={0}",push=false', needs.define-variables.outputs.images_list) }}
type=local,dest=/tmp/binaries
env: env:
SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }} SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}
# For publishing multi-platform manifests # For publishing multi-platform manifests
- name: Export digest - name: Export digest
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
run: | run: |
mkdir -p /tmp/digests mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}" digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}" touch "/tmp/digests/${digest#sha256:}"
# Binary extracted via local output for all builds - name: Extract binary from container (image)
- name: Rename extracted binary id: extract-binary-image
run: mv /tmp/binaries/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }} run: |
mkdir -p /tmp/binaries
digest="${{ steps.build.outputs.digest }}"
echo "container_id=$(docker create --platform ${{ matrix.platform }} ${{ needs.define-variables.outputs.images_list }}@$digest)" >> $GITHUB_OUTPUT
- name: Extract binary from container (copy)
run: docker cp ${{ steps.extract-binary-image.outputs.container_id }}:/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
- name: Extract binary from container (cleanup)
run: docker rm ${{ steps.extract-binary-image.outputs.container_id }}
- name: Upload binary artifact - name: Upload binary artifact
uses: forgejo/upload-artifact@v4 uses: forgejo/upload-artifact@v4
@ -249,7 +239,6 @@ jobs:
if-no-files-found: error if-no-files-found: error
- name: Upload digest - name: Upload digest
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: forgejo/upload-artifact@v4 uses: forgejo/upload-artifact@v4
with: with:
name: digests-${{ matrix.slug }} name: digests-${{ matrix.slug }}
@ -262,7 +251,6 @@ jobs:
needs: [define-variables, build-image] needs: [define-variables, build-image]
steps: steps:
- name: Download digests - name: Download digests
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: forgejo/download-artifact@v4 uses: forgejo/download-artifact@v4
with: with:
path: /tmp/digests path: /tmp/digests
@ -270,7 +258,6 @@ jobs:
merge-multiple: true merge-multiple: true
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
- name: Login to builtin registry - name: Login to builtin registry
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
registry: ${{ env.BUILTIN_REGISTRY }} registry: ${{ env.BUILTIN_REGISTRY }}
@ -278,7 +265,6 @@ jobs:
password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx - name: Set up Docker Buildx
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@v3
with: with:
# Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125) # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
@ -286,7 +272,6 @@ jobs:
endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }} endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}
- name: Extract metadata (tags) for Docker - name: Extract metadata (tags) for Docker
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
id: meta id: meta
uses: docker/metadata-action@v5 uses: docker/metadata-action@v5
with: with:
@ -304,7 +289,6 @@ jobs:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index DOCKER_METADATA_ANNOTATIONS_LEVELS: index
- name: Create manifest list and push - name: Create manifest list and push
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
working-directory: /tmp/digests working-directory: /tmp/digests
env: env:
IMAGES: ${{needs.define-variables.outputs.images}} IMAGES: ${{needs.define-variables.outputs.images}}
@ -322,7 +306,6 @@ jobs:
done done
- name: Inspect image - name: Inspect image
if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
env: env:
IMAGES: ${{needs.define-variables.outputs.images}} IMAGES: ${{needs.define-variables.outputs.images}}
shell: bash shell: bash

View file

@ -7,6 +7,5 @@
"continuwuity", "continuwuity",
"homeserver", "homeserver",
"homeservers" "homeservers"
], ]
"rust-analyzer.cargo.features": ["full"]
} }

View file

@ -199,57 +199,32 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
EOF EOF
# Extract dynamically linked dependencies # Extract dynamically linked dependencies
RUN <<'DEPS_EOF' RUN <<EOF
set -o xtrace set -o xtrace
mkdir /out/libs /out/libs-root mkdir /out/libs
mkdir /out/libs-root
# Process each binary
for BINARY in /out/sbin/*; do for BINARY in /out/sbin/*; do
if lddtree_output=$(lddtree "$BINARY" 2>/dev/null) && [ -n "$lddtree_output" ]; then lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
echo "$lddtree_output" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | \
awk '{dest = ($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2; print "install -D " $1 " " dest}' | \
while read cmd; do eval "$cmd"; done
fi
done done
EOF
# Show what will be copied to runtime
echo "=== Libraries being copied to runtime image:"
find /out/libs* -type f 2>/dev/null | sort || echo "No libraries found"
DEPS_EOF
FROM ubuntu:latest AS prepper
# Create layer structure
RUN mkdir -p /layer1/etc/ssl/certs \
/layer2/usr/lib \
/layer3/sbin /layer3/sbom
# Copy SSL certs and root-path libraries to layer1 (ultra-stable)
COPY --from=base /etc/ssl/certs /layer1/etc/ssl/certs
COPY --from=builder /out/libs-root/ /layer1/
# Copy application libraries to layer2 (semi-stable)
COPY --from=builder /out/libs/ /layer2/usr/lib/
# Copy binaries and SBOM to layer3 (volatile)
COPY --from=builder /out/sbin/ /layer3/sbin/
COPY --from=builder /out/sbom/ /layer3/sbom/
# Fix permissions after copying
RUN chmod -R 755 /layer1 /layer2 /layer3
FROM scratch FROM scratch
WORKDIR / WORKDIR /
# Copy ultra-stable layer (SSL certs, system libraries) # Copy root certs for tls into image
COPY --from=prepper /layer1/ / # You can also mount the certs from the host
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
COPY --from=base /etc/ssl/certs /etc/ssl/certs
# Copy semi-stable layer (application libraries) # Copy our build
COPY --from=prepper /layer2/ / COPY --from=builder /out/sbin/ /sbin/
# Copy SBOM
COPY --from=builder /out/sbom/ /sbom/
# Copy volatile layer (binaries, SBOM) # Copy dynamic libraries to root
COPY --from=prepper /layer3/ / COPY --from=builder /out/libs-root/ /
COPY --from=builder /out/libs/ /usr/lib/
# Inform linker where to find libraries # Inform linker where to find libraries
ENV LD_LIBRARY_PATH=/usr/lib ENV LD_LIBRARY_PATH=/usr/lib

View file

@ -281,8 +281,15 @@ pub(super) async fn get_remote_pdu(
vec![(event_id, value, room_id)] vec![(event_id, value, room_id)]
}; };
info!("Attempting to handle event ID {event_id} as backfilled PDU");
self.services
.rooms
.timeline
.backfill_pdu(&server, response.pdu)
.await?;
let text = serde_json::to_string_pretty(&json)?; let text = serde_json::to_string_pretty(&json)?;
let msg = "Got PDU from specified server:"; let msg = "Got PDU from specified server and handled as backfilled";
write!(self, "{msg}. Event body:\n```json\n{text}\n```") write!(self, "{msg}. Event body:\n```json\n{text}\n```")
}, },
} }

View file

@ -57,5 +57,5 @@ pub(super) async fn pdus(
.try_collect() .try_collect()
.await?; .await?;
self.write_str(&format!("```\n{result:#?}\n```")).await self.write_str(&format!("{result:#?}")).await
} }

View file

@ -35,6 +35,7 @@ use ruma::{
}; };
use tracing::warn; use tracing::warn;
use super::utils::{count_to_token, parse_pagination_token as parse_token};
use crate::Ruma; use crate::Ruma;
/// list of safe and common non-state events to ignore if the user is ignored /// list of safe and common non-state events to ignore if the user is ignored
@ -84,14 +85,14 @@ pub(crate) async fn get_message_events_route(
let from: PduCount = body let from: PduCount = body
.from .from
.as_deref() .as_deref()
.map(str::parse) .map(parse_token)
.transpose()? .transpose()?
.unwrap_or_else(|| match body.dir { .unwrap_or_else(|| match body.dir {
| Direction::Forward => PduCount::min(), | Direction::Forward => PduCount::min(),
| Direction::Backward => PduCount::max(), | Direction::Backward => PduCount::max(),
}); });
let to: Option<PduCount> = body.to.as_deref().map(str::parse).transpose()?; let to: Option<PduCount> = body.to.as_deref().map(parse_token).transpose()?;
let limit: usize = body let limit: usize = body
.limit .limit
@ -180,8 +181,8 @@ pub(crate) async fn get_message_events_route(
.collect(); .collect();
Ok(get_message_events::v3::Response { Ok(get_message_events::v3::Response {
start: from.to_string(), start: count_to_token(from),
end: next_token.as_ref().map(PduCount::to_string), end: next_token.map(count_to_token),
chunk, chunk,
state, state,
}) })

View file

@ -37,6 +37,7 @@ pub(super) mod typing;
pub(super) mod unstable; pub(super) mod unstable;
pub(super) mod unversioned; pub(super) mod unversioned;
pub(super) mod user_directory; pub(super) mod user_directory;
pub(super) mod utils;
pub(super) mod voip; pub(super) mod voip;
pub(super) mod well_known; pub(super) mod well_known;

View file

@ -18,6 +18,7 @@ use ruma::{
events::{TimelineEventType, relation::RelationType}, events::{TimelineEventType, relation::RelationType},
}; };
use super::utils::{count_to_token, parse_pagination_token as parse_token};
use crate::Ruma; use crate::Ruma;
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
@ -110,14 +111,14 @@ async fn paginate_relations_with_filter(
dir: Direction, dir: Direction,
) -> Result<get_relating_events::v1::Response> { ) -> Result<get_relating_events::v1::Response> {
let start: PduCount = from let start: PduCount = from
.map(str::parse) .map(parse_token)
.transpose()? .transpose()?
.unwrap_or_else(|| match dir { .unwrap_or_else(|| match dir {
| Direction::Forward => PduCount::min(), | Direction::Forward => PduCount::min(),
| Direction::Backward => PduCount::max(), | Direction::Backward => PduCount::max(),
}); });
let to: Option<PduCount> = to.map(str::parse).transpose()?; let to: Option<PduCount> = to.map(parse_token).transpose()?;
// Use limit or else 30, with maximum 100 // Use limit or else 30, with maximum 100
let limit: usize = limit let limit: usize = limit
@ -192,7 +193,7 @@ async fn paginate_relations_with_filter(
| Direction::Forward => events.last(), | Direction::Forward => events.last(),
| Direction::Backward => events.first(), | Direction::Backward => events.first(),
} }
.map(|(count, _)| count.to_string()) .map(|(count, _)| count_to_token(*count))
} else { } else {
None None
}; };

View file

@ -18,7 +18,7 @@ pub(crate) async fn get_room_event_route(
let event = services let event = services
.rooms .rooms
.timeline .timeline
.get_remote_pdu(room_id, event_id) .get_pdu(event_id)
.map_err(|_| err!(Request(NotFound("Event {} not found.", event_id)))); .map_err(|_| err!(Request(NotFound("Event {} not found.", event_id))));
let visible = services let visible = services
@ -33,6 +33,11 @@ pub(crate) async fn get_room_event_route(
return Err!(Request(Forbidden("You don't have permission to view this event."))); return Err!(Request(Forbidden("You don't have permission to view this event.")));
} }
debug_assert!(
event.event_id() == event_id && event.room_id() == room_id,
"Fetched PDU must match requested"
);
event.add_age().ok(); event.add_age().ok();
Ok(get_room_event::v3::Response { event: event.into_format() }) Ok(get_room_event::v3::Response { event: event.into_format() })

src/api/client/utils.rs (new file, 28 lines)
View file

@ -0,0 +1,28 @@
use conduwuit::{
Result, err,
matrix::pdu::{PduCount, ShortEventId},
};
/// Parse a pagination token, trying ShortEventId first, then falling back to
/// PduCount
pub(crate) fn parse_pagination_token(token: &str) -> Result<PduCount> {
// Try parsing as ShortEventId first
if let Ok(shorteventid) = token.parse::<ShortEventId>() {
// ShortEventId maps directly to a PduCount in our database
Ok(PduCount::Normal(shorteventid))
} else if let Ok(count) = token.parse::<u64>() {
// Fallback to PduCount for backwards compatibility
Ok(PduCount::Normal(count))
} else if let Ok(count) = token.parse::<i64>() {
// Also handle negative counts for backfilled events
Ok(PduCount::from_signed(count))
} else {
Err(err!(Request(InvalidParam("Invalid pagination token"))))
}
}
/// Convert a PduCount to a token string (using the underlying ShortEventId)
pub(crate) fn count_to_token(count: PduCount) -> String {
// The PduCount's unsigned value IS the ShortEventId
count.into_unsigned().to_string()
}
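A minimal editorial sketch of how these helpers are expected to round-trip a token, assuming the file's own imports (Result, PduCount) and that PduCount::Normal wraps the ShortEventId count directly, as the comments above state; this is illustration, not part of the compared files:

fn pagination_token_roundtrip() -> Result<()> {
    // A ShortEventId-style token parses and formats back to the same string.
    let count = parse_pagination_token("1234567")?;
    assert_eq!(count_to_token(count), "1234567");
    // A signed token from a backfilled event still parses via PduCount::from_signed,
    // but count_to_token renders it back in its unsigned form.
    let backfilled = parse_pagination_token("-42")?;
    let _legacy_token = count_to_token(backfilled);
    Ok(())
}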

View file

@ -1,13 +1,12 @@
#![allow(deprecated)] #![allow(deprecated)]
use std::{borrow::Borrow, time::Instant, vec}; use std::borrow::Borrow;
use axum::extract::State; use axum::extract::State;
use conduwuit::{ use conduwuit::{
Err, Event, Result, at, debug, err, info, Err, Result, at, err,
matrix::event::gen_event_id_canonical_json, matrix::event::gen_event_id_canonical_json,
trace, utils::stream::{IterStream, TryBroadbandExt},
utils::stream::{BroadbandExt, IterStream, TryBroadbandExt},
warn, warn,
}; };
use conduwuit_service::Services; use conduwuit_service::Services;
@ -26,14 +25,12 @@ use serde_json::value::{RawValue as RawJsonValue, to_raw_value};
use crate::Ruma; use crate::Ruma;
/// helper method for /send_join v1 and v2 /// helper method for /send_join v1 and v2
#[tracing::instrument(skip(services, pdu, omit_members), fields(room_id = room_id.as_str(), origin = origin.as_str()))]
async fn create_join_event( async fn create_join_event(
services: &Services, services: &Services,
origin: &ServerName, origin: &ServerName,
room_id: &RoomId, room_id: &RoomId,
pdu: &RawJsonValue, pdu: &RawJsonValue,
omit_members: bool, ) -> Result<create_join_event::v1::RoomState> {
) -> Result<create_join_event::v2::RoomState> {
if !services.rooms.metadata.exists(room_id).await { if !services.rooms.metadata.exists(room_id).await {
return Err!(Request(NotFound("Room is unknown to this server."))); return Err!(Request(NotFound("Room is unknown to this server.")));
} }
@ -56,10 +53,8 @@ async fn create_join_event(
// We do not add the event_id field to the pdu here because of signature and // We do not add the event_id field to the pdu here because of signature and
// hashes checks // hashes checks
trace!("Getting room version");
let room_version_id = services.rooms.state.get_room_version(room_id).await?; let room_version_id = services.rooms.state.get_room_version(room_id).await?;
trace!("Generating event ID and converting to canonical json");
let Ok((event_id, mut value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { let Ok((event_id, mut value)) = gen_event_id_canonical_json(pdu, &room_version_id) else {
// Event could not be converted to canonical json // Event could not be converted to canonical json
return Err!(Request(BadJson("Could not convert event to canonical json."))); return Err!(Request(BadJson("Could not convert event to canonical json.")));
@ -108,6 +103,7 @@ async fn create_join_event(
))); )));
} }
// ACL check sender user server name
let sender: OwnedUserId = serde_json::from_value( let sender: OwnedUserId = serde_json::from_value(
value value
.get("sender") .get("sender")
@ -117,6 +113,12 @@ async fn create_join_event(
) )
.map_err(|e| err!(Request(BadJson(warn!("sender property is not a valid user ID: {e}")))))?; .map_err(|e| err!(Request(BadJson(warn!("sender property is not a valid user ID: {e}")))))?;
services
.rooms
.event_handler
.acl_check(sender.server_name(), room_id)
.await?;
// check if origin server is trying to send for another server // check if origin server is trying to send for another server
if sender.server_name() != origin { if sender.server_name() != origin {
return Err!(Request(Forbidden("Not allowed to join on behalf of another server."))); return Err!(Request(Forbidden("Not allowed to join on behalf of another server.")));
@ -178,6 +180,11 @@ async fn create_join_event(
} }
} }
services
.server_keys
.hash_and_sign_event(&mut value, &room_version_id)
.map_err(|e| err!(Request(InvalidParam(warn!("Failed to sign send_join event: {e}")))))?;
let origin: OwnedServerName = serde_json::from_value( let origin: OwnedServerName = serde_json::from_value(
value value
.get("origin") .get("origin")
@ -187,12 +194,6 @@ async fn create_join_event(
) )
.map_err(|e| err!(Request(BadJson("Event has an invalid origin server name: {e}"))))?; .map_err(|e| err!(Request(BadJson("Event has an invalid origin server name: {e}"))))?;
trace!("Signing send_join event");
services
.server_keys
.hash_and_sign_event(&mut value, &room_version_id)
.map_err(|e| err!(Request(InvalidParam(warn!("Failed to sign send_join event: {e}")))))?;
let mutex_lock = services let mutex_lock = services
.rooms .rooms
.event_handler .event_handler
@ -200,7 +201,6 @@ async fn create_join_event(
.lock(room_id) .lock(room_id)
.await; .await;
trace!("Acquired send_join mutex, persisting join event");
let pdu_id = services let pdu_id = services
.rooms .rooms
.event_handler .event_handler
@ -210,7 +210,7 @@ async fn create_join_event(
.ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?;
drop(mutex_lock); drop(mutex_lock);
trace!("Fetching current state IDs");
let state_ids: Vec<OwnedEventId> = services let state_ids: Vec<OwnedEventId> = services
.rooms .rooms
.state_accessor .state_accessor
@ -219,23 +219,9 @@ async fn create_join_event(
.collect() .collect()
.await; .await;
trace!(%omit_members, "Constructing current state");
let state = state_ids let state = state_ids
.iter() .iter()
.try_stream() .try_stream()
.broad_filter_map(|event_id| async move {
if omit_members {
if let Ok(e) = event_id.as_ref() {
let pdu = services.rooms.timeline.get_pdu(e).await;
if pdu.is_ok_and(|p| p.kind().to_cow_str() == "m.room.member") {
trace!("omitting member event {e:?} from returned state");
// skip members
return None;
}
}
}
Some(event_id)
})
.broad_and_then(|event_id| services.rooms.timeline.get_pdu_json(event_id)) .broad_and_then(|event_id| services.rooms.timeline.get_pdu_json(event_id))
.broad_and_then(|pdu| { .broad_and_then(|pdu| {
services services
@ -248,7 +234,6 @@ async fn create_join_event(
.await?; .await?;
let starting_events = state_ids.iter().map(Borrow::borrow); let starting_events = state_ids.iter().map(Borrow::borrow);
trace!("Constructing auth chain");
let auth_chain = services let auth_chain = services
.rooms .rooms
.auth_chain .auth_chain
@ -265,37 +250,13 @@ async fn create_join_event(
.try_collect() .try_collect()
.boxed() .boxed()
.await?; .await?;
info!(fast_join = %omit_members, "Sending join event to other servers");
services.sending.send_pdu_room(room_id, &pdu_id).await?; services.sending.send_pdu_room(room_id, &pdu_id).await?;
debug!("Finished sending join event");
let servers_in_room: Option<Vec<_>> = if !omit_members { Ok(create_join_event::v1::RoomState {
None
} else {
trace!("Fetching list of servers in room");
let servers: Vec<String> = services
.rooms
.state_cache
.room_servers(room_id)
.map(|sn| sn.as_str().to_owned())
.collect()
.await;
// If there's no servers, just add us
let servers = if servers.is_empty() {
warn!("Failed to find any servers, adding our own server name as a last resort");
vec![services.globals.server_name().to_string()]
} else {
trace!("Found {} servers in room", servers.len());
servers
};
Some(servers)
};
debug!("Returning send_join data");
Ok(create_join_event::v2::RoomState {
auth_chain, auth_chain,
state, state,
event: to_raw_value(&CanonicalJsonValue::Object(value)).ok(), event: to_raw_value(&CanonicalJsonValue::Object(value)).ok(),
members_omitted: omit_members,
servers_in_room,
}) })
} }
@ -333,23 +294,11 @@ pub(crate) async fn create_join_event_v1_route(
} }
} }
let now = Instant::now(); let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu)
let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu, false)
.boxed() .boxed()
.await?; .await?;
let transformed = create_join_event::v1::RoomState {
auth_chain: room_state.auth_chain,
state: room_state.state,
event: room_state.event,
};
info!(
"Finished sending a join for {} in {} in {:?}",
body.origin(),
&body.room_id,
now.elapsed()
);
Ok(create_join_event::v1::Response { room_state: transformed }) Ok(create_join_event::v1::Response { room_state })
} }
/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}`
@ -380,17 +329,17 @@ pub(crate) async fn create_join_event_v2_route(
} }
} }
let now = Instant::now(); let create_join_event::v1::RoomState { auth_chain, state, event } =
let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu)
create_join_event(&services, body.origin(), &body.room_id, &body.pdu, body.omit_members)
.boxed() .boxed()
.await?; .await?;
info!( let room_state = create_join_event::v2::RoomState {
"Finished sending a join for {} in {} in {:?}", members_omitted: false,
body.origin(), auth_chain,
&body.room_id, state,
now.elapsed() event,
); servers_in_room: None,
};
Ok(create_join_event::v2::Response { room_state }) Ok(create_join_event::v2::Response { room_state })
} }

View file

@ -1,10 +1,6 @@
#![allow(clippy::cast_possible_wrap, clippy::cast_sign_loss, clippy::as_conversions)] #![allow(clippy::cast_possible_wrap, clippy::cast_sign_loss, clippy::as_conversions)]
use std::{ use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr};
cmp::Ordering,
fmt::{self, Display},
str::FromStr,
};
use ruma::api::Direction; use ruma::api::Direction;

View file

@ -452,7 +452,7 @@ async fn get_statediff(&self, shortstatehash: ShortStateHash) -> Result<StateDif
.ok() .ok()
.take_if(|parent| *parent != 0); .take_if(|parent| *parent != 0);
debug_assert!(value.len().is_multiple_of(STRIDE), "value not aligned to stride"); debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride");
let _num_values = value.len() / STRIDE; let _num_values = value.len() / STRIDE;
let mut add_mode = true; let mut add_mode = true;
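For context, a minimal editorial sketch of the fixed-stride walk this assertion guards, not taken from the compared files; STRIDE = 8 bytes per encoded id and big-endian u64 encoding are assumptions for illustration:

const STRIDE: usize = 8;

fn decode_entries(value: &[u8]) -> Vec<u64> {
    // Mirrors the alignment check above, then walks the buffer one entry at a time.
    debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride");
    value
        .chunks_exact(STRIDE)
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is STRIDE bytes")))
        .collect()
}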

View file

@ -1,8 +1,7 @@
use std::iter::once; use std::iter::once;
use conduwuit::{Err, PduEvent};
use conduwuit_core::{ use conduwuit_core::{
Result, debug, debug_warn, err, implement, info, Result, debug, debug_warn, implement, info,
matrix::{ matrix::{
event::Event, event::Event,
pdu::{PduCount, PduId, RawPduId}, pdu::{PduCount, PduId, RawPduId},
@ -12,7 +11,7 @@ use conduwuit_core::{
}; };
use futures::{FutureExt, StreamExt}; use futures::{FutureExt, StreamExt};
use ruma::{ use ruma::{
CanonicalJsonObject, EventId, RoomId, ServerName, RoomId, ServerName,
api::federation, api::federation,
events::{ events::{
StateEventType, TimelineEventType, room::power_levels::RoomPowerLevelsEventContent, StateEventType, TimelineEventType, room::power_levels::RoomPowerLevelsEventContent,
@ -101,7 +100,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
.boxed(); .boxed();
while let Some(ref backfill_server) = servers.next().await { while let Some(ref backfill_server) = servers.next().await {
info!("Asking {backfill_server} for backfill in {room_id}"); info!("Asking {backfill_server} for backfill");
let response = self let response = self
.services .services
.sending .sending
@ -129,126 +128,10 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
} }
} }
warn!("No servers could backfill, but backfill was needed in room {room_id}"); info!("No servers could backfill, but backfill was needed in room {room_id}");
Ok(()) Ok(())
} }
#[implement(super::Service)]
#[tracing::instrument(name = "get_remote_pdu", level = "debug", skip(self))]
pub async fn get_remote_pdu(&self, room_id: &RoomId, event_id: &EventId) -> Result<PduEvent> {
let local = self.get_pdu(event_id).await;
if local.is_ok() {
// We already have this PDU, no need to backfill
debug!("We already have {event_id} in {room_id}, no need to backfill.");
return local;
}
debug!("Preparing to fetch event {event_id} in room {room_id} from remote servers.");
// Similar to backfill_if_required, but only for a single PDU
// Fetch a list of servers to try
if self
.services
.state_cache
.room_joined_count(room_id)
.await
.is_ok_and(|count| count <= 1)
&& !self
.services
.state_accessor
.is_world_readable(room_id)
.await
{
// Room is empty (1 user or none), there is no one that can backfill
return Err!(Request(NotFound("No one can backfill this PDU, room is empty.")));
}
let power_levels: RoomPowerLevelsEventContent = self
.services
.state_accessor
.room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "")
.await
.unwrap_or_default();
let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| {
if level > &power_levels.users_default && !self.services.globals.user_is_local(user_id) {
Some(user_id.server_name())
} else {
None
}
});
let canonical_room_alias_server = once(
self.services
.state_accessor
.get_canonical_alias(room_id)
.await,
)
.filter_map(Result::ok)
.map(|alias| alias.server_name().to_owned())
.stream();
let mut servers = room_mods
.stream()
.map(ToOwned::to_owned)
.chain(canonical_room_alias_server)
.chain(
self.services
.server
.config
.trusted_servers
.iter()
.map(ToOwned::to_owned)
.stream(),
)
.ready_filter(|server_name| !self.services.globals.server_is_ours(server_name))
.filter_map(|server_name| async move {
self.services
.state_cache
.server_in_room(&server_name, room_id)
.await
.then_some(server_name)
})
.boxed();
while let Some(ref backfill_server) = servers.next().await {
info!("Asking {backfill_server} for event {}", event_id);
let value = self
.services
.sending
.send_federation_request(backfill_server, federation::event::get_event::v1::Request {
event_id: event_id.to_owned(),
include_unredacted_content: Some(false),
})
.await
.and_then(|response| {
serde_json::from_str::<CanonicalJsonObject>(response.pdu.get()).map_err(|e| {
err!(BadServerResponse(debug_warn!(
"Error parsing incoming event {e:?} from {backfill_server}"
)))
})
});
let pdu = match value {
| Ok(value) => {
self.services
.event_handler
.handle_incoming_pdu(backfill_server, room_id, event_id, value, false)
.boxed()
.await?;
debug!("Successfully backfilled {event_id} from {backfill_server}");
Some(self.get_pdu(event_id).await)
},
| Err(e) => {
warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}");
None
},
};
if let Some(pdu) = pdu {
debug!("Fetched {event_id} from {backfill_server}");
return pdu;
}
}
Err!("No servers could be used to fetch {} in {}.", room_id, event_id)
}
#[implement(super::Service)] #[implement(super::Service)]
#[tracing::instrument(skip(self, pdu), level = "debug")] #[tracing::instrument(skip(self, pdu), level = "debug")]
pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box<RawJsonValue>) -> Result<()> { pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box<RawJsonValue>) -> Result<()> {

View file

@ -3,7 +3,8 @@ use std::{borrow::Borrow, sync::Arc};
use conduwuit::{ use conduwuit::{
Err, PduCount, PduEvent, Result, at, err, Err, PduCount, PduEvent, Result, at, err,
result::{LogErr, NotFound}, result::{LogErr, NotFound},
utils::{self, stream::TryReadyExt}, utils,
utils::stream::TryReadyExt,
}; };
use database::{Database, Deserialized, Json, KeyVal, Map}; use database::{Database, Deserialized, Json, KeyVal, Map};
use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut}; use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut};