Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git (synced 2025-07-05 14:45:48 +02:00)

Compare commits: 14 commits, dd372f77eb ... f840372c92
Commits in this range:

f840372c92
f4b93e6fbf
66e6d09acc
9d9495d957
b735ad0cf2
7ab623d694
d5b3755242
3c44dccd65
b57be072c7
ea5dc8e09d
b9d60c64e5
94ae824149
640714922b
2b268fdaf3
17 changed files with 200 additions and 61 deletions

@@ -19,11 +19,20 @@ outputs:
  rustc_version:
    description: The rustc version installed
    value: ${{ steps.rustc-version.outputs.version }}
+  rustup_version:
+    description: The rustup version installed
+    value: ${{ steps.rustup-version.outputs.version }}

runs:
  using: composite
  steps:
+    - name: Check if rustup is already installed
+      shell: bash
+      id: rustup-version
+      run: |
+        echo "version=$(rustup --version)" >> $GITHUB_OUTPUT
    - name: Cache rustup toolchains
+      if: steps.rustup-version.outputs.version == ''
      uses: actions/cache@v3
      with:
        path: |

@@ -33,6 +42,7 @@ runs:
        # Requires repo to be cloned if toolchain is not specified
        key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
    - name: Install Rust toolchain
+      if: steps.rustup-version.outputs.version == ''
      shell: bash
      run: |
        if ! command -v rustup &> /dev/null ; then

@@ -57,7 +57,6 @@ jobs:

  build-image:
    runs-on: dind
    container: ghcr.io/catthehacker/ubuntu:act-latest
    needs: define-variables
    permissions:
      contents: read

@@ -188,7 +187,7 @@ jobs:
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          # cache-to: type=gha,mode=max
          sbom: true
          outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
        env:

@@ -211,7 +210,6 @@ jobs:

  merge:
    runs-on: dind
    container: ghcr.io/catthehacker/ubuntu:act-latest
    needs: [define-variables, build-image]
    steps:
      - name: Download digests

@@ -6,6 +6,7 @@ use conduwuit::{
	warn,
};
use futures::StreamExt;
+use futures::FutureExt;
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId};

use crate::{admin_command, admin_command_dispatch, get_room_info};

@@ -155,7 +156,10 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
                evicting admins too)",
            );

-            if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
+            if let Err(e) = leave_room(self.services, user_id, &room_id, None)
+                .boxed()
+                .await
+            {
                warn!("Failed to leave room: {e}");
            }

@@ -323,7 +327,10 @@ async fn ban_list_of_rooms(&self) -> Result {
                evicting admins too)",
            );

-            if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
+            if let Err(e) = leave_room(self.services, user_id, &room_id, None)
+                .boxed()
+                .await
+            {
                warn!("Failed to leave room: {e}");
            }
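
Note on the recurring change in these commits: calls like leave_room(...) are now wrapped with FutureExt::boxed() before being awaited. A minimal standalone sketch of that pattern, using placeholder functions rather than this repository's API:

use futures::FutureExt;

// Stand-in for a large or (indirectly) recursive async function such as leave_room().
async fn leave_room(user_id: &str, room_id: &str) -> Result<(), String> {
    println!("{user_id} leaves {room_id}");
    Ok(())
}

async fn leave_all_rooms(user_id: &str, rooms: &[&str]) {
    for room_id in rooms {
        // `.boxed()` moves the future to the heap as Pin<Box<dyn Future + Send>>,
        // keeping the caller's own future small and letting recursive call chains
        // (leave_room -> ... -> leave_room) type-check.
        if let Err(e) = leave_room(user_id, room_id).boxed().await {
            eprintln!("Failed to leave {room_id}: {e}");
        }
    }
}

fn main() {
    futures::executor::block_on(leave_all_rooms(
        "@alice:example.org",
        &["!a:example.org", "!b:example.org"],
    ));
}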

@@ -9,6 +9,7 @@ use conduwuit::{
};
use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
use futures::StreamExt;
+use futures::FutureExt;
use ruma::{
	OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId,
	events::{

@@ -655,7 +656,9 @@ pub(super) async fn force_leave_room(
        return Err!("{user_id} is not joined in the room");
    }

-    leave_room(self.services, &user_id, &room_id, None).await?;
+    leave_room(self.services, &user_id, &room_id, None)
+        .boxed()
+        .await?;

    self.write_str(&format!("{user_id} has left {room_id}.",))
        .await

@@ -763,7 +763,9 @@ pub(crate) async fn deactivate_route(
    super::update_displayname(&services, sender_user, None, &all_joined_rooms).await;
    super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await;

-    full_user_deactivate(&services, sender_user, &all_joined_rooms).await?;
+    full_user_deactivate(&services, sender_user, &all_joined_rooms)
+        .boxed()
+        .await?;

    info!("User {sender_user} deactivated their account.");

@@ -915,7 +917,9 @@ pub async fn full_user_deactivate(
        }
    }

-    super::leave_all_rooms(services, user_id).await;
+    super::leave_all_rooms(services, user_id)
+        .boxed()
+        .await;

    Ok(())
}

@@ -114,7 +114,9 @@ async fn banned_room_check(
                .collect()
                .await;

-            full_user_deactivate(services, user_id, &all_joined_rooms).await?;
+            full_user_deactivate(services, user_id, &all_joined_rooms)
+                .boxed()
+                .await?;
        }

        return Err!(Request(Forbidden("This room is banned on this homeserver.")));

@@ -153,7 +155,9 @@
                .collect()
                .await;

-            full_user_deactivate(services, user_id, &all_joined_rooms).await?;
+            full_user_deactivate(services, user_id, &all_joined_rooms)
+                .boxed()
+                .await?;
        }

        return Err!(Request(Forbidden("This remote server is banned on this homeserver.")));

@@ -259,6 +263,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
            room_id.server_name(),
            client,
        )
+        .boxed()
        .await?;

        let mut servers = body.via.clone();

@@ -478,6 +483,7 @@ pub(crate) async fn leave_room_route(
    body: Ruma<leave_room::v3::Request>,
) -> Result<leave_room::v3::Response> {
    leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone())
+        .boxed()
        .await
        .map(|()| leave_room::v3::Response::new())
}

@@ -1792,7 +1798,10 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) {

    for room_id in all_rooms {
        // ignore errors
-        if let Err(e) = leave_room(services, user_id, &room_id, None).await {
+        if let Err(e) = leave_room(services, user_id, &room_id, None)
+            .boxed()
+            .await
+        {
            warn!(%user_id, "Failed to leave {room_id} remotely: {e}");
        }

@@ -2162,6 +2171,109 @@ async fn knock_room_by_id_helper(
        }
    }

+    // For knock_restricted rooms, check if the user meets the restricted conditions
+    // If they do, attempt to join instead of knock
+    // This is not mentioned in the spec, but should be allowable (we're allowed to
+    // auto-join invites to knocked rooms)
+    let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
+    if let JoinRule::KnockRestricted(restricted) = &join_rule {
+        let restriction_rooms: Vec<_> = restricted
+            .allow
+            .iter()
+            .filter_map(|a| match a {
+                | AllowRule::RoomMembership(r) => Some(&r.room_id),
+                | _ => None,
+            })
+            .collect();
+
+        // Check if the user is in any of the allowed rooms
+        let mut user_meets_restrictions = false;
+        for restriction_room_id in &restriction_rooms {
+            if services
+                .rooms
+                .state_cache
+                .is_joined(sender_user, restriction_room_id)
+                .await
+            {
+                user_meets_restrictions = true;
+                break;
+            }
+        }
+
+        // If the user meets the restrictions, try joining instead
+        if user_meets_restrictions {
+            debug_info!(
+                "{sender_user} meets the restricted criteria in knock_restricted room \
+                 {room_id}, attempting to join instead of knock"
+            );
+            // For this case, we need to drop the state lock and get a new one in
+            // join_room_by_id_helper We need to release the lock here and let
+            // join_room_by_id_helper acquire it again
+            drop(state_lock);
+            match join_room_by_id_helper(
+                services,
+                sender_user,
+                room_id,
+                reason.clone(),
+                servers,
+                None,
+                &None,
+            )
+            .await
+            {
+                | Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())),
+                | Err(e) => {
+                    debug_warn!(
+                        "Failed to convert knock to join for {sender_user} in {room_id}: {e:?}"
+                    );
+                    // Get a new state lock for the remaining knock logic
+                    let new_state_lock = services.rooms.state.mutex.lock(room_id).await;
+
+                    let server_in_room = services
+                        .rooms
+                        .state_cache
+                        .server_in_room(services.globals.server_name(), room_id)
+                        .await;
+
+                    let local_knock = server_in_room
+                        || servers.is_empty()
+                        || (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
+
+                    if local_knock {
+                        knock_room_helper_local(
+                            services,
+                            sender_user,
+                            room_id,
+                            reason,
+                            servers,
+                            new_state_lock,
+                        )
+                        .boxed()
+                        .await?;
+                    } else {
+                        knock_room_helper_remote(
+                            services,
+                            sender_user,
+                            room_id,
+                            reason,
+                            servers,
+                            new_state_lock,
+                        )
+                        .boxed()
+                        .await?;
+                    }
+
+                    return Ok(knock_room::v3::Response::new(room_id.to_owned()));
+                },
+            }
+        }
+    } else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
+        debug_warn!(
+            "{sender_user} attempted to knock on room {room_id} but its join rule is \
+             {join_rule:?}, not knock or knock_restricted"
+        );
+    }
+
    let server_in_room = services
        .rooms
        .state_cache

@@ -2209,6 +2321,12 @@ async fn knock_room_helper_local(
        return Err!(Request(Forbidden("This room does not support knocking.")));
    }

+    // Verify that this room has a valid knock or knock_restricted join rule
+    let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
+    if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
+        return Err!(Request(Forbidden("This room's join rule does not allow knocking.")));
+    }
+
    let content = RoomMemberEventContent {
        displayname: services.users.displayname(sender_user).await.ok(),
        avatar_url: services.users.avatar_url(sender_user).await.ok(),
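
The block added to knock_room_by_id_helper above upgrades a knock to a join attempt when the user already belongs to one of a knock_restricted room's allow rooms. A simplified sketch of that decision, with plain enums standing in for the ruma join-rule types:

#[allow(dead_code)]
enum AllowRule {
    RoomMembership(String),
    Other,
}

#[allow(dead_code)]
enum JoinRule {
    Knock,
    KnockRestricted(Vec<AllowRule>),
    Other,
}

/// True when a knock should become a join attempt: the rule is knock_restricted
/// and the user is already joined to at least one room named by its
/// RoomMembership allow rules.
fn should_attempt_join(joined_rooms: &[String], rule: &JoinRule) -> bool {
    match rule {
        JoinRule::KnockRestricted(allow) => allow.iter().any(|a| match a {
            AllowRule::RoomMembership(room_id) => joined_rooms.contains(room_id),
            AllowRule::Other => false,
        }),
        _ => false,
    }
}

fn main() {
    let joined = vec!["!space:example.org".to_owned()];
    let rule = JoinRule::KnockRestricted(vec![AllowRule::RoomMembership(
        "!space:example.org".to_owned(),
    )]);
    assert!(should_attempt_join(&joined, &rule));
}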

@@ -121,7 +121,9 @@ where
        .map(|(key, val)| (key, val.collect()))
        .collect();

-    if !populate {
+    if populate {
+        rooms.push(summary_to_chunk(summary.clone()));
+    } else {
        children = children
            .iter()
            .rev()

@@ -144,10 +146,8 @@ where
            .collect();
    }

-    if populate {
-        rooms.push(summary_to_chunk(summary.clone()));
-    } else if queue.is_empty() && children.is_empty() {
-        return Err!(Request(InvalidParam("Room IDs in token were not found.")));
+    if !populate && queue.is_empty() && children.is_empty() {
+        break;
    }

    parents.insert(current_room.clone());
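
The two space-hierarchy hunks above move the rooms.push ahead of child expansion and turn an exhausted pagination resume into a clean loop exit rather than an InvalidParam error. A heavily simplified sketch of that termination condition only (string room IDs, child lookup as a closure; not the real summary logic):

fn walk_hierarchy(start: &str, populate: bool, children_of: impl Fn(&str) -> Vec<String>) -> Vec<String> {
    let mut rooms = Vec::new();
    let mut queue = vec![start.to_owned()];

    while let Some(current_room) = queue.pop() {
        let children = children_of(&current_room);

        if populate {
            rooms.push(current_room.clone());
        }

        // Resuming from a token (populate == false) with nothing left to visit:
        // stop quietly instead of returning an error.
        if !populate && queue.is_empty() && children.is_empty() {
            break;
        }

        queue.extend(children);
    }

    rooms
}

fn main() {
    let rooms = walk_hierarchy("!root:example.org", true, |_| Vec::new());
    assert_eq!(rooms, vec!["!root:example.org".to_owned()]);
}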

@@ -6,6 +6,7 @@ use conduwuit::{
};
use conduwuit_service::Services;
use futures::TryStreamExt;
+use futures::FutureExt;
use ruma::{
	OwnedEventId, RoomId, UserId,
	api::client::state::{get_state_events, get_state_events_for_key, send_state_event},

@@ -59,6 +60,7 @@ pub(crate) async fn send_state_event_for_empty_key_route(
    body: Ruma<send_state_event::v3::Request>,
) -> Result<RumaResponse<send_state_event::v3::Response>> {
    send_state_event_for_key_route(State(services), body)
+        .boxed()
        .await
        .map(RumaResponse)
}

@@ -1009,8 +1009,6 @@ async fn calculate_state_incremental<'a>(
) -> Result<StateChanges> {
    let since_shortstatehash = since_shortstatehash.unwrap_or(current_shortstatehash);

-    let state_changed = since_shortstatehash != current_shortstatehash;
-
    let encrypted_room = services
        .rooms
        .state_accessor

@@ -1042,7 +1040,7 @@ async fn calculate_state_incremental<'a>(
        })
        .into();

-    let state_diff_ids: OptionFuture<_> = (!full_state && state_changed)
+    let state_diff_ids: OptionFuture<_> = (!full_state)
        .then(|| {
            StreamExt::into_future(
                services
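
The sync hunk above drops the state_changed guard, so the incremental state diff is now built whenever full_state is false. For context, a small standalone sketch of the OptionFuture pattern it relies on (futures crate): a bool gates whether the future is constructed, and the combined future resolves to None when it was not:

use futures::future::OptionFuture;

async fn load_state_diff() -> Vec<u64> {
    // Stand-in for the real shortstatehash diff lookup.
    vec![1, 2, 3]
}

async fn demo(full_state: bool) -> Option<Vec<u64>> {
    // bool::then yields Option<impl Future>; .into() wraps it as an OptionFuture.
    let state_diff_ids: OptionFuture<_> = (!full_state).then(|| load_state_diff()).into();
    state_diff_ids.await
}

fn main() {
    assert!(futures::executor::block_on(demo(true)).is_none());
    assert_eq!(futures::executor::block_on(demo(false)), Some(vec![1, 2, 3]));
}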

@@ -79,12 +79,12 @@ fn main() {

    // --- Rerun Triggers ---
+    // TODO: The git rerun triggers seem to always run
-    // Rerun if the git HEAD changes
-    println!("cargo:rerun-if-changed=.git/HEAD");
-    // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch)
-    if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"]) {
-        println!("cargo:rerun-if-changed=.git/{ref_path}");
-    }
+    // // Rerun if the git HEAD changes
+    // println!("cargo:rerun-if-changed=.git/HEAD");
+    // // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch)
+    // if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"])
+    // { println!("cargo:rerun-if-changed=.git/{ref_path}");
+    // }

    println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH");
    println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH_SHORT");

@@ -2059,41 +2059,41 @@ fn default_database_backups_to_keep() -> i16 { 1 }

fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) }

-fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) }
+fn default_db_cache_capacity_mb() -> f64 { 256.0 + parallelism_scaled_f64(256.0) }

-fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
+fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(200_000) }

fn default_cache_capacity_modifier() -> f64 { 1.0 }

fn default_auth_chain_cache_capacity() -> u32 {
-    parallelism_scaled_u32(10_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
}

fn default_shorteventid_cache_capacity() -> u32 {
-    parallelism_scaled_u32(50_000).saturating_add(100_000)
+    parallelism_scaled_u32(50_000).saturating_add(200_000)
}

fn default_eventidshort_cache_capacity() -> u32 {
-    parallelism_scaled_u32(25_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
}

fn default_eventid_pdu_cache_capacity() -> u32 {
-    parallelism_scaled_u32(25_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
}

fn default_shortstatekey_cache_capacity() -> u32 {
-    parallelism_scaled_u32(10_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
}

fn default_statekeyshort_cache_capacity() -> u32 {
-    parallelism_scaled_u32(10_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
}

fn default_servernameevent_data_cache_capacity() -> u32 {
-    parallelism_scaled_u32(100_000).saturating_add(500_000)
+    parallelism_scaled_u32(200_000).saturating_add(500_000)
}

-fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
+fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(2000) }

fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
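
The raised defaults above all flow through parallelism_scaled_u32 / parallelism_scaled_f64, whose definitions are not part of this diff. A hedged sketch of what such a helper plausibly does (assumed here: multiply a base value by the detected CPU parallelism), to make the new numbers concrete:

use std::thread::available_parallelism;

// Assumption for illustration only: scale a base value by the number of CPUs.
fn parallelism_scaled_u32(base: u32) -> u32 {
    let cpus = available_parallelism().map(|n| n.get()).unwrap_or(1) as u32;
    base.saturating_mul(cpus)
}

fn default_pdu_cache_capacity() -> u32 {
    // With the new constants on an assumed 8-core host:
    // 25_000 * 8 + 200_000 = 400_000 entries (previously 10_000 * 8 + 100_000 = 180_000).
    parallelism_scaled_u32(25_000).saturating_add(200_000)
}

fn main() {
    println!("default_pdu_cache_capacity = {}", default_pdu_cache_capacity());
}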

@@ -638,7 +638,7 @@ fn valid_membership_change(
        warn!(?target_user_membership_event_id, "Banned user can't join");
        false
    } else if (join_rules == JoinRule::Invite
-        || room_version.allow_knocking && join_rules == JoinRule::Knock)
+        || room_version.allow_knocking && (join_rules == JoinRule::Knock || matches!(join_rules, JoinRule::KnockRestricted(_))))
        // If the join_rule is invite then allow if membership state is invite or join
        && (target_user_current_membership == MembershipState::Join
            || target_user_current_membership == MembershipState::Invite)

@@ -21,7 +21,10 @@ pub use ::toml;
pub use ::tracing;
pub use config::Config;
pub use error::Error;
-pub use info::{rustc_flags_capture, version, version::version};
+pub use info::{
+	rustc_flags_capture, version,
+	version::{name, version},
+};
pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res};
pub use server::Server;
pub use utils::{ctor, dtor, implement, result, result::Result};

@@ -29,7 +29,7 @@ fn descriptor_cf_options(
    set_table_options(&mut opts, &desc, cache)?;

    opts.set_min_write_buffer_number(1);
-    opts.set_max_write_buffer_number(2);
+    opts.set_max_write_buffer_number(3);
    opts.set_write_buffer_size(desc.write_size);

    opts.set_target_file_size_base(desc.file_size);
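
A standalone sketch (rust-rocksdb crate) of the write-buffer options touched above; raising max_write_buffer_number from 2 to 3 lets one more immutable memtable queue for flushing before writes stall. The write_size value here is an arbitrary placeholder, not the repository's descriptor value:

use rocksdb::Options;

fn write_buffer_opts(write_size: usize) -> Options {
    let mut opts = Options::default();
    // One active memtable minimum, up to three in total (active + pending flush).
    opts.set_min_write_buffer_number(1);
    opts.set_max_write_buffer_number(3);
    // Size of each memtable before it is marked immutable.
    opts.set_write_buffer_size(write_size);
    opts
}

fn main() {
    let _opts = write_buffer_opts(64 * 1024 * 1024); // 64 MiB per memtable
}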

@@ -15,7 +15,7 @@ use conduwuit_core::{
#[clap(
	about,
	long_about = None,
-	name = "conduwuit",
+	name = conduwuit_core::name(),
	version = conduwuit_core::version(),
)]
pub(crate) struct Args {
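
The clap change above replaces the hard-coded "conduwuit" name with a value computed by conduwuit_core::name(); clap's derive attributes accept expressions for name and version. A standalone sketch with placeholder helpers (not the repository's functions):

use clap::Parser;

fn app_name() -> &'static str { "example-app" }
fn app_version() -> &'static str { env!("CARGO_PKG_VERSION") }

#[derive(Parser, Debug)]
#[clap(
    about,
    long_about = None,
    name = app_name(),
    version = app_version(),
)]
struct Args {
    /// Illustrative flag: path to a configuration file.
    #[clap(long)]
    config: Option<String>,
}

fn main() {
    let args = Args::parse();
    println!("{args:?}");
}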

@@ -4,7 +4,6 @@ mod execute;
mod grant;

use std::{
-	future::Future,
	pin::Pin,
	sync::{Arc, RwLock as StdRwLock, Weak},
};

@@ -14,7 +13,7 @@ use conduwuit::{
	Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder,
};
pub use create::create_admin_room;
-use futures::{FutureExt, TryFutureExt};
+use futures::{Future, FutureExt, TryFutureExt};
use loole::{Receiver, Sender};
use ruma::{
	OwnedEventId, OwnedRoomId, RoomId, UserId,

@@ -306,28 +306,25 @@ impl super::Service {

    #[tracing::instrument(name = "srv", level = "debug", skip(self))]
    async fn query_srv_record(&self, hostname: &'_ str) -> Result<Option<FedDest>> {
-        let hostnames =
-            [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")];
-
-        for hostname in hostnames {
-            self.services.server.check_running()?;
-
-            debug!("querying SRV for {hostname:?}");
-            let hostname = hostname.trim_end_matches('.');
-            match self.resolver.resolver.srv_lookup(hostname).await {
-                | Err(e) => Self::handle_resolve_error(&e, hostname)?,
-                | Ok(result) => {
-                    return Ok(result.iter().next().map(|result| {
-                        FedDest::Named(
-                            result.target().to_string().trim_end_matches('.').to_owned(),
-                            format!(":{}", result.port())
-                                .as_str()
-                                .try_into()
-                                .unwrap_or_else(|_| FedDest::default_port()),
-                        )
-                    }));
-                },
-            }
-        }
+        self.services.server.check_running()?;
+
+        debug!("querying SRV for {hostname:?}");
+
+        let hostname_suffix = format!("_matrix-fed._tcp.{hostname}.");
+        let hostname = hostname_suffix.trim_end_matches('.');
+        match self.resolver.resolver.srv_lookup(hostname).await {
+            | Err(e) => Self::handle_resolve_error(&e, hostname)?,
+            | Ok(result) => {
+                return Ok(result.iter().next().map(|result| {
+                    FedDest::Named(
+                        result.target().to_string().trim_end_matches('.').to_owned(),
+                        format!(":{}", result.port())
+                            .as_str()
+                            .try_into()
+                            .unwrap_or_else(|_| FedDest::default_port()),
+                    )
+                }));
+            },
+        }

        Ok(None)
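
After this change only the modern _matrix-fed._tcp SRV prefix is queried; the deprecated _matrix._tcp fallback loop is gone. A trivial standalone sketch of the lookup name handed to the resolver (the trailing dot is appended and then trimmed, mirroring the shape of the real code):

fn srv_query_name(hostname: &str) -> String {
    format!("_matrix-fed._tcp.{hostname}.")
        .trim_end_matches('.')
        .to_owned()
}

fn main() {
    assert_eq!(srv_query_name("example.org"), "_matrix-fed._tcp.example.org");
}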