refactor: Replace remaining std Mutexes

Author: Jade Ellis
Date: 2025-07-19 22:05:43 +01:00
Commit: 374fb2745c
11 changed files with 83 additions and 117 deletions
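
Background for the change: SyncMutex is conduwuit's synchronous mutex type, presumably an alias over parking_lot's Mutex rather than std::sync::Mutex. parking_lot locks do not poison, so lock() hands back the guard directly instead of a Result, which is why every call site in the hunks below drops its ? or .expect("locked"). A minimal sketch of the difference, with the Caches struct and the alias itself as illustrative assumptions:

use std::collections::BTreeMap;

// Hypothetical stand-in for conduwuit's SyncMutex export; assumed to be parking_lot-backed.
use parking_lot::Mutex as SyncMutex;

struct Caches {
    // Mirrors the Context fields below: plain data guarded by a non-poisoning mutex.
    col_cache: SyncMutex<BTreeMap<String, usize>>,
}

impl Caches {
    fn insert(&self, name: &str, size: usize) {
        // With std::sync::Mutex this would be: self.col_cache.lock().expect("locked")
        // parking_lot's lock() returns the guard directly; there is no poisoning Result to unwrap.
        let mut caches = self.col_cache.lock();
        caches.insert(name.to_owned(), size);
    }
}

fn main() {
    let caches = Caches { col_cache: SyncMutex::new(BTreeMap::new()) };
    caches.insert("events", 64 * 1024);
    println!("col caches tracked: {}", caches.col_cache.lock().len());
}

The trade-off is that a panic while holding the lock no longer marks it poisoned, which matches how the removed .expect("locked") calls were already treating poisoning as unreachable.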


@@ -71,7 +71,7 @@ pub fn backup_count(&self) -> Result<usize> {
fn backup_engine(&self) -> Result<BackupEngine> {
let path = self.backup_path()?;
let options = BackupEngineOptions::new(path).map_err(map_err)?;
-BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err)
+BackupEngine::open(&options, &self.ctx.env.lock()).map_err(map_err)
}
#[implement(Engine)]


@@ -232,7 +232,7 @@ fn get_cache(ctx: &Context, desc: &Descriptor) -> Option<Cache> {
cache_opts.set_num_shard_bits(shard_bits);
cache_opts.set_capacity(size);
-let mut caches = ctx.col_cache.lock().expect("locked");
+let mut caches = ctx.col_cache.lock();
match desc.cache_disp {
| CacheDisp::Unique if desc.cache_size == 0 => None,
| CacheDisp::Unique => {


@@ -1,9 +1,6 @@
-use std::{
-collections::BTreeMap,
-sync::{Arc, Mutex},
-};
+use std::{collections::BTreeMap, sync::Arc};
-use conduwuit::{Result, Server, debug, utils::math::usize_from_f64};
+use conduwuit::{Result, Server, SyncMutex, debug, utils::math::usize_from_f64};
use rocksdb::{Cache, Env, LruCacheOptions};
use crate::{or_else, pool::Pool};
@@ -14,9 +11,9 @@ use crate::{or_else, pool::Pool};
/// These assets are housed in the shared Context.
pub(crate) struct Context {
pub(crate) pool: Arc<Pool>,
-pub(crate) col_cache: Mutex<BTreeMap<String, Cache>>,
-pub(crate) row_cache: Mutex<Cache>,
-pub(crate) env: Mutex<Env>,
+pub(crate) col_cache: SyncMutex<BTreeMap<String, Cache>>,
+pub(crate) row_cache: SyncMutex<Cache>,
+pub(crate) env: SyncMutex<Env>,
pub(crate) server: Arc<Server>,
}
@@ -68,7 +65,7 @@ impl Drop for Context {
debug!("Closing frontend pool");
self.pool.close();
-let mut env = self.env.lock().expect("locked");
+let mut env = self.env.lock();
debug!("Shutting down background threads");
env.set_high_priority_background_threads(0);


@@ -9,7 +9,7 @@ use crate::or_else;
#[implement(Engine)]
pub fn memory_usage(&self) -> Result<String> {
let mut res = String::new();
-let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&*self.ctx.row_cache.lock()?]))
+let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&*self.ctx.row_cache.lock()]))
.or_else(or_else)?;
let mibs = |input| f64::from(u32::try_from(input / 1024).unwrap_or(0)) / 1024.0;
writeln!(
@@ -19,10 +19,10 @@ pub fn memory_usage(&self) -> Result<String> {
mibs(stats.mem_table_total),
mibs(stats.mem_table_unflushed),
mibs(stats.mem_table_readers_total),
-mibs(u64::try_from(self.ctx.row_cache.lock()?.get_usage())?),
+mibs(u64::try_from(self.ctx.row_cache.lock().get_usage())?),
)?;
-for (name, cache) in &*self.ctx.col_cache.lock()? {
+for (name, cache) in &*self.ctx.col_cache.lock() {
writeln!(res, "{name} cache: {:.2} MiB", mibs(u64::try_from(cache.get_usage())?))?;
}


@@ -23,11 +23,7 @@ pub(crate) async fn open(ctx: Arc<Context>, desc: &[Descriptor]) -> Result<Arc<S
let config = &server.config;
let path = &config.database_path;
-let db_opts = db_options(
-config,
-&ctx.env.lock().expect("environment locked"),
-&ctx.row_cache.lock().expect("row cache locked"),
-)?;
+let db_opts = db_options(config, &ctx.env.lock(), &ctx.row_cache.lock())?;
let cfds = Self::configure_cfds(&ctx, &db_opts, desc)?;
let num_cfds = cfds.len();


@@ -3,7 +3,7 @@ mod configure;
use std::{
mem::take,
sync::{
-Arc, Mutex,
+Arc,
atomic::{AtomicUsize, Ordering},
},
thread,
@@ -12,7 +12,7 @@ use std::{
use async_channel::{QueueStrategy, Receiver, RecvError, Sender};
use conduwuit::{
-Error, Result, Server, debug, err, error, implement,
+Error, Result, Server, SyncMutex, debug, err, error, implement,
result::DebugInspect,
smallvec::SmallVec,
trace,
@@ -31,7 +31,7 @@ use crate::{Handle, Map, keyval::KeyBuf, stream};
pub(crate) struct Pool {
server: Arc<Server>,
queues: Vec<Sender<Cmd>>,
-workers: Mutex<Vec<JoinHandle<()>>>,
+workers: SyncMutex<Vec<JoinHandle<()>>>,
topology: Vec<usize>,
busy: AtomicUsize,
queued_max: AtomicUsize,
@@ -115,7 +115,7 @@ impl Drop for Pool {
#[implement(Pool)]
#[tracing::instrument(skip_all)]
pub(crate) fn close(&self) {
-let workers = take(&mut *self.workers.lock().expect("locked"));
+let workers = take(&mut *self.workers.lock());
let senders = self.queues.iter().map(Sender::sender_count).sum::<usize>();
@@ -154,7 +154,7 @@ pub(crate) fn close(&self) {
#[implement(Pool)]
fn spawn_until(self: &Arc<Self>, recv: &[Receiver<Cmd>], count: usize) -> Result {
-let mut workers = self.workers.lock().expect("locked");
+let mut workers = self.workers.lock();
while workers.len() < count {
self.clone().spawn_one(&mut workers, recv)?;
}