Browse Source

make sure PDUs aren't allocated twice, ever

merge-requests/198/head
Jonathan de Jong 4 years ago
parent
commit
57db96f239
  1. 7
      src/database.rs
  2. 13
      src/database/rooms.rs
  3. 65
      src/utils.rs

7
src/database.rs

@@ -14,7 +14,10 @@ pub mod transaction_ids;
pub mod uiaa;
pub mod users;
use crate::{utils, Error, Result};
use crate::{
utils::{self, ArcingLruCache},
Error, Result,
};
use abstraction::DatabaseEngine;
use directories::ProjectDirs;
use lru_cache::LruCache;
@@ -292,7 +295,7 @@ impl Database {
softfailedeventids: builder.open_tree("softfailedeventids")?,
referencedevents: builder.open_tree("referencedevents")?,
pdu_cache: Mutex::new(LruCache::new(
pdu_cache: Mutex::new(ArcingLruCache::new(
config
.pdu_cache_capacity
.try_into()

13
src/database/rooms.rs

@@ -3,7 +3,12 @@ mod edus;
pub use edus::RoomEdus;
use member::MembershipState;
use crate::{pdu::PduBuilder, server_server, utils, Database, Error, PduEvent, Result};
use crate::{
pdu::PduBuilder,
server_server,
utils::{self, ArcingLruCache},
Database, Error, PduEvent, Result,
};
use lru_cache::LruCache;
use regex::Regex;
use ring::digest;
@@ -96,7 +101,7 @@ pub struct Rooms {
/// RoomId + EventId -> Parent PDU EventId.
pub(super) referencedevents: Arc<dyn Tree>,
pub(super) pdu_cache: Mutex<LruCache<EventId, Arc<PduEvent>>>,
pub(super) pdu_cache: Mutex<ArcingLruCache<EventId, PduEvent>>,
pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,
pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>,
@@ -1081,8 +1086,8 @@ impl Rooms {
/// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
#[tracing::instrument(skip(self))]
pub fn get_pdu(&self, event_id: &EventId) -> Result<Option<Arc<PduEvent>>> {
if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(&event_id) {
return Ok(Some(Arc::clone(p)));
if let Some(p) = self.pdu_cache.lock().unwrap().get(&event_id as &EventId) {
return Ok(Some(p));
}
if let Some(pdu) = self

65
src/utils.rs

@@ -1,11 +1,15 @@
use argon2::{Config, Variant};
use cmp::Ordering;
use lru_cache::LruCache;
use rand::prelude::*;
use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject};
use std::{
borrow::Borrow,
cmp,
collections::HashMap,
convert::TryInto,
str::FromStr,
sync::{Arc, Weak},
time::{SystemTime, UNIX_EPOCH},
};
@@ -141,3 +145,64 @@ pub fn deserialize_from_str<
}
deserializer.deserialize_str(Visitor(std::marker::PhantomData))
}
/// An LRU cache that hands out `Arc`s to its values and additionally keeps a
/// `Weak` reference to every value it has ever held. When a value has been
/// evicted from the LRU window but is still strongly referenced elsewhere,
/// `get` can "revive" it instead of letting the caller allocate it a second
/// time — the point of this type is that a (key, value) pair is never
/// allocated twice while any copy of it is still alive.
pub struct ArcingLruCache<K: Eq + std::hash::Hash, V> {
    // Strong references: keeps the `capacity` most recently used values alive.
    lru: LruCache<Arc<K>, Arc<V>>,
    // Weak references to everything ever inserted; dead entries are swept
    // lazily by `prune_dead` rather than eagerly on each miss.
    map: HashMap<Arc<K>, Weak<V>>,
}

impl<K: Eq + std::hash::Hash, V> ArcingLruCache<K, V> {
    /// Creates an empty cache holding at most `capacity` strong references.
    pub fn new(capacity: usize) -> Self {
        ArcingLruCache {
            lru: LruCache::new(capacity),
            map: HashMap::new(),
        }
    }

    /// Removes every weak-map entry whose value has no strong references left.
    pub fn prune_dead(&mut self) {
        self.map.retain(|_, v| v.strong_count() > 0)
    }

    /// Inserts a value, keeping a strong reference in the LRU and a weak one
    /// in the side map. If an `Arc` for an equal key is already present, that
    /// allocation is reused so keys are not allocated twice either.
    pub fn insert(&mut self, k: K, v: Arc<V>) {
        let key = match self.map.get_key_value(&k) {
            // Reuse the already-allocated key.
            Some((existing, _)) => Arc::clone(existing),
            None => Arc::new(k),
        };
        self.map.insert(Arc::clone(&key), Arc::downgrade(&v));
        self.lru.insert(key, v);
    }

    /// Looks up `k`, first in the LRU window, then among evicted values that
    /// are still alive elsewhere. A side-map hit re-promotes the value into
    /// the LRU; a dead side-map hit triggers a full `prune_dead` sweep.
    pub fn get<Q: ?Sized>(&mut self, k: &Q) -> Option<Arc<V>>
    where
        Arc<K>: Borrow<Q>,
        Q: std::hash::Hash + Eq,
    {
        // Fast path: still in the LRU. Pass `k` directly — the lookup methods
        // already take `&Q`; `&k` would be a `&&Q` and fail the
        // `Arc<K>: Borrow<Q>` bound.
        if let Some(v) = self.lru.get_mut(k) {
            return Some(Arc::clone(v));
        }
        match self.map.get_key_value(k) {
            Some((map_k, weak)) => match weak.upgrade() {
                Some(strong) => {
                    // Revived: evicted from the LRU but still referenced
                    // somewhere; put it back so the next lookup is fast.
                    self.lru.insert(Arc::clone(map_k), Arc::clone(&strong));
                    Some(strong)
                }
                None => {
                    // Dead. We cannot remove just this entry here (`map_k`
                    // borrows `self.map`), and one dead weak usually means
                    // more have accumulated since inserts never prune, so
                    // sweep the whole map at once. Races are impossible:
                    // `&mut self` gives exclusive access.
                    self.prune_dead();
                    None
                }
            },
            None => None,
        }
    }
}

Loading…
Cancel
Save