@@ -8,7 +8,7 @@ use std::{
     future::Future,
     path::{Path, PathBuf},
     pin::Pin,
-    sync::Arc,
+    sync::{Arc, Weak},
 };
 use thread_local::ThreadLocal;
 use tokio::sync::watch;
@@ -46,6 +46,8 @@ pub struct Engine {
 
     path: PathBuf,
     cache_size_per_thread: u32,
+
+    pub(in crate::database) tables: RwLock<HashMap<String, Weak<SqliteTable>>>,
 }
 
 impl Engine {
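The `tables` map added above deliberately stores `Weak<SqliteTable>` rather than `Arc<SqliteTable>`: each `SqliteTable` holds an `Arc<Engine>` back to the engine (see the next hunk), so strong references in both directions would form a cycle that reference counting can never free. A minimal sketch of that shape, with simplified stand-in types (`Db`/`Table` are illustrative names, not from this codebase):

use std::sync::{Arc, Mutex, Weak};

// Stand-ins for Engine and SqliteTable; field names are illustrative only.
struct Db {
    tables: Mutex<Vec<Weak<Table>>>,
}

struct Table {
    _engine: Arc<Db>, // strong back-reference, like SqliteTable -> Engine
}

fn open(engine: &Arc<Db>) -> Arc<Table> {
    let table = Arc::new(Table { _engine: Arc::clone(engine) });
    // A Weak entry does not keep the table alive: once the last user-held
    // Arc<Table> is dropped, the table is freed and upgrade() yields None.
    // Storing Arc<Table> here instead would leak both structs forever.
    engine.tables.lock().unwrap().push(Arc::downgrade(&table));
    table
}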
@@ -102,19 +104,46 @@ impl DatabaseEngine for Engine {
             read_iterator_conn_tls: ThreadLocal::new(),
             path,
             cache_size_per_thread,
+            tables: RwLock::new(HashMap::new()),
         });
 
         Ok(arc)
     }
 
     fn open_tree(self: &Arc<Self>, name: &str) -> Result<Arc<dyn Tree>> {
-        self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?;
+        fn create_new(engine: &Arc<Engine>, name: &str) -> Result<SqliteTable> {
+            engine.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?;
+
+            Ok(SqliteTable {
+                engine: Arc::clone(engine),
+                name: name.to_owned(),
+                watchers: RwLock::new(HashMap::new()),
+            })
+        }
 
-        Ok(Arc::new(SqliteTable {
-            engine: Arc::clone(self),
-            name: name.to_owned(),
-            watchers: RwLock::new(HashMap::new()),
-        }))
+        // Table mappings are `Weak` to prevent reference cycles; that is what makes this additional correctness logic necessary.
+        Ok(match self.tables.write().entry(name.to_string()) {
+            hash_map::Entry::Occupied(mut o) => {
+                if let Some(table) = o.get().upgrade() {
+                    table
+                } else {
+                    // On the off-chance that a table was dropped somewhere...
+                    let table = Arc::new(create_new(self, name)?);
+
+                    o.insert(Arc::downgrade(&table));
+
+                    table
+                }
+            }
+            hash_map::Entry::Vacant(v) => {
+                let table = Arc::new(create_new(self, name)?);
+
+                v.insert(Arc::downgrade(&table));
+
+                table
+            }
+        })
     }
 
     fn flush(self: &Arc<Self>) -> Result<()> {
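For reference, the occupied-entry branch above works because `Weak::upgrade` returns `None` once every strong reference to the table has been dropped. A condensed, self-contained sketch of the same upgrade-or-recreate flow (the generic `get_or_create` helper and its `make` parameter are hypothetical, purely illustrative):

use std::collections::{hash_map, HashMap};
use std::sync::{Arc, Weak};

fn get_or_create<T>(
    cache: &mut HashMap<String, Weak<T>>,
    name: &str,
    make: impl FnOnce() -> T, // stands in for create_new above
) -> Arc<T> {
    match cache.entry(name.to_string()) {
        hash_map::Entry::Occupied(mut o) => match o.get().upgrade() {
            // The weak pointer is still live: hand out another strong handle.
            Some(t) => t,
            // All strong handles were dropped; rebuild and re-cache.
            None => {
                let t = Arc::new(make());
                o.insert(Arc::downgrade(&t));
                t
            }
        },
        hash_map::Entry::Vacant(v) => {
            let t = Arc::new(make());
            v.insert(Arc::downgrade(&t));
            t
        }
    }
}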
@@ -375,7 +404,8 @@ impl Tree for SqliteTable {
         };
 
         Box::pin(async move {
-            // Tx is never destroyed
+            // Tx will only be destroyed when its receiver_count is 1,
+            // and only while self.watchers is held with a write lock
             rx.changed().await.unwrap();
         })
     }
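The `unwrap` above leans on tokio's watch-channel semantics: `Receiver::changed` only returns an error once the `Sender` has been dropped, and the comment's invariant guarantees the sender outlives any waiting task (a waiting task holds its own receiver clone, keeping receiver_count above 1, so the pruner below leaves the sender alone). A standalone demonstration of those semantics:

use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(0u32);

    // While the sender is alive, changed() resolves Ok after each send.
    tx.send(1).unwrap();
    assert!(rx.changed().await.is_ok());

    // Once the sender is dropped, changed() reports an error instead of
    // blocking forever; this is the failure mode the invariant rules out.
    drop(tx);
    assert!(rx.changed().await.is_err());
}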
@@ -390,3 +420,41 @@ impl Tree for SqliteTable {
         Ok(())
     }
 }
+
+impl SqliteTable {
+    pub fn prune_dead_watchers(&self) -> bool {
+        let watchers = self.watchers.read();
+        let mut interesting = Vec::new();
+        for (key, (tx, _)) in watchers.iter() {
+            if tx.receiver_count() == 1 {
+                // We take the read lock first to collect all "interesting" keys without blocking other watch threads.
+                interesting.push(key.clone())
+            }
+        }
+
+        drop(watchers);
+
+        if !interesting.is_empty() {
+            let mut watchers = self.watchers.write();
+            let mut cleared = 0;
+            for prefix in interesting {
+                // Test for occupancy; there is only the slightest chance this entry was deleted between then and now.
+                if let hash_map::Entry::Occupied(o) = watchers.entry(prefix) {
+                    // Check one last time; it's possible a receiver was cloned between then and now.
+                    if o.get().0.receiver_count() == 1 {
+                        // receiver_count == 1 means the only receiver left is the one bundled in the entry, so it's okay to drop it.
+                        o.remove_entry();
+
+                        cleared += 1;
+                    }
+                }
+            }
+
+            debug!("Cleared {} dead watchers.", cleared);
+
+            return cleared != 0;
+        }
+
+        false
+    }
+}
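Nothing in this diff calls `prune_dead_watchers` yet; one plausible driver is a periodic maintenance task that sweeps every open table. The sketch below is purely illustrative: how tables are collected (a plain `Vec`) and the sweep interval are assumptions, not part of the change.

use std::{sync::Arc, time::Duration};

// Hypothetical sweep loop; SqliteTable and prune_dead_watchers come from the
// diff above, everything else here is assumed for the sake of the example.
async fn watcher_maintenance(tables: Vec<Arc<SqliteTable>>) {
    let mut interval = tokio::time::interval(Duration::from_secs(60));
    loop {
        interval.tick().await;
        for table in &tables {
            // Returns true if at least one dead watcher entry was removed;
            // safe to run concurrently with watch_prefix thanks to the
            // double-checked receiver_count logic.
            let _removed = table.prune_dead_watchers();
        }
    }
}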