Browse Source

remove extra advanced config and make db_cache_capacity_mb total

merge-requests/114/head
Jonathan de Jong 5 years ago
parent
commit
251d19d06c
  1. 22
      DEPLOY.md
  2. 22
      conduit-example.toml
  3. 22
      debian/postinst
  4. 8
      src/database.rs
  5. 2
      src/database/abstraction/sled.rs
  6. 10
      src/database/abstraction/sqlite.rs

22
DEPLOY.md

@@ -111,26 +111,8 @@ trusted_servers = ["matrix.org"]
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
## sqlite # The total amount of memory that the database will use.
#db_cache_capacity_mb = 10
# The amount of memory that the database will use, with the following formula;
# (db_cache_capacity * (sqlite_read_pool_size + 1)), in bytes
#db_cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024
# How many permanent read connections will be open to the database,
# increase this if you see "creating spillover reader" in your logs.
#sqlite_read_pool_size = 2 # default: max(cpu core count, 1)
# If the database WAL (conduit.db-wal file) should be cleaned on a timer.
#sqlite_wal_clean_timer = false
# How many seconds should pass before the WAL clean task should fire.
# Note: Is dependent on sqlite_wal_clean_timer being true.
#sqlite_wal_clean_second_interval = 60
# How long (in seconds) the WAL clean task should try to wait while
# acquiring exclusive access to the database (before giving up).
#sqlite_wal_clean_second_timeout = 2
``` ```
## Setting the correct file permissions ## Setting the correct file permissions

22
conduit-example.toml

@@ -43,23 +43,5 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re
proxy = "none" # more examples can be found at src/database/proxy.rs:6 proxy = "none" # more examples can be found at src/database/proxy.rs:6
## sqlite # The total amount of memory that the database will use.
#db_cache_capacity_mb = 10
# The amount of memory that the database will use, with the following formula;
# (db_cache_capacity * (sqlite_read_pool_size + 1)), in bytes
#db_cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024
# How many permanent read connections will be open to the database,
# increase this if you see "creating spillover reader" in your logs.
#sqlite_read_pool_size = 2 # default: max(cpu core count, 1)
# If the database WAL (conduit.db-wal file) should be cleaned on a timer.
#sqlite_wal_clean_timer = false
# How many seconds should pass before the WAL clean task should fire.
# Note: Is dependent on sqlite_wal_clean_timer being true.
#sqlite_wal_clean_second_interval = 60
# How long (in seconds) the WAL clean task should try to wait while
# acquiring exclusive access to the database (before giving up).
#sqlite_wal_clean_second_timeout = 2

22
debian/postinst vendored

@@ -77,26 +77,8 @@ max_request_size = 20_000_000 # in bytes
#log = "info,state_res=warn,rocket=off,_=off,sled=off" #log = "info,state_res=warn,rocket=off,_=off,sled=off"
#workers = 4 # default: cpu core count * 2 #workers = 4 # default: cpu core count * 2
## sqlite # The total amount of memory that the database will use.
#db_cache_capacity_mb = 10
# The amount of memory that the database will use, with the following formula;
# (db_cache_capacity * (sqlite_read_pool_size + 1)), in bytes
#db_cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024
# How many permanent read connections will be open to the database,
# increase this if you see "creating spillover reader" in your logs.
#sqlite_read_pool_size = 2 # default: max(cpu core count, 1)
# If the database WAL (conduit.db-wal file) should be cleaned on a timer.
#sqlite_wal_clean_timer = false
# How many seconds should pass before the WAL clean task should fire.
# Note: Is dependent on sqlite_wal_clean_timer being true.
#sqlite_wal_clean_second_interval = 60
# How long (in seconds) the WAL clean task should try to wait while
# acquiring exclusive access to the database (before giving up).
#sqlite_wal_clean_second_timeout = 2
EOF EOF
fi fi
;; ;;

8
src/database.rs

@@ -43,8 +43,8 @@ use self::proxy::ProxyConfig;
pub struct Config { pub struct Config {
server_name: Box<ServerName>, server_name: Box<ServerName>,
database_path: String, database_path: String,
#[serde(default = "default_db_cache_capacity")] #[serde(default = "default_db_cache_capacity_mb")]
db_cache_capacity: u32, db_cache_capacity_mb: f64,
#[serde(default = "default_sqlite_read_pool_size")] #[serde(default = "default_sqlite_read_pool_size")]
sqlite_read_pool_size: usize, sqlite_read_pool_size: usize,
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
@@ -105,8 +105,8 @@ fn true_fn() {
true true
} }
fn default_db_cache_capacity() -> u32 { fn default_db_cache_capacity_mb() -> f64 {
1024 * 1024 * 1024 10.0
} }
fn default_sqlite_read_pool_size() -> usize { fn default_sqlite_read_pool_size() -> usize {

2
src/database/abstraction/sled.rs

@@ -14,7 +14,7 @@ impl DatabaseEngine for Engine {
Ok(Arc::new(Engine( Ok(Arc::new(Engine(
sled::Config::default() sled::Config::default()
.path(&config.database_path) .path(&config.database_path)
.cache_capacity(config.db_cache_capacity as u64) .cache_capacity((config.db_cache_capacity_mb * 1024 * 1024) as u64)
.use_compression(true) .use_compression(true)
.open()?, .open()?,
))) )))

10
src/database/abstraction/sqlite.rs

@@ -58,7 +58,13 @@ impl<'a> Deref for HoldingConn<'a> {
} }
impl Pool { impl Pool {
fn new<P: AsRef<Path>>(path: P, num_readers: usize, cache_size: u32) -> Result<Self> { fn new<P: AsRef<Path>>(path: P, num_readers: usize, total_cache_size_mb: f64) -> Result<Self> {
// calculates cache-size per permanent connection
// 1. convert MB to KiB
// 2. divide by permanent connections
// 3. round down to nearest integer
let cache_size: u32 = ((total_cache_size_mb * 1024.0) / (num_readers + 1) as f64) as u32;
let writer = Mutex::new(Self::prepare_conn(&path, Some(cache_size))?); let writer = Mutex::new(Self::prepare_conn(&path, Some(cache_size))?);
let mut readers = Vec::new(); let mut readers = Vec::new();
@@ -128,7 +134,7 @@ impl DatabaseEngine for Engine {
let pool = Pool::new( let pool = Pool::new(
Path::new(&config.database_path).join("conduit.db"), Path::new(&config.database_path).join("conduit.db"),
config.sqlite_read_pool_size, config.sqlite_read_pool_size,
config.db_cache_capacity / 1024, // bytes -> kb config.db_cache_capacity_mb,
)?; )?;
pool.write_lock() pool.write_lock()

Loading…
Cancel
Save