Compare commits

...

29 Commits
next ... locks

Author SHA1 Message Date
Timo Kösters 2423de2d51
improvement: locks 5 years ago
Timo Kösters 553fea9a41
improvement: efficient /sync, mutex for federation transactions 5 years ago
Timo Kösters 25c7333112
fix: stuck messages 5 years ago
Timo Kösters 1bba271916
fix: e2ee verification 5 years ago
Timo Kösters ea2fc3adc0
improvement: more efficient state res 5 years ago
Jonathan de Jong b89cffed34 warn on deprecated keys 5 years ago
Jonathan de Jong 3260ae01b8 change references of cache_capacity to db_cache_capacity 5 years ago
Jonathan de Jong 7e0aab7852 shuffle main.rs to allow deprecation warnings 5 years ago
Jonathan de Jong 6e8beb604d support some deprecations 5 years ago
Jonathan de Jong 735d7a0815 database iter_from fix 5 years ago
Jonathan de Jong caa0cbfe1d change fairmutex to mutex 5 years ago
Jonathan de Jong 7e9014d5c9 implement sync rotation 5 years ago
Jonathan de Jong bcfea98457 replace ReadGuard with DatabaseGuard 5 years ago
Jonathan de Jong 3a76fda92b incorperate feedback 5 years ago
Jonathan de Jong 318d9c1a35 revert docker-compose.yml file 5 years ago
Jonathan de Jong f4aabbdaa7 add some flushes 5 years ago
Jonathan de Jong 7c82213ee7 change to use path joining properly 5 years ago
Jonathan de Jong 0719377c6a merge one more {use} 5 years ago
Jonathan de Jong 494585267a remove rjbench 5 years ago
Jonathan de Jong 0c23874194 add config and optimise 5 years ago
Jonathan de Jong dc5f1f41fd some more fixes to allow sled to work 5 years ago
Jonathan de Jong f81018ab2d reverse iterator funk 5 years ago
Jonathan de Jong e5a26de606 misc cleanup 5 years ago
Jonathan de Jong 5ec0be2b41 fmt 5 years ago
Jonathan de Jong a55dec9035 add better performance around syncs 5 years ago
Jonathan de Jong 14e6afc45e remove eldrich being and install good being 5 years ago
Jonathan de Jong 9df86c2c1e lock update 5 years ago
Jonathan de Jong 0753076e94 chutulu is my copilot 5 years ago
Jonathan de Jong 22e3416745 YEET 5 years ago
  1. 1
      .gitignore
  2. 139
      Cargo.lock
  3. 8
      Cargo.toml
  4. 2
      DEPLOY.md
  5. 2
      conduit-example.toml
  6. 2
      debian/postinst
  7. 45
      src/client_server/account.rs
  8. 11
      src/client_server/alias.rs
  9. 33
      src/client_server/backup.rs
  10. 13
      src/client_server/config.rs
  11. 7
      src/client_server/context.rs
  12. 15
      src/client_server/device.rs
  13. 13
      src/client_server/directory.rs
  14. 21
      src/client_server/keys.rs
  15. 15
      src/client_server/media.rs
  16. 117
      src/client_server/membership.rs
  17. 20
      src/client_server/message.rs
  18. 4
      src/client_server/mod.rs
  19. 9
      src/client_server/presence.rs
  20. 47
      src/client_server/profile.rs
  21. 25
      src/client_server/push.rs
  22. 9
      src/client_server/read_marker.rs
  23. 21
      src/client_server/redact.rs
  24. 50
      src/client_server/room.rs
  25. 6
      src/client_server/search.rs
  26. 12
      src/client_server/session.rs
  27. 26
      src/client_server/state.rs
  28. 415
      src/client_server/sync.rs
  29. 11
      src/client_server/tag.rs
  30. 10
      src/client_server/to_device.rs
  31. 7
      src/client_server/typing.rs
  32. 7
      src/client_server/user_directory.rs
  33. 206
      src/database.rs
  34. 297
      src/database/abstraction.rs
  35. 176
      src/database/abstraction/rocksdb.rs
  36. 119
      src/database/abstraction/sled.rs
  37. 438
      src/database/abstraction/sqlite.rs
  38. 2
      src/database/account_data.rs
  39. 55
      src/database/admin.rs
  40. 4
      src/database/appservice.rs
  41. 33
      src/database/globals.rs
  42. 2
      src/database/pusher.rs
  43. 60
      src/database/rooms.rs
  44. 57
      src/database/sending.rs
  45. 9
      src/database/users.rs
  46. 6
      src/error.rs
  47. 27
      src/main.rs
  48. 10
      src/ruma_wrapper.rs
  49. 221
      src/server_server.rs

1
.gitignore vendored

@ -59,6 +59,7 @@ $RECYCLE.BIN/
# Conduit # Conduit
Rocket.toml Rocket.toml
conduit.toml conduit.toml
conduit.db
# Etc. # Etc.
**/*.rs.bk **/*.rs.bk

139
Cargo.lock generated

@ -6,6 +6,17 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234"
[[package]]
name = "ahash"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98"
dependencies = [
"getrandom 0.2.2",
"once_cell",
"version_check",
]
[[package]] [[package]]
name = "aho-corasick" name = "aho-corasick"
version = "0.7.15" version = "0.7.15"
@ -238,14 +249,17 @@ version = "0.1.0"
dependencies = [ dependencies = [
"base64 0.13.0", "base64 0.13.0",
"bytes", "bytes",
"crossbeam",
"directories", "directories",
"http", "http",
"image", "image",
"jsonwebtoken", "jsonwebtoken",
"log", "log",
"lru-cache", "lru-cache",
"num_cpus",
"opentelemetry", "opentelemetry",
"opentelemetry-jaeger", "opentelemetry-jaeger",
"parking_lot",
"pretty_env_logger", "pretty_env_logger",
"rand 0.8.3", "rand 0.8.3",
"regex", "regex",
@ -254,6 +268,7 @@ dependencies = [
"rocket", "rocket",
"rocksdb", "rocksdb",
"ruma", "ruma",
"rusqlite",
"rust-argon2", "rust-argon2",
"rustls", "rustls",
"rustls-native-certs", "rustls-native-certs",
@ -339,11 +354,46 @@ dependencies = [
"cfg-if 1.0.0", "cfg-if 1.0.0",
] ]
[[package]]
name = "crossbeam"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-epoch",
"crossbeam-queue",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]] [[package]]
name = "crossbeam-epoch" name = "crossbeam-epoch"
version = "0.9.3" version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd"
dependencies = [ dependencies = [
"cfg-if 1.0.0", "cfg-if 1.0.0",
"crossbeam-utils", "crossbeam-utils",
@ -352,13 +402,22 @@ dependencies = [
"scopeguard", "scopeguard",
] ]
[[package]]
name = "crossbeam-queue"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-utils",
]
[[package]] [[package]]
name = "crossbeam-utils" name = "crossbeam-utils"
version = "0.8.3" version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db"
dependencies = [ dependencies = [
"autocfg",
"cfg-if 1.0.0", "cfg-if 1.0.0",
"lazy_static", "lazy_static",
] ]
@ -547,6 +606,18 @@ dependencies = [
"termcolor", "termcolor",
] ]
[[package]]
name = "fallible-iterator"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
[[package]]
name = "fallible-streaming-iterator"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
[[package]] [[package]]
name = "figment" name = "figment"
version = "0.10.5" version = "0.10.5"
@ -774,6 +845,24 @@ version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
[[package]]
name = "hashbrown"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
dependencies = [
"ahash",
]
[[package]]
name = "hashlink"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf"
dependencies = [
"hashbrown 0.11.2",
]
[[package]] [[package]]
name = "heck" name = "heck"
version = "0.3.2" version = "0.3.2"
@ -920,7 +1009,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3"
dependencies = [ dependencies = [
"autocfg", "autocfg",
"hashbrown", "hashbrown 0.9.1",
"serde", "serde",
] ]
@ -1083,6 +1172,17 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "libsqlite3-sys"
version = "0.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d"
dependencies = [
"cc",
"pkg-config",
"vcpkg",
]
[[package]] [[package]]
name = "linked-hash-map" name = "linked-hash-map"
version = "0.5.4" version = "0.5.4"
@ -1484,6 +1584,12 @@ dependencies = [
"zeroize", "zeroize",
] ]
[[package]]
name = "pkg-config"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
[[package]] [[package]]
name = "png" name = "png"
version = "0.16.8" version = "0.16.8"
@ -2136,6 +2242,21 @@ dependencies = [
"tracing", "tracing",
] ]
[[package]]
name = "rusqlite"
version = "0.25.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57adcf67c8faaf96f3248c2a7b419a0dbc52ebe36ba83dd57fe83827c1ea4eb3"
dependencies = [
"bitflags",
"fallible-iterator",
"fallible-streaming-iterator",
"hashlink",
"libsqlite3-sys",
"memchr",
"smallvec",
]
[[package]] [[package]]
name = "rust-argon2" name = "rust-argon2"
version = "0.8.3" version = "0.8.3"
@ -3007,6 +3128,12 @@ dependencies = [
"percent-encoding", "percent-encoding",
] ]
[[package]]
name = "vcpkg"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa"
[[package]] [[package]]
name = "version_check" name = "version_check"
version = "0.9.3" version = "0.9.3"

8
Cargo.toml

@ -73,11 +73,17 @@ tracing-opentelemetry = "0.11.0"
opentelemetry-jaeger = "0.11.0" opentelemetry-jaeger = "0.11.0"
pretty_env_logger = "0.4.0" pretty_env_logger = "0.4.0"
lru-cache = "0.1.2" lru-cache = "0.1.2"
rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] }
parking_lot = { version = "0.11.1", optional = true }
crossbeam = { version = "0.8.1", optional = true }
num_cpus = { version = "1.13.0", optional = true }
[features] [features]
default = ["conduit_bin", "backend_sled"] default = ["conduit_bin", "backend_sqlite"]
backend_sled = ["sled"] backend_sled = ["sled"]
backend_rocksdb = ["rocksdb"] backend_rocksdb = ["rocksdb"]
backend_sqlite = ["sqlite"]
sqlite = ["rusqlite", "parking_lot", "crossbeam", "num_cpus", "tokio/signal"]
conduit_bin = [] # TODO: add rocket to this when it is optional conduit_bin = [] # TODO: add rocket to this when it is optional
[[bin]] [[bin]]

2
DEPLOY.md

@ -106,7 +106,7 @@ allow_federation = true
trusted_servers = ["matrix.org"] trusted_servers = ["matrix.org"]
#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #db_cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#workers = 4 # default: cpu core count * 2 #workers = 4 # default: cpu core count * 2

2
conduit-example.toml

@ -35,7 +35,7 @@ max_request_size = 20_000_000 # in bytes
trusted_servers = ["matrix.org"] trusted_servers = ["matrix.org"]
#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #db_cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "info,state_res=warn,rocket=off,_=off,sled=off" #log = "info,state_res=warn,rocket=off,_=off,sled=off"
#workers = 4 # default: cpu core count * 2 #workers = 4 # default: cpu core count * 2

2
debian/postinst vendored

@ -73,7 +73,7 @@ max_request_size = 20_000_000 # in bytes
# Enable jaeger to support monitoring and troubleshooting through jaeger. # Enable jaeger to support monitoring and troubleshooting through jaeger.
#allow_jaeger = false #allow_jaeger = false
#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024 #db_cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "info,state_res=warn,rocket=off,_=off,sled=off" #log = "info,state_res=warn,rocket=off,_=off,sled=off"
#workers = 4 # default: cpu core count * 2 #workers = 4 # default: cpu core count * 2

45
src/client_server/account.rs

@ -1,7 +1,7 @@
use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
use super::{State, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
use log::info; use log::info;
use ruma::{ use ruma::{
api::client::{ api::client::{
@ -42,7 +42,7 @@ const GUEST_NAME_LENGTH: usize = 10;
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_register_available_route( pub async fn get_register_available_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_username_availability::Request<'_>>, body: Ruma<get_username_availability::Request<'_>>,
) -> ConduitResult<get_username_availability::Response> { ) -> ConduitResult<get_username_availability::Response> {
// Validate user id // Validate user id
@ -85,7 +85,7 @@ pub async fn get_register_available_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn register_route( pub async fn register_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<register::Request<'_>>, body: Ruma<register::Request<'_>>,
) -> ConduitResult<register::Response> { ) -> ConduitResult<register::Response> {
if !db.globals.allow_registration() && !body.from_appservice { if !db.globals.allow_registration() && !body.from_appservice {
@ -236,6 +236,16 @@ pub async fn register_route(
let room_id = RoomId::new(db.globals.server_name()); let room_id = RoomId::new(db.globals.server_name());
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone()); let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone());
content.federate = true; content.federate = true;
content.predecessor = None; content.predecessor = None;
@ -253,6 +263,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 2. Make conduit bot join // 2. Make conduit bot join
@ -274,6 +285,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 3. Power levels // 3. Power levels
@ -298,6 +310,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.1 Join Rules // 4.1 Join Rules
@ -315,6 +328,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.2 History Visibility // 4.2 History Visibility
@ -334,6 +348,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.3 Guest Access // 4.3 Guest Access
@ -351,6 +366,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 6. Events implied by name and topic // 6. Events implied by name and topic
@ -370,6 +386,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
@ -386,6 +403,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// Room alias // Room alias
@ -408,6 +426,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?;
@ -431,6 +450,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
@ -450,6 +470,7 @@ pub async fn register_route(
&user_id, &user_id,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// Send welcome message // Send welcome message
@ -468,6 +489,7 @@ pub async fn register_route(
&conduit_user, &conduit_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }
@ -496,7 +518,7 @@ pub async fn register_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn change_password_route( pub async fn change_password_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<change_password::Request<'_>>, body: Ruma<change_password::Request<'_>>,
) -> ConduitResult<change_password::Response> { ) -> ConduitResult<change_password::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -588,7 +610,7 @@ pub async fn whoami_route(body: Ruma<whoami::Request>) -> ConduitResult<whoami::
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn deactivate_route( pub async fn deactivate_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<deactivate::Request<'_>>, body: Ruma<deactivate::Request<'_>>,
) -> ConduitResult<deactivate::Response> { ) -> ConduitResult<deactivate::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -643,6 +665,16 @@ pub async fn deactivate_route(
third_party_invite: None, third_party_invite: None,
}; };
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
@ -654,6 +686,7 @@ pub async fn deactivate_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }

11
src/client_server/alias.rs

@ -1,7 +1,4 @@
use std::sync::Arc; use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use regex::Regex; use regex::Regex;
use ruma::{ use ruma::{
api::{ api::{
@ -24,7 +21,7 @@ use rocket::{delete, get, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn create_alias_route( pub async fn create_alias_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_alias::Request<'_>>, body: Ruma<create_alias::Request<'_>>,
) -> ConduitResult<create_alias::Response> { ) -> ConduitResult<create_alias::Response> {
if db.rooms.id_from_alias(&body.room_alias)?.is_some() { if db.rooms.id_from_alias(&body.room_alias)?.is_some() {
@ -45,7 +42,7 @@ pub async fn create_alias_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn delete_alias_route( pub async fn delete_alias_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<delete_alias::Request<'_>>, body: Ruma<delete_alias::Request<'_>>,
) -> ConduitResult<delete_alias::Response> { ) -> ConduitResult<delete_alias::Response> {
db.rooms.set_alias(&body.room_alias, None, &db.globals)?; db.rooms.set_alias(&body.room_alias, None, &db.globals)?;
@ -61,7 +58,7 @@ pub async fn delete_alias_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_alias_route( pub async fn get_alias_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_alias::Request<'_>>, body: Ruma<get_alias::Request<'_>>,
) -> ConduitResult<get_alias::Response> { ) -> ConduitResult<get_alias::Response> {
get_alias_helper(&db, &body.room_alias).await get_alias_helper(&db, &body.room_alias).await

33
src/client_server/backup.rs

@ -1,7 +1,4 @@
use std::sync::Arc; use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::api::client::{ use ruma::api::client::{
error::ErrorKind, error::ErrorKind,
r0::backup::{ r0::backup::{
@ -21,7 +18,7 @@ use rocket::{delete, get, post, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn create_backup_route( pub async fn create_backup_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_backup::Request>, body: Ruma<create_backup::Request>,
) -> ConduitResult<create_backup::Response> { ) -> ConduitResult<create_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -40,7 +37,7 @@ pub async fn create_backup_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn update_backup_route( pub async fn update_backup_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<update_backup::Request<'_>>, body: Ruma<update_backup::Request<'_>>,
) -> ConduitResult<update_backup::Response> { ) -> ConduitResult<update_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -58,7 +55,7 @@ pub async fn update_backup_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_latest_backup_route( pub async fn get_latest_backup_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_latest_backup::Request>, body: Ruma<get_latest_backup::Request>,
) -> ConduitResult<get_latest_backup::Response> { ) -> ConduitResult<get_latest_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -86,7 +83,7 @@ pub async fn get_latest_backup_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_backup_route( pub async fn get_backup_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_backup::Request<'_>>, body: Ruma<get_backup::Request<'_>>,
) -> ConduitResult<get_backup::Response> { ) -> ConduitResult<get_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -113,7 +110,7 @@ pub async fn get_backup_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn delete_backup_route( pub async fn delete_backup_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<delete_backup::Request<'_>>, body: Ruma<delete_backup::Request<'_>>,
) -> ConduitResult<delete_backup::Response> { ) -> ConduitResult<delete_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -132,7 +129,7 @@ pub async fn delete_backup_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn add_backup_keys_route( pub async fn add_backup_keys_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<add_backup_keys::Request<'_>>, body: Ruma<add_backup_keys::Request<'_>>,
) -> ConduitResult<add_backup_keys::Response> { ) -> ConduitResult<add_backup_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -166,7 +163,7 @@ pub async fn add_backup_keys_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn add_backup_key_sessions_route( pub async fn add_backup_key_sessions_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<add_backup_key_sessions::Request<'_>>, body: Ruma<add_backup_key_sessions::Request<'_>>,
) -> ConduitResult<add_backup_key_sessions::Response> { ) -> ConduitResult<add_backup_key_sessions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -198,7 +195,7 @@ pub async fn add_backup_key_sessions_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn add_backup_key_session_route( pub async fn add_backup_key_session_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<add_backup_key_session::Request<'_>>, body: Ruma<add_backup_key_session::Request<'_>>,
) -> ConduitResult<add_backup_key_session::Response> { ) -> ConduitResult<add_backup_key_session::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -227,7 +224,7 @@ pub async fn add_backup_key_session_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_backup_keys_route( pub async fn get_backup_keys_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_backup_keys::Request<'_>>, body: Ruma<get_backup_keys::Request<'_>>,
) -> ConduitResult<get_backup_keys::Response> { ) -> ConduitResult<get_backup_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -243,7 +240,7 @@ pub async fn get_backup_keys_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_backup_key_sessions_route( pub async fn get_backup_key_sessions_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_backup_key_sessions::Request<'_>>, body: Ruma<get_backup_key_sessions::Request<'_>>,
) -> ConduitResult<get_backup_key_sessions::Response> { ) -> ConduitResult<get_backup_key_sessions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -261,7 +258,7 @@ pub async fn get_backup_key_sessions_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_backup_key_session_route( pub async fn get_backup_key_session_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_backup_key_session::Request<'_>>, body: Ruma<get_backup_key_session::Request<'_>>,
) -> ConduitResult<get_backup_key_session::Response> { ) -> ConduitResult<get_backup_key_session::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -283,7 +280,7 @@ pub async fn get_backup_key_session_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn delete_backup_keys_route( pub async fn delete_backup_keys_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<delete_backup_keys::Request<'_>>, body: Ruma<delete_backup_keys::Request<'_>>,
) -> ConduitResult<delete_backup_keys::Response> { ) -> ConduitResult<delete_backup_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -306,7 +303,7 @@ pub async fn delete_backup_keys_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn delete_backup_key_sessions_route( pub async fn delete_backup_key_sessions_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<delete_backup_key_sessions::Request<'_>>, body: Ruma<delete_backup_key_sessions::Request<'_>>,
) -> ConduitResult<delete_backup_key_sessions::Response> { ) -> ConduitResult<delete_backup_key_sessions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -329,7 +326,7 @@ pub async fn delete_backup_key_sessions_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn delete_backup_key_session_route( pub async fn delete_backup_key_session_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<delete_backup_key_session::Request<'_>>, body: Ruma<delete_backup_key_session::Request<'_>>,
) -> ConduitResult<delete_backup_key_session::Response> { ) -> ConduitResult<delete_backup_key_session::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

13
src/client_server/config.rs

@ -1,7 +1,4 @@
use std::sync::Arc; use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::{ use ruma::{
api::client::{ api::client::{
error::ErrorKind, error::ErrorKind,
@ -25,7 +22,7 @@ use rocket::{get, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_global_account_data_route( pub async fn set_global_account_data_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_global_account_data::Request<'_>>, body: Ruma<set_global_account_data::Request<'_>>,
) -> ConduitResult<set_global_account_data::Response> { ) -> ConduitResult<set_global_account_data::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -60,7 +57,7 @@ pub async fn set_global_account_data_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_room_account_data_route( pub async fn set_room_account_data_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_room_account_data::Request<'_>>, body: Ruma<set_room_account_data::Request<'_>>,
) -> ConduitResult<set_room_account_data::Response> { ) -> ConduitResult<set_room_account_data::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -92,7 +89,7 @@ pub async fn set_room_account_data_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_global_account_data_route( pub async fn get_global_account_data_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_global_account_data::Request<'_>>, body: Ruma<get_global_account_data::Request<'_>>,
) -> ConduitResult<get_global_account_data::Response> { ) -> ConduitResult<get_global_account_data::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -119,7 +116,7 @@ pub async fn get_global_account_data_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_room_account_data_route( pub async fn get_room_account_data_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_room_account_data::Request<'_>>, body: Ruma<get_room_account_data::Request<'_>>,
) -> ConduitResult<get_room_account_data::Response> { ) -> ConduitResult<get_room_account_data::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

7
src/client_server/context.rs

@ -1,7 +1,6 @@
use super::State; use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::api::client::{error::ErrorKind, r0::context::get_context}; use ruma::api::client::{error::ErrorKind, r0::context::get_context};
use std::{convert::TryFrom, sync::Arc}; use std::convert::TryFrom;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::get; use rocket::get;
@ -12,7 +11,7 @@ use rocket::get;
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_context_route( pub async fn get_context_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_context::Request<'_>>, body: Ruma<get_context::Request<'_>>,
) -> ConduitResult<get_context::Response> { ) -> ConduitResult<get_context::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

15
src/client_server/device.rs

@ -1,7 +1,4 @@
use std::sync::Arc; use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma};
use super::State;
use crate::{utils, ConduitResult, Database, Error, Ruma};
use ruma::api::client::{ use ruma::api::client::{
error::ErrorKind, error::ErrorKind,
r0::{ r0::{
@ -20,7 +17,7 @@ use rocket::{delete, get, post, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_devices_route( pub async fn get_devices_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_devices::Request>, body: Ruma<get_devices::Request>,
) -> ConduitResult<get_devices::Response> { ) -> ConduitResult<get_devices::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -40,7 +37,7 @@ pub async fn get_devices_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_device_route( pub async fn get_device_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_device::Request<'_>>, body: Ruma<get_device::Request<'_>>,
) -> ConduitResult<get_device::Response> { ) -> ConduitResult<get_device::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -59,7 +56,7 @@ pub async fn get_device_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn update_device_route( pub async fn update_device_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<update_device::Request<'_>>, body: Ruma<update_device::Request<'_>>,
) -> ConduitResult<update_device::Response> { ) -> ConduitResult<update_device::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -85,7 +82,7 @@ pub async fn update_device_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn delete_device_route( pub async fn delete_device_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<delete_device::Request<'_>>, body: Ruma<delete_device::Request<'_>>,
) -> ConduitResult<delete_device::Response> { ) -> ConduitResult<delete_device::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -139,7 +136,7 @@ pub async fn delete_device_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn delete_devices_route( pub async fn delete_devices_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<delete_devices::Request<'_>>, body: Ruma<delete_devices::Request<'_>>,
) -> ConduitResult<delete_devices::Response> { ) -> ConduitResult<delete_devices::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

13
src/client_server/directory.rs

@ -1,7 +1,4 @@
use std::sync::Arc; use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Result, Ruma};
use log::info; use log::info;
use ruma::{ use ruma::{
api::{ api::{
@ -35,7 +32,7 @@ use rocket::{get, post, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_public_rooms_filtered_route( pub async fn get_public_rooms_filtered_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_public_rooms_filtered::Request<'_>>, body: Ruma<get_public_rooms_filtered::Request<'_>>,
) -> ConduitResult<get_public_rooms_filtered::Response> { ) -> ConduitResult<get_public_rooms_filtered::Response> {
get_public_rooms_filtered_helper( get_public_rooms_filtered_helper(
@ -55,7 +52,7 @@ pub async fn get_public_rooms_filtered_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_public_rooms_route( pub async fn get_public_rooms_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_public_rooms::Request<'_>>, body: Ruma<get_public_rooms::Request<'_>>,
) -> ConduitResult<get_public_rooms::Response> { ) -> ConduitResult<get_public_rooms::Response> {
let response = get_public_rooms_filtered_helper( let response = get_public_rooms_filtered_helper(
@ -84,7 +81,7 @@ pub async fn get_public_rooms_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_room_visibility_route( pub async fn set_room_visibility_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_room_visibility::Request<'_>>, body: Ruma<set_room_visibility::Request<'_>>,
) -> ConduitResult<set_room_visibility::Response> { ) -> ConduitResult<set_room_visibility::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -114,7 +111,7 @@ pub async fn set_room_visibility_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_room_visibility_route( pub async fn get_room_visibility_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_room_visibility::Request<'_>>, body: Ruma<get_room_visibility::Request<'_>>,
) -> ConduitResult<get_room_visibility::Response> { ) -> ConduitResult<get_room_visibility::Response> {
Ok(get_room_visibility::Response { Ok(get_room_visibility::Response {

21
src/client_server/keys.rs

@ -1,5 +1,5 @@
use super::{State, SESSION_ID_LENGTH}; use super::SESSION_ID_LENGTH;
use crate::{utils, ConduitResult, Database, Error, Result, Ruma}; use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma};
use ruma::{ use ruma::{
api::client::{ api::client::{
error::ErrorKind, error::ErrorKind,
@ -14,10 +14,7 @@ use ruma::{
encryption::UnsignedDeviceInfo, encryption::UnsignedDeviceInfo,
DeviceId, DeviceKeyAlgorithm, UserId, DeviceId, DeviceKeyAlgorithm, UserId,
}; };
use std::{ use std::collections::{BTreeMap, HashSet};
collections::{BTreeMap, HashSet},
sync::Arc,
};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::{get, post}; use rocket::{get, post};
@ -28,7 +25,7 @@ use rocket::{get, post};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn upload_keys_route( pub async fn upload_keys_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<upload_keys::Request>, body: Ruma<upload_keys::Request>,
) -> ConduitResult<upload_keys::Response> { ) -> ConduitResult<upload_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -77,7 +74,7 @@ pub async fn upload_keys_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_keys_route( pub async fn get_keys_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_keys::Request<'_>>, body: Ruma<get_keys::Request<'_>>,
) -> ConduitResult<get_keys::Response> { ) -> ConduitResult<get_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -98,7 +95,7 @@ pub async fn get_keys_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn claim_keys_route( pub async fn claim_keys_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<claim_keys::Request>, body: Ruma<claim_keys::Request>,
) -> ConduitResult<claim_keys::Response> { ) -> ConduitResult<claim_keys::Response> {
let response = claim_keys_helper(&body.one_time_keys, &db)?; let response = claim_keys_helper(&body.one_time_keys, &db)?;
@ -114,7 +111,7 @@ pub async fn claim_keys_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn upload_signing_keys_route( pub async fn upload_signing_keys_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<upload_signing_keys::Request<'_>>, body: Ruma<upload_signing_keys::Request<'_>>,
) -> ConduitResult<upload_signing_keys::Response> { ) -> ConduitResult<upload_signing_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -177,7 +174,7 @@ pub async fn upload_signing_keys_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn upload_signatures_route( pub async fn upload_signatures_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<upload_signatures::Request>, body: Ruma<upload_signatures::Request>,
) -> ConduitResult<upload_signatures::Response> { ) -> ConduitResult<upload_signatures::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -238,7 +235,7 @@ pub async fn upload_signatures_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_key_changes_route( pub async fn get_key_changes_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_key_changes::Request<'_>>, body: Ruma<get_key_changes::Request<'_>>,
) -> ConduitResult<get_key_changes::Response> { ) -> ConduitResult<get_key_changes::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

15
src/client_server/media.rs

@ -1,20 +1,21 @@
use super::State; use crate::{
use crate::{database::media::FileMeta, utils, ConduitResult, Database, Error, Ruma}; database::media::FileMeta, database::DatabaseGuard, utils, ConduitResult, Error, Ruma,
};
use ruma::api::client::{ use ruma::api::client::{
error::ErrorKind, error::ErrorKind,
r0::media::{create_content, get_content, get_content_thumbnail, get_media_config}, r0::media::{create_content, get_content, get_content_thumbnail, get_media_config},
}; };
use std::convert::TryInto;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::{get, post}; use rocket::{get, post};
use std::{convert::TryInto, sync::Arc};
const MXC_LENGTH: usize = 32; const MXC_LENGTH: usize = 32;
#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] #[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))]
#[tracing::instrument(skip(db))] #[tracing::instrument(skip(db))]
pub async fn get_media_config_route( pub async fn get_media_config_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
) -> ConduitResult<get_media_config::Response> { ) -> ConduitResult<get_media_config::Response> {
Ok(get_media_config::Response { Ok(get_media_config::Response {
upload_size: db.globals.max_request_size().into(), upload_size: db.globals.max_request_size().into(),
@ -28,7 +29,7 @@ pub async fn get_media_config_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn create_content_route( pub async fn create_content_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_content::Request<'_>>, body: Ruma<create_content::Request<'_>>,
) -> ConduitResult<create_content::Response> { ) -> ConduitResult<create_content::Response> {
let mxc = format!( let mxc = format!(
@ -66,7 +67,7 @@ pub async fn create_content_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_content_route( pub async fn get_content_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_content::Request<'_>>, body: Ruma<get_content::Request<'_>>,
) -> ConduitResult<get_content::Response> { ) -> ConduitResult<get_content::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
@ -119,7 +120,7 @@ pub async fn get_content_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_content_thumbnail_route( pub async fn get_content_thumbnail_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_content_thumbnail::Request<'_>>, body: Ruma<get_content_thumbnail::Request<'_>>,
) -> ConduitResult<get_content_thumbnail::Response> { ) -> ConduitResult<get_content_thumbnail::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);

117
src/client_server/membership.rs

@ -1,6 +1,6 @@
use super::State;
use crate::{ use crate::{
client_server, client_server,
database::DatabaseGuard,
pdu::{PduBuilder, PduEvent}, pdu::{PduBuilder, PduEvent},
server_server, utils, ConduitResult, Database, Error, Result, Ruma, server_server, utils, ConduitResult, Database, Error, Result, Ruma,
}; };
@ -44,7 +44,7 @@ use rocket::{get, post};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn join_room_by_id_route( pub async fn join_room_by_id_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<join_room_by_id::Request<'_>>, body: Ruma<join_room_by_id::Request<'_>>,
) -> ConduitResult<join_room_by_id::Response> { ) -> ConduitResult<join_room_by_id::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -65,14 +65,18 @@ pub async fn join_room_by_id_route(
servers.insert(body.room_id.server_name().to_owned()); servers.insert(body.room_id.server_name().to_owned());
join_room_by_id_helper( let ret = join_room_by_id_helper(
&db, &db,
body.sender_user.as_ref(), body.sender_user.as_ref(),
&body.room_id, &body.room_id,
&servers, &servers,
body.third_party_signed.as_ref(), body.third_party_signed.as_ref(),
) )
.await .await;
db.flush().await?;
ret
} }
#[cfg_attr( #[cfg_attr(
@ -81,7 +85,7 @@ pub async fn join_room_by_id_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn join_room_by_id_or_alias_route( pub async fn join_room_by_id_or_alias_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<join_room_by_id_or_alias::Request<'_>>, body: Ruma<join_room_by_id_or_alias::Request<'_>>,
) -> ConduitResult<join_room_by_id_or_alias::Response> { ) -> ConduitResult<join_room_by_id_or_alias::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -135,7 +139,7 @@ pub async fn join_room_by_id_or_alias_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn leave_room_route( pub async fn leave_room_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<leave_room::Request<'_>>, body: Ruma<leave_room::Request<'_>>,
) -> ConduitResult<leave_room::Response> { ) -> ConduitResult<leave_room::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -153,7 +157,7 @@ pub async fn leave_room_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn invite_user_route( pub async fn invite_user_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<invite_user::Request<'_>>, body: Ruma<invite_user::Request<'_>>,
) -> ConduitResult<invite_user::Response> { ) -> ConduitResult<invite_user::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -173,7 +177,7 @@ pub async fn invite_user_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn kick_user_route( pub async fn kick_user_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<kick_user::Request<'_>>, body: Ruma<kick_user::Request<'_>>,
) -> ConduitResult<kick_user::Response> { ) -> ConduitResult<kick_user::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -199,6 +203,16 @@ pub async fn kick_user_route(
event.membership = ruma::events::room::member::MembershipState::Leave; event.membership = ruma::events::room::member::MembershipState::Leave;
// TODO: reason // TODO: reason
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
@ -210,8 +224,11 @@ pub async fn kick_user_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(kick_user::Response::new().into()) Ok(kick_user::Response::new().into())
@ -223,7 +240,7 @@ pub async fn kick_user_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn ban_user_route( pub async fn ban_user_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<ban_user::Request<'_>>, body: Ruma<ban_user::Request<'_>>,
) -> ConduitResult<ban_user::Response> { ) -> ConduitResult<ban_user::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -257,6 +274,16 @@ pub async fn ban_user_route(
}, },
)?; )?;
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
@ -268,8 +295,11 @@ pub async fn ban_user_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(ban_user::Response::new().into()) Ok(ban_user::Response::new().into())
@ -281,7 +311,7 @@ pub async fn ban_user_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn unban_user_route( pub async fn unban_user_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<unban_user::Request<'_>>, body: Ruma<unban_user::Request<'_>>,
) -> ConduitResult<unban_user::Response> { ) -> ConduitResult<unban_user::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -306,6 +336,16 @@ pub async fn unban_user_route(
event.membership = ruma::events::room::member::MembershipState::Leave; event.membership = ruma::events::room::member::MembershipState::Leave;
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
@ -317,8 +357,11 @@ pub async fn unban_user_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(unban_user::Response::new().into()) Ok(unban_user::Response::new().into())
@ -330,7 +373,7 @@ pub async fn unban_user_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn forget_room_route( pub async fn forget_room_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<forget_room::Request<'_>>, body: Ruma<forget_room::Request<'_>>,
) -> ConduitResult<forget_room::Response> { ) -> ConduitResult<forget_room::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -348,7 +391,7 @@ pub async fn forget_room_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn joined_rooms_route( pub async fn joined_rooms_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<joined_rooms::Request>, body: Ruma<joined_rooms::Request>,
) -> ConduitResult<joined_rooms::Response> { ) -> ConduitResult<joined_rooms::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -369,7 +412,7 @@ pub async fn joined_rooms_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_member_events_route( pub async fn get_member_events_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_member_events::Request<'_>>, body: Ruma<get_member_events::Request<'_>>,
) -> ConduitResult<get_member_events::Response> { ) -> ConduitResult<get_member_events::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -399,7 +442,7 @@ pub async fn get_member_events_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn joined_members_route( pub async fn joined_members_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<joined_members::Request<'_>>, body: Ruma<joined_members::Request<'_>>,
) -> ConduitResult<joined_members::Response> { ) -> ConduitResult<joined_members::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -442,6 +485,16 @@ async fn join_room_by_id_helper(
) -> ConduitResult<join_room_by_id::Response> { ) -> ConduitResult<join_room_by_id::Response> {
let sender_user = sender_user.expect("user is authenticated"); let sender_user = sender_user.expect("user is authenticated");
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
// Ask a remote server if we don't have this room // Ask a remote server if we don't have this room
if !db.rooms.exists(&room_id)? && room_id.server_name() != db.globals.server_name() { if !db.rooms.exists(&room_id)? && room_id.server_name() != db.globals.server_name() {
let mut make_join_response_and_server = Err(Error::BadServerResponse( let mut make_join_response_and_server = Err(Error::BadServerResponse(
@ -615,16 +668,9 @@ async fn join_room_by_id_helper(
// pdu without it's state. This is okay because append_pdu can't fail. // pdu without it's state. This is okay because append_pdu can't fail.
let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?; let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?;
let count = db.globals.next_count()?;
let mut pdu_id = room_id.as_bytes().to_vec();
pdu_id.push(0xff);
pdu_id.extend_from_slice(&count.to_be_bytes());
db.rooms.append_pdu( db.rooms.append_pdu(
&pdu, &pdu,
utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"),
count,
&pdu_id,
&[pdu.event_id.clone()], &[pdu.event_id.clone()],
db, db,
)?; )?;
@ -652,9 +698,12 @@ async fn join_room_by_id_helper(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(join_room_by_id::Response::new(room_id.clone()).into()) Ok(join_room_by_id::Response::new(room_id.clone()).into())
@ -724,13 +773,23 @@ async fn validate_and_add_event_id(
Ok((event_id, value)) Ok((event_id, value))
} }
pub async fn invite_helper( pub async fn invite_helper<'a>(
sender_user: &UserId, sender_user: &UserId,
user_id: &UserId, user_id: &UserId,
room_id: &RoomId, room_id: &RoomId,
db: &Database, db: &Database,
is_direct: bool, is_direct: bool,
) -> Result<()> { ) -> Result<()> {
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
if user_id.server_name() != db.globals.server_name() { if user_id.server_name() != db.globals.server_name() {
let prev_events = db let prev_events = db
.rooms .rooms
@ -866,6 +925,8 @@ pub async fn invite_helper(
) )
.expect("event is valid, we just created it"); .expect("event is valid, we just created it");
drop(mutex_lock);
let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; let invite_room_state = db.rooms.calculate_invite_state(&pdu)?;
let response = db let response = db
.sending .sending
@ -905,8 +966,15 @@ pub async fn invite_helper(
) )
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?;
let pdu_id = let pdu_id = server_server::handle_incoming_pdu(
server_server::handle_incoming_pdu(&origin, &event_id, value, true, &db, &pub_key_map) &origin,
&event_id,
&room_id,
value,
true,
&db,
&pub_key_map,
)
.await .await
.map_err(|_| { .map_err(|_| {
Error::BadRequest( Error::BadRequest(
@ -949,6 +1017,7 @@ pub async fn invite_helper(
&sender_user, &sender_user,
room_id, room_id,
&db, &db,
&mutex_lock,
)?; )?;
Ok(()) Ok(())

20
src/client_server/message.rs

@ -1,5 +1,4 @@
use super::State; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma};
use ruma::{ use ruma::{
api::client::{ api::client::{
error::ErrorKind, error::ErrorKind,
@ -23,12 +22,22 @@ use rocket::{get, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn send_message_event_route( pub async fn send_message_event_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<send_message_event::Request<'_>>, body: Ruma<send_message_event::Request<'_>>,
) -> ConduitResult<send_message_event::Response> { ) -> ConduitResult<send_message_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_deref(); let sender_device = body.sender_device.as_deref();
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
// Check if this is a new transaction id // Check if this is a new transaction id
if let Some(response) = if let Some(response) =
db.transaction_ids db.transaction_ids
@ -66,6 +75,7 @@ pub async fn send_message_event_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
db.transaction_ids.add_txnid( db.transaction_ids.add_txnid(
@ -75,6 +85,8 @@ pub async fn send_message_event_route(
event_id.as_bytes(), event_id.as_bytes(),
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(send_message_event::Response::new(event_id).into()) Ok(send_message_event::Response::new(event_id).into())
@ -86,7 +98,7 @@ pub async fn send_message_event_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_message_events_route( pub async fn get_message_events_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_message_events::Request<'_>>, body: Ruma<get_message_events::Request<'_>>,
) -> ConduitResult<get_message_events::Response> { ) -> ConduitResult<get_message_events::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

4
src/client_server/mod.rs

@ -64,9 +64,7 @@ pub use voip::*;
use super::State; use super::State;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use { use {
crate::ConduitResult, crate::ConduitResult, rocket::options, ruma::api::client::r0::to_device::send_event_to_device,
rocket::{options, State},
ruma::api::client::r0::to_device::send_event_to_device,
}; };
pub const DEVICE_ID_LENGTH: usize = 10; pub const DEVICE_ID_LENGTH: usize = 10;

9
src/client_server/presence.rs

@ -1,7 +1,6 @@
use super::State; use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma};
use crate::{utils, ConduitResult, Database, Ruma};
use ruma::api::client::r0::presence::{get_presence, set_presence}; use ruma::api::client::r0::presence::{get_presence, set_presence};
use std::{convert::TryInto, sync::Arc, time::Duration}; use std::{convert::TryInto, time::Duration};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::{get, put}; use rocket::{get, put};
@ -12,7 +11,7 @@ use rocket::{get, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_presence_route( pub async fn set_presence_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_presence::Request<'_>>, body: Ruma<set_presence::Request<'_>>,
) -> ConduitResult<set_presence::Response> { ) -> ConduitResult<set_presence::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -53,7 +52,7 @@ pub async fn set_presence_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_presence_route( pub async fn get_presence_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_presence::Request<'_>>, body: Ruma<get_presence::Request<'_>>,
) -> ConduitResult<get_presence::Response> { ) -> ConduitResult<get_presence::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

47
src/client_server/profile.rs

@ -1,5 +1,4 @@
use super::State; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma};
use ruma::{ use ruma::{
api::client::{ api::client::{
error::ErrorKind, error::ErrorKind,
@ -10,10 +9,10 @@ use ruma::{
events::EventType, events::EventType,
serde::Raw, serde::Raw,
}; };
use std::{convert::TryInto, sync::Arc};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::{get, put}; use rocket::{get, put};
use std::{convert::TryInto, sync::Arc};
#[cfg_attr( #[cfg_attr(
feature = "conduit_bin", feature = "conduit_bin",
@ -21,7 +20,7 @@ use std::{convert::TryInto, sync::Arc};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_displayname_route( pub async fn set_displayname_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_display_name::Request<'_>>, body: Ruma<set_display_name::Request<'_>>,
) -> ConduitResult<set_display_name::Response> { ) -> ConduitResult<set_display_name::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -70,9 +69,19 @@ pub async fn set_displayname_route(
}) })
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
{ {
let _ = db let mutex = Arc::clone(
.rooms db.globals
.build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db); .roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let _ =
db.rooms
.build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock);
// Presence update // Presence update
db.rooms.edus.update_presence( db.rooms.edus.update_presence(
@ -108,7 +117,7 @@ pub async fn set_displayname_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_displayname_route( pub async fn get_displayname_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_display_name::Request<'_>>, body: Ruma<get_display_name::Request<'_>>,
) -> ConduitResult<get_display_name::Response> { ) -> ConduitResult<get_display_name::Response> {
Ok(get_display_name::Response { Ok(get_display_name::Response {
@ -123,7 +132,7 @@ pub async fn get_displayname_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_avatar_url_route( pub async fn set_avatar_url_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_avatar_url::Request<'_>>, body: Ruma<set_avatar_url::Request<'_>>,
) -> ConduitResult<set_avatar_url::Response> { ) -> ConduitResult<set_avatar_url::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -172,9 +181,19 @@ pub async fn set_avatar_url_route(
}) })
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
{ {
let _ = db let mutex = Arc::clone(
.rooms db.globals
.build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db); .roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let _ =
db.rooms
.build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock);
// Presence update // Presence update
db.rooms.edus.update_presence( db.rooms.edus.update_presence(
@ -210,7 +229,7 @@ pub async fn set_avatar_url_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_avatar_url_route( pub async fn get_avatar_url_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_avatar_url::Request<'_>>, body: Ruma<get_avatar_url::Request<'_>>,
) -> ConduitResult<get_avatar_url::Response> { ) -> ConduitResult<get_avatar_url::Response> {
Ok(get_avatar_url::Response { Ok(get_avatar_url::Response {
@ -225,7 +244,7 @@ pub async fn get_avatar_url_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_profile_route( pub async fn get_profile_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_profile::Request<'_>>, body: Ruma<get_profile::Request<'_>>,
) -> ConduitResult<get_profile::Response> { ) -> ConduitResult<get_profile::Response> {
if !db.users.exists(&body.user_id)? { if !db.users.exists(&body.user_id)? {

25
src/client_server/push.rs

@ -1,7 +1,4 @@
use std::sync::Arc; use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::{ use ruma::{
api::client::{ api::client::{
error::ErrorKind, error::ErrorKind,
@ -24,7 +21,7 @@ use rocket::{delete, get, post, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_pushrules_all_route( pub async fn get_pushrules_all_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_pushrules_all::Request>, body: Ruma<get_pushrules_all::Request>,
) -> ConduitResult<get_pushrules_all::Response> { ) -> ConduitResult<get_pushrules_all::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -49,7 +46,7 @@ pub async fn get_pushrules_all_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_pushrule_route( pub async fn get_pushrule_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_pushrule::Request<'_>>, body: Ruma<get_pushrule::Request<'_>>,
) -> ConduitResult<get_pushrule::Response> { ) -> ConduitResult<get_pushrule::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -103,7 +100,7 @@ pub async fn get_pushrule_route(
)] )]
#[tracing::instrument(skip(db, req))] #[tracing::instrument(skip(db, req))]
pub async fn set_pushrule_route( pub async fn set_pushrule_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
req: Ruma<set_pushrule::Request<'_>>, req: Ruma<set_pushrule::Request<'_>>,
) -> ConduitResult<set_pushrule::Response> { ) -> ConduitResult<set_pushrule::Response> {
let sender_user = req.sender_user.as_ref().expect("user is authenticated"); let sender_user = req.sender_user.as_ref().expect("user is authenticated");
@ -206,7 +203,7 @@ pub async fn set_pushrule_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_pushrule_actions_route( pub async fn get_pushrule_actions_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_pushrule_actions::Request<'_>>, body: Ruma<get_pushrule_actions::Request<'_>>,
) -> ConduitResult<get_pushrule_actions::Response> { ) -> ConduitResult<get_pushrule_actions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -265,7 +262,7 @@ pub async fn get_pushrule_actions_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_pushrule_actions_route( pub async fn set_pushrule_actions_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_pushrule_actions::Request<'_>>, body: Ruma<set_pushrule_actions::Request<'_>>,
) -> ConduitResult<set_pushrule_actions::Response> { ) -> ConduitResult<set_pushrule_actions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -339,7 +336,7 @@ pub async fn set_pushrule_actions_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_pushrule_enabled_route( pub async fn get_pushrule_enabled_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_pushrule_enabled::Request<'_>>, body: Ruma<get_pushrule_enabled::Request<'_>>,
) -> ConduitResult<get_pushrule_enabled::Response> { ) -> ConduitResult<get_pushrule_enabled::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -400,7 +397,7 @@ pub async fn get_pushrule_enabled_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_pushrule_enabled_route( pub async fn set_pushrule_enabled_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_pushrule_enabled::Request<'_>>, body: Ruma<set_pushrule_enabled::Request<'_>>,
) -> ConduitResult<set_pushrule_enabled::Response> { ) -> ConduitResult<set_pushrule_enabled::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -479,7 +476,7 @@ pub async fn set_pushrule_enabled_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn delete_pushrule_route( pub async fn delete_pushrule_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<delete_pushrule::Request<'_>>, body: Ruma<delete_pushrule::Request<'_>>,
) -> ConduitResult<delete_pushrule::Response> { ) -> ConduitResult<delete_pushrule::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -548,7 +545,7 @@ pub async fn delete_pushrule_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_pushers_route( pub async fn get_pushers_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_pushers::Request>, body: Ruma<get_pushers::Request>,
) -> ConduitResult<get_pushers::Response> { ) -> ConduitResult<get_pushers::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -565,7 +562,7 @@ pub async fn get_pushers_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_pushers_route( pub async fn set_pushers_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_pusher::Request>, body: Ruma<set_pusher::Request>,
) -> ConduitResult<set_pusher::Response> { ) -> ConduitResult<set_pusher::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

9
src/client_server/read_marker.rs

@ -1,5 +1,4 @@
use super::State; use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::{ use ruma::{
api::client::{ api::client::{
error::ErrorKind, error::ErrorKind,
@ -9,10 +8,10 @@ use ruma::{
receipt::ReceiptType, receipt::ReceiptType,
MilliSecondsSinceUnixEpoch, MilliSecondsSinceUnixEpoch,
}; };
use std::collections::BTreeMap;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::post; use rocket::post;
use std::{collections::BTreeMap, sync::Arc};
#[cfg_attr( #[cfg_attr(
feature = "conduit_bin", feature = "conduit_bin",
@ -20,7 +19,7 @@ use std::{collections::BTreeMap, sync::Arc};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn set_read_marker_route( pub async fn set_read_marker_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<set_read_marker::Request<'_>>, body: Ruma<set_read_marker::Request<'_>>,
) -> ConduitResult<set_read_marker::Response> { ) -> ConduitResult<set_read_marker::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -87,7 +86,7 @@ pub async fn set_read_marker_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn create_receipt_route( pub async fn create_receipt_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_receipt::Request<'_>>, body: Ruma<create_receipt::Request<'_>>,
) -> ConduitResult<create_receipt::Response> { ) -> ConduitResult<create_receipt::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

21
src/client_server/redact.rs

@ -1,10 +1,10 @@
use super::State; use std::sync::Arc;
use crate::{pdu::PduBuilder, ConduitResult, Database, Ruma};
use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma};
use ruma::{ use ruma::{
api::client::r0::redact::redact_event, api::client::r0::redact::redact_event,
events::{room::redaction, EventType}, events::{room::redaction, EventType},
}; };
use std::sync::Arc;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::put; use rocket::put;
@ -15,11 +15,21 @@ use rocket::put;
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn redact_event_route( pub async fn redact_event_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<redact_event::Request<'_>>, body: Ruma<redact_event::Request<'_>>,
) -> ConduitResult<redact_event::Response> { ) -> ConduitResult<redact_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let event_id = db.rooms.build_and_append_pdu( let event_id = db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomRedaction, event_type: EventType::RoomRedaction,
@ -34,8 +44,11 @@ pub async fn redact_event_route(
&sender_user, &sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
Ok(redact_event::Response { event_id }.into()) Ok(redact_event::Response { event_id }.into())

50
src/client_server/room.rs

@ -1,5 +1,7 @@
use super::State; use crate::{
use crate::{client_server::invite_helper, pdu::PduBuilder, ConduitResult, Database, Error, Ruma}; client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Error,
Ruma,
};
use log::info; use log::info;
use ruma::{ use ruma::{
api::client::{ api::client::{
@ -24,13 +26,23 @@ use rocket::{get, post};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn create_room_route( pub async fn create_room_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_room::Request<'_>>, body: Ruma<create_room::Request<'_>>,
) -> ConduitResult<create_room::Response> { ) -> ConduitResult<create_room::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let room_id = RoomId::new(db.globals.server_name()); let room_id = RoomId::new(db.globals.server_name());
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let alias = body let alias = body
.room_alias_name .room_alias_name
.as_ref() .as_ref()
@ -67,6 +79,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 2. Let the room creator join // 2. Let the room creator join
@ -88,6 +101,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 3. Power levels // 3. Power levels
@ -142,6 +156,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4. Events set by preset // 4. Events set by preset
@ -168,6 +183,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.2 History Visibility // 4.2 History Visibility
@ -185,6 +201,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 4.3 Guest Access // 4.3 Guest Access
@ -210,6 +227,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
// 5. Events listed in initial_state // 5. Events listed in initial_state
@ -225,7 +243,7 @@ pub async fn create_room_route(
} }
db.rooms db.rooms
.build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db)?; .build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db, &mutex_lock)?;
} }
// 6. Events implied by name and topic // 6. Events implied by name and topic
@ -246,6 +264,7 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }
@ -264,10 +283,12 @@ pub async fn create_room_route(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
} }
// 7. Events implied by invite (and TODO: invite_3pid) // 7. Events implied by invite (and TODO: invite_3pid)
drop(mutex_lock);
for user_id in &body.invite { for user_id in &body.invite {
let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await; let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await;
} }
@ -294,7 +315,7 @@ pub async fn create_room_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_room_event_route( pub async fn get_room_event_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_room_event::Request<'_>>, body: Ruma<get_room_event::Request<'_>>,
) -> ConduitResult<get_room_event::Response> { ) -> ConduitResult<get_room_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -322,7 +343,7 @@ pub async fn get_room_event_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn upgrade_room_route( pub async fn upgrade_room_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<upgrade_room::Request<'_>>, body: Ruma<upgrade_room::Request<'_>>,
_room_id: String, _room_id: String,
) -> ConduitResult<upgrade_room::Response> { ) -> ConduitResult<upgrade_room::Response> {
@ -338,6 +359,16 @@ pub async fn upgrade_room_route(
// Create a replacement room // Create a replacement room
let replacement_room = RoomId::new(db.globals.server_name()); let replacement_room = RoomId::new(db.globals.server_name());
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
// Fail if the sender does not have the required permissions // Fail if the sender does not have the required permissions
let tombstone_event_id = db.rooms.build_and_append_pdu( let tombstone_event_id = db.rooms.build_and_append_pdu(
@ -355,6 +386,7 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
// Get the old room federations status // Get the old room federations status
@ -395,6 +427,7 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&replacement_room, &replacement_room,
&db, &db,
&mutex_lock,
)?; )?;
// Join the new room // Join the new room
@ -416,6 +449,7 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&replacement_room, &replacement_room,
&db, &db,
&mutex_lock,
)?; )?;
// Recommended transferable state events list from the specs // Recommended transferable state events list from the specs
@ -449,6 +483,7 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&replacement_room, &replacement_room,
&db, &db,
&mutex_lock,
)?; )?;
} }
@ -492,8 +527,11 @@ pub async fn upgrade_room_route(
sender_user, sender_user,
&body.room_id, &body.room_id,
&db, &db,
&mutex_lock,
)?; )?;
drop(mutex_lock);
db.flush().await?; db.flush().await?;
// Return the replacement room id // Return the replacement room id

6
src/client_server/search.rs

@ -1,7 +1,5 @@
use super::State; use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::api::client::{error::ErrorKind, r0::search::search_events}; use ruma::api::client::{error::ErrorKind, r0::search::search_events};
use std::sync::Arc;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::post; use rocket::post;
@ -14,7 +12,7 @@ use std::collections::BTreeMap;
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn search_events_route( pub async fn search_events_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<search_events::Request<'_>>, body: Ruma<search_events::Request<'_>>,
) -> ConduitResult<search_events::Response> { ) -> ConduitResult<search_events::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

12
src/client_server/session.rs

@ -1,7 +1,5 @@
use std::sync::Arc; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma};
use super::{State, DEVICE_ID_LENGTH, TOKEN_LENGTH};
use crate::{utils, ConduitResult, Database, Error, Ruma};
use log::info; use log::info;
use ruma::{ use ruma::{
api::client::{ api::client::{
@ -52,7 +50,7 @@ pub async fn get_login_types_route() -> ConduitResult<get_login_types::Response>
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn login_route( pub async fn login_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<login::Request<'_>>, body: Ruma<login::Request<'_>>,
) -> ConduitResult<login::Response> { ) -> ConduitResult<login::Response> {
// Validate login method // Validate login method
@ -169,7 +167,7 @@ pub async fn login_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn logout_route( pub async fn logout_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<logout::Request>, body: Ruma<logout::Request>,
) -> ConduitResult<logout::Response> { ) -> ConduitResult<logout::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -197,7 +195,7 @@ pub async fn logout_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn logout_all_route( pub async fn logout_all_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<logout_all::Request>, body: Ruma<logout_all::Request>,
) -> ConduitResult<logout_all::Response> { ) -> ConduitResult<logout_all::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

26
src/client_server/state.rs

@ -1,7 +1,8 @@
use std::sync::Arc; use std::sync::Arc;
use super::State; use crate::{
use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma}; database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma,
};
use ruma::{ use ruma::{
api::client::{ api::client::{
error::ErrorKind, error::ErrorKind,
@ -27,7 +28,7 @@ use rocket::{get, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn send_state_event_for_key_route( pub async fn send_state_event_for_key_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<send_state_event::Request<'_>>, body: Ruma<send_state_event::Request<'_>>,
) -> ConduitResult<send_state_event::Response> { ) -> ConduitResult<send_state_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -53,7 +54,7 @@ pub async fn send_state_event_for_key_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn send_state_event_for_empty_key_route( pub async fn send_state_event_for_empty_key_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<send_state_event::Request<'_>>, body: Ruma<send_state_event::Request<'_>>,
) -> ConduitResult<send_state_event::Response> { ) -> ConduitResult<send_state_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -79,7 +80,7 @@ pub async fn send_state_event_for_empty_key_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_state_events_route( pub async fn get_state_events_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_state_events::Request<'_>>, body: Ruma<get_state_events::Request<'_>>,
) -> ConduitResult<get_state_events::Response> { ) -> ConduitResult<get_state_events::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -126,7 +127,7 @@ pub async fn get_state_events_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_state_events_for_key_route( pub async fn get_state_events_for_key_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_state_events_for_key::Request<'_>>, body: Ruma<get_state_events_for_key::Request<'_>>,
) -> ConduitResult<get_state_events_for_key::Response> { ) -> ConduitResult<get_state_events_for_key::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -177,7 +178,7 @@ pub async fn get_state_events_for_key_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_state_events_for_empty_key_route( pub async fn get_state_events_for_empty_key_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_state_events_for_key::Request<'_>>, body: Ruma<get_state_events_for_key::Request<'_>>,
) -> ConduitResult<get_state_events_for_key::Response> { ) -> ConduitResult<get_state_events_for_key::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -258,6 +259,16 @@ pub async fn send_state_event_for_key_helper(
} }
} }
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let event_id = db.rooms.build_and_append_pdu( let event_id = db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type, event_type,
@ -269,6 +280,7 @@ pub async fn send_state_event_for_key_helper(
&sender_user, &sender_user,
&room_id, &room_id,
&db, &db,
&mutex_lock,
)?; )?;
Ok(event_id) Ok(event_id)

415
src/client_server/sync.rs

@ -1,5 +1,4 @@
use super::State; use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse};
use crate::{ConduitResult, Database, Error, Result, Ruma, RumaResponse};
use log::error; use log::error;
use ruma::{ use ruma::{
api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, api::client::r0::{sync::sync_events, uiaa::UiaaResponse},
@ -35,13 +34,15 @@ use rocket::{get, tokio};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn sync_events_route( pub async fn sync_events_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<sync_events::Request<'_>>, body: Ruma<sync_events::Request<'_>>,
) -> std::result::Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> { ) -> std::result::Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut rx = match db let arc_db = Arc::new(db);
let mut rx = match arc_db
.globals .globals
.sync_receivers .sync_receivers
.write() .write()
@ -52,7 +53,7 @@ pub async fn sync_events_route(
let (tx, rx) = tokio::sync::watch::channel(None); let (tx, rx) = tokio::sync::watch::channel(None);
tokio::spawn(sync_helper_wrapper( tokio::spawn(sync_helper_wrapper(
Arc::clone(&db), Arc::clone(&arc_db),
sender_user.clone(), sender_user.clone(),
sender_device.clone(), sender_device.clone(),
body.since.clone(), body.since.clone(),
@ -68,7 +69,7 @@ pub async fn sync_events_route(
let (tx, rx) = tokio::sync::watch::channel(None); let (tx, rx) = tokio::sync::watch::channel(None);
tokio::spawn(sync_helper_wrapper( tokio::spawn(sync_helper_wrapper(
Arc::clone(&db), Arc::clone(&arc_db),
sender_user.clone(), sender_user.clone(),
sender_device.clone(), sender_device.clone(),
body.since.clone(), body.since.clone(),
@ -88,7 +89,9 @@ pub async fn sync_events_route(
let we_have_to_wait = rx.borrow().is_none(); let we_have_to_wait = rx.borrow().is_none();
if we_have_to_wait { if we_have_to_wait {
let _ = rx.changed().await; if let Err(e) = rx.changed().await {
error!("Error waiting for sync: {}", e);
}
} }
let result = match rx let result = match rx
@ -104,7 +107,7 @@ pub async fn sync_events_route(
} }
pub async fn sync_helper_wrapper( pub async fn sync_helper_wrapper(
db: Arc<Database>, db: Arc<DatabaseGuard>,
sender_user: UserId, sender_user: UserId,
sender_device: Box<DeviceId>, sender_device: Box<DeviceId>,
since: Option<String>, since: Option<String>,
@ -142,11 +145,13 @@ pub async fn sync_helper_wrapper(
} }
} }
drop(db);
let _ = tx.send(Some(r.map(|(r, _)| r.into()))); let _ = tx.send(Some(r.map(|(r, _)| r.into())));
} }
async fn sync_helper( async fn sync_helper(
db: Arc<Database>, db: Arc<DatabaseGuard>,
sender_user: UserId, sender_user: UserId,
sender_device: Box<DeviceId>, sender_device: Box<DeviceId>,
since: Option<String>, since: Option<String>,
@ -184,6 +189,18 @@ async fn sync_helper(
for room_id in db.rooms.rooms_joined(&sender_user) { for room_id in db.rooms.rooms_joined(&sender_user) {
let room_id = room_id?; let room_id = room_id?;
// Get and drop the lock to wait for remaining operations to finish
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
drop(mutex_lock);
let mut non_timeline_pdus = db let mut non_timeline_pdus = db
.rooms .rooms
.pdus_until(&sender_user, &room_id, u64::MAX) .pdus_until(&sender_user, &room_id, u64::MAX)
@ -222,13 +239,16 @@ async fn sync_helper(
// Database queries: // Database queries:
let current_shortstatehash = db.rooms.current_shortstatehash(&room_id)?; let current_shortstatehash = db
.rooms
.current_shortstatehash(&room_id)?
.expect("All rooms have state");
// These type is Option<Option<_>>. The outer Option is None when there is no event between let first_pdu_before_since = db
// since and the current room state, meaning there should be no updates. .rooms
// The inner Option is None when there is an event, but there is no state hash associated .pdus_until(&sender_user, &room_id, since)
// with it. This can happen for the RoomCreate event, so all updates should arrive. .next()
let first_pdu_before_since = db.rooms.pdus_until(&sender_user, &room_id, since).next(); .transpose()?;
let pdus_after_since = db let pdus_after_since = db
.rooms .rooms
@ -236,11 +256,78 @@ async fn sync_helper(
.next() .next()
.is_some(); .is_some();
let since_shortstatehash = first_pdu_before_since.as_ref().map(|pdu| { let since_shortstatehash = first_pdu_before_since
.as_ref()
.map(|pdu| {
db.rooms db.rooms
.pdu_shortstatehash(&pdu.as_ref().ok()?.1.event_id) .pdu_shortstatehash(&pdu.1.event_id)
.ok()? .transpose()
}); .expect("all pdus have state")
})
.transpose()?;
// Calculates joined_member_count, invited_member_count and heroes
let calculate_counts = || {
let joined_member_count = db.rooms.room_members(&room_id).count();
let invited_member_count = db.rooms.room_members_invited(&room_id).count();
// Recalculate heroes (first 5 members)
let mut heroes = Vec::new();
if joined_member_count + invited_member_count <= 5 {
// Go through all PDUs and for each member event, check if the user is still joined or
// invited until we have 5 or we reach the end
for hero in db
.rooms
.all_pdus(&sender_user, &room_id)
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == EventType::RoomMember)
.map(|(_, pdu)| {
let content = serde_json::from_value::<
ruma::events::room::member::MemberEventContent,
>(pdu.content.clone())
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
if let Some(state_key) = &pdu.state_key {
let user_id = UserId::try_from(state_key.clone()).map_err(|_| {
Error::bad_database("Invalid UserId in member PDU.")
})?;
// The membership was and still is invite or join
if matches!(
content.membership,
MembershipState::Join | MembershipState::Invite
) && (db.rooms.is_joined(&user_id, &room_id)?
|| db.rooms.is_invited(&user_id, &room_id)?)
{
Ok::<_, Error>(Some(state_key.clone()))
} else {
Ok(None)
}
} else {
Ok(None)
}
})
// Filter out buggy users
.filter_map(|u| u.ok())
// Filter for possible heroes
.flatten()
{
if heroes.contains(&hero) || hero == sender_user.as_str() {
continue;
}
heroes.push(hero);
}
}
(
Some(joined_member_count),
Some(invited_member_count),
heroes,
)
};
let ( let (
heroes, heroes,
@ -248,63 +335,107 @@ async fn sync_helper(
invited_member_count, invited_member_count,
joined_since_last_sync, joined_since_last_sync,
state_events, state_events,
) = if pdus_after_since && Some(current_shortstatehash) != since_shortstatehash { ) = if since_shortstatehash.is_none() {
let current_state = db.rooms.room_state_full(&room_id)?; // Probably since = 0, we will do an initial sync
let current_members = current_state let (joined_member_count, invited_member_count, heroes) = calculate_counts();
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
let state_events = current_state_ids
.iter() .iter()
.filter(|(key, _)| key.0 == EventType::RoomMember) .map(|id| db.rooms.get_pdu(id))
.map(|(key, value)| (&key.1, value)) // Only keep state key .filter_map(|r| r.ok().flatten())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let encrypted_room = current_state
.get(&(EventType::RoomEncryption, "".to_owned()))
.is_some();
let since_state = since_shortstatehash
.as_ref()
.map(|since_shortstatehash| {
since_shortstatehash
.map(|since_shortstatehash| db.rooms.state_full(since_shortstatehash))
.transpose()
})
.transpose()?;
let since_encryption = since_state.as_ref().map(|state| {
state
.as_ref()
.map(|state| state.get(&(EventType::RoomEncryption, "".to_owned())))
});
// Calculations:
let new_encrypted_room =
encrypted_room && since_encryption.map_or(true, |encryption| encryption.is_none());
let send_member_count = since_state.as_ref().map_or(true, |since_state| { (
since_state.as_ref().map_or(true, |since_state| { heroes,
current_members.len() joined_member_count,
!= since_state invited_member_count,
.iter() true,
.filter(|(key, _)| key.0 == EventType::RoomMember) state_events,
.count() )
}) } else if !pdus_after_since || since_shortstatehash == Some(current_shortstatehash) {
}); // No state changes
(Vec::new(), None, None, false, Vec::new())
} else {
// Incremental /sync
let since_shortstatehash = since_shortstatehash.unwrap();
let since_sender_member = since_state.as_ref().map(|since_state| { let since_sender_member = db
since_state.as_ref().and_then(|state| { .rooms
state .state_get(
.get(&(EventType::RoomMember, sender_user.as_str().to_owned())) since_shortstatehash,
&EventType::RoomMember,
sender_user.as_str(),
)?
.and_then(|pdu| { .and_then(|pdu| {
serde_json::from_value::< serde_json::from_value::<Raw<ruma::events::room::member::MemberEventContent>>(
Raw<ruma::events::room::member::MemberEventContent>, pdu.content.clone(),
>(pdu.content.clone()) )
.expect("Raw::from_value always works") .expect("Raw::from_value always works")
.deserialize() .deserialize()
.map_err(|_| Error::bad_database("Invalid PDU in database.")) .map_err(|_| Error::bad_database("Invalid PDU in database."))
.ok() .ok()
})
})
}); });
let joined_since_last_sync = since_sender_member
.map_or(true, |member| member.membership != MembershipState::Join);
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?;
let state_events = if joined_since_last_sync {
current_state_ids
.iter()
.map(|id| db.rooms.get_pdu(id))
.filter_map(|r| r.ok().flatten())
.collect::<Vec<_>>()
} else {
current_state_ids
.difference(&since_state_ids)
.filter(|id| {
!timeline_pdus
.iter()
.any(|(_, timeline_pdu)| timeline_pdu.event_id == **id)
})
.map(|id| db.rooms.get_pdu(id))
.filter_map(|r| r.ok().flatten())
.collect()
};
let encrypted_room = db
.rooms
.state_get(current_shortstatehash, &EventType::RoomEncryption, "")?
.is_some();
let since_encryption =
db.rooms
.state_get(since_shortstatehash, &EventType::RoomEncryption, "")?;
// Calculations:
let new_encrypted_room = encrypted_room && since_encryption.is_none();
let send_member_count = state_events
.iter()
.any(|event| event.kind == EventType::RoomMember);
if encrypted_room { if encrypted_room {
for (user_id, current_member) in current_members { for (user_id, current_member) in db
.rooms
.room_members(&room_id)
.filter_map(|r| r.ok())
.filter_map(|user_id| {
db.rooms
.state_get(
current_shortstatehash,
&EventType::RoomMember,
user_id.as_str(),
)
.ok()
.flatten()
.map(|current_member| (user_id, current_member))
})
{
let current_membership = serde_json::from_value::< let current_membership = serde_json::from_value::<
Raw<ruma::events::room::member::MemberEventContent>, Raw<ruma::events::room::member::MemberEventContent>,
>(current_member.content.clone()) >(current_member.content.clone())
@ -313,31 +444,23 @@ async fn sync_helper(
.map_err(|_| Error::bad_database("Invalid PDU in database."))? .map_err(|_| Error::bad_database("Invalid PDU in database."))?
.membership; .membership;
let since_membership = let since_membership = db
since_state .rooms
.as_ref() .state_get(
.map_or(MembershipState::Leave, |since_state| { since_shortstatehash,
since_state &EventType::RoomMember,
.as_ref() user_id.as_str(),
.and_then(|since_state| { )?
since_state
.get(&(EventType::RoomMember, user_id.clone()))
.and_then(|since_member| { .and_then(|since_member| {
serde_json::from_value::< serde_json::from_value::<
Raw<ruma::events::room::member::MemberEventContent>, Raw<ruma::events::room::member::MemberEventContent>,
>( >(since_member.content.clone())
since_member.content.clone()
)
.expect("Raw::from_value always works") .expect("Raw::from_value always works")
.deserialize() .deserialize()
.map_err(|_| { .map_err(|_| Error::bad_database("Invalid PDU in database."))
Error::bad_database("Invalid PDU in database.")
})
.ok() .ok()
}) })
}) .map_or(MembershipState::Leave, |member| member.membership);
.map_or(MembershipState::Leave, |member| member.membership)
});
let user_id = UserId::try_from(user_id.clone()) let user_id = UserId::try_from(user_id.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
@ -359,10 +482,6 @@ async fn sync_helper(
} }
} }
let joined_since_last_sync = since_sender_member.map_or(true, |member| {
member.map_or(true, |member| member.membership != MembershipState::Join)
});
if joined_since_last_sync && encrypted_room || new_encrypted_room { if joined_since_last_sync && encrypted_room || new_encrypted_room {
// If the user is in a new encrypted room, give them all joined users // If the user is in a new encrypted room, give them all joined users
device_list_updates.extend( device_list_updates.extend(
@ -382,100 +501,11 @@ async fn sync_helper(
} }
let (joined_member_count, invited_member_count, heroes) = if send_member_count { let (joined_member_count, invited_member_count, heroes) = if send_member_count {
let joined_member_count = db.rooms.room_members(&room_id).count(); calculate_counts()
let invited_member_count = db.rooms.room_members_invited(&room_id).count();
// Recalculate heroes (first 5 members)
let mut heroes = Vec::new();
if joined_member_count + invited_member_count <= 5 {
// Go through all PDUs and for each member event, check if the user is still joined or
// invited until we have 5 or we reach the end
for hero in db
.rooms
.all_pdus(&sender_user, &room_id)
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == EventType::RoomMember)
.map(|(_, pdu)| {
let content = serde_json::from_value::<
ruma::events::room::member::MemberEventContent,
>(pdu.content.clone())
.map_err(|_| {
Error::bad_database("Invalid member event in database.")
})?;
if let Some(state_key) = &pdu.state_key {
let user_id =
UserId::try_from(state_key.clone()).map_err(|_| {
Error::bad_database("Invalid UserId in member PDU.")
})?;
// The membership was and still is invite or join
if matches!(
content.membership,
MembershipState::Join | MembershipState::Invite
) && (db.rooms.is_joined(&user_id, &room_id)?
|| db.rooms.is_invited(&user_id, &room_id)?)
{
Ok::<_, Error>(Some(state_key.clone()))
} else {
Ok(None)
}
} else {
Ok(None)
}
})
// Filter out buggy users
.filter_map(|u| u.ok())
// Filter for possible heroes
.flatten()
{
if heroes.contains(&hero) || hero == sender_user.as_str() {
continue;
}
heroes.push(hero);
}
}
(
Some(joined_member_count),
Some(invited_member_count),
heroes,
)
} else { } else {
(None, None, Vec::new()) (None, None, Vec::new())
}; };
let state_events = if joined_since_last_sync {
current_state
.iter()
.map(|(_, pdu)| pdu.to_sync_state_event())
.collect()
} else {
match since_state {
None => Vec::new(),
Some(Some(since_state)) => current_state
.iter()
.filter(|(key, value)| {
since_state.get(key).map(|e| &e.event_id) != Some(&value.event_id)
})
.filter(|(_, value)| {
!timeline_pdus.iter().any(|(_, timeline_pdu)| {
timeline_pdu.kind == value.kind
&& timeline_pdu.state_key == value.state_key
})
})
.map(|(_, pdu)| pdu.to_sync_state_event())
.collect(),
Some(None) => current_state
.iter()
.map(|(_, pdu)| pdu.to_sync_state_event())
.collect(),
}
};
( (
heroes, heroes,
joined_member_count, joined_member_count,
@ -483,8 +513,6 @@ async fn sync_helper(
joined_since_last_sync, joined_since_last_sync,
state_events, state_events,
) )
} else {
(Vec::new(), None, None, false, Vec::new())
}; };
// Look for device list updates in this room // Look for device list updates in this room
@ -575,7 +603,10 @@ async fn sync_helper(
events: room_events, events: room_events,
}, },
state: sync_events::State { state: sync_events::State {
events: state_events, events: state_events
.iter()
.map(|pdu| pdu.to_sync_state_event())
.collect(),
}, },
ephemeral: sync_events::Ephemeral { events: edus }, ephemeral: sync_events::Ephemeral { events: edus },
}; };
@ -622,6 +653,19 @@ async fn sync_helper(
let mut left_rooms = BTreeMap::new(); let mut left_rooms = BTreeMap::new();
for result in db.rooms.rooms_left(&sender_user) { for result in db.rooms.rooms_left(&sender_user) {
let (room_id, left_state_events) = result?; let (room_id, left_state_events) = result?;
// Get and drop the lock to wait for remaining operations to finish
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
drop(mutex_lock);
let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;
// Left before last sync // Left before last sync
@ -648,6 +692,19 @@ async fn sync_helper(
let mut invited_rooms = BTreeMap::new(); let mut invited_rooms = BTreeMap::new();
for result in db.rooms.rooms_invited(&sender_user) { for result in db.rooms.rooms_invited(&sender_user) {
let (room_id, invite_state_events) = result?; let (room_id, invite_state_events) = result?;
// Get and drop the lock to wait for remaining operations to finish
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
drop(mutex_lock);
let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;
// Invited before last sync // Invited before last sync

11
src/client_server/tag.rs

@ -1,10 +1,9 @@
use super::State; use crate::{database::DatabaseGuard, ConduitResult, Ruma};
use crate::{ConduitResult, Database, Ruma};
use ruma::{ use ruma::{
api::client::r0::tag::{create_tag, delete_tag, get_tags}, api::client::r0::tag::{create_tag, delete_tag, get_tags},
events::EventType, events::EventType,
}; };
use std::{collections::BTreeMap, sync::Arc}; use std::collections::BTreeMap;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::{delete, get, put}; use rocket::{delete, get, put};
@ -15,7 +14,7 @@ use rocket::{delete, get, put};
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn update_tag_route( pub async fn update_tag_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_tag::Request<'_>>, body: Ruma<create_tag::Request<'_>>,
) -> ConduitResult<create_tag::Response> { ) -> ConduitResult<create_tag::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -52,7 +51,7 @@ pub async fn update_tag_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn delete_tag_route( pub async fn delete_tag_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<delete_tag::Request<'_>>, body: Ruma<delete_tag::Request<'_>>,
) -> ConduitResult<delete_tag::Response> { ) -> ConduitResult<delete_tag::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -86,7 +85,7 @@ pub async fn delete_tag_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_tags_route( pub async fn get_tags_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_tags::Request<'_>>, body: Ruma<get_tags::Request<'_>>,
) -> ConduitResult<get_tags::Response> { ) -> ConduitResult<get_tags::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

10
src/client_server/to_device.rs

@ -1,7 +1,4 @@
use std::sync::Arc; use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::{ use ruma::{
api::client::{error::ErrorKind, r0::to_device::send_event_to_device}, api::client::{error::ErrorKind, r0::to_device::send_event_to_device},
to_device::DeviceIdOrAllDevices, to_device::DeviceIdOrAllDevices,
@ -16,13 +13,15 @@ use rocket::put;
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn send_event_to_device_route( pub async fn send_event_to_device_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<send_event_to_device::Request<'_>>, body: Ruma<send_event_to_device::Request<'_>>,
) -> ConduitResult<send_event_to_device::Response> { ) -> ConduitResult<send_event_to_device::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_deref(); let sender_device = body.sender_device.as_deref();
// TODO: uncomment when https://github.com/vector-im/element-android/issues/3589 is solved
// Check if this is a new transaction id // Check if this is a new transaction id
/*
if db if db
.transaction_ids .transaction_ids
.existing_txnid(sender_user, sender_device, &body.txn_id)? .existing_txnid(sender_user, sender_device, &body.txn_id)?
@ -30,6 +29,7 @@ pub async fn send_event_to_device_route(
{ {
return Ok(send_event_to_device::Response.into()); return Ok(send_event_to_device::Response.into());
} }
*/
for (target_user_id, map) in &body.messages { for (target_user_id, map) in &body.messages {
for (target_device_id_maybe, event) in map { for (target_device_id_maybe, event) in map {

7
src/client_server/typing.rs

@ -1,7 +1,4 @@
use std::sync::Arc; use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma};
use super::State;
use crate::{utils, ConduitResult, Database, Ruma};
use create_typing_event::Typing; use create_typing_event::Typing;
use ruma::api::client::r0::typing::create_typing_event; use ruma::api::client::r0::typing::create_typing_event;
@ -14,7 +11,7 @@ use rocket::put;
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn create_typing_event_route( pub fn create_typing_event_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_typing_event::Request<'_>>, body: Ruma<create_typing_event::Request<'_>>,
) -> ConduitResult<create_typing_event::Response> { ) -> ConduitResult<create_typing_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");

7
src/client_server/user_directory.rs

@ -1,7 +1,4 @@
use std::sync::Arc; use crate::{database::DatabaseGuard, ConduitResult, Ruma};
use super::State;
use crate::{ConduitResult, Database, Ruma};
use ruma::api::client::r0::user_directory::search_users; use ruma::api::client::r0::user_directory::search_users;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
@ -13,7 +10,7 @@ use rocket::post;
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn search_users_route( pub async fn search_users_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<search_users::Request<'_>>, body: Ruma<search_users::Request<'_>>,
) -> ConduitResult<search_users::Response> { ) -> ConduitResult<search_users::Response> {
let limit = u64::from(body.limit) as usize; let limit = u64::from(body.limit) as usize;

206
src/database.rs

@ -19,16 +19,22 @@ use abstraction::DatabaseEngine;
use directories::ProjectDirs; use directories::ProjectDirs;
use log::error; use log::error;
use lru_cache::LruCache; use lru_cache::LruCache;
use rocket::futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; use rocket::{
futures::{channel::mpsc, stream::FuturesUnordered, StreamExt},
outcome::IntoOutcome,
request::{FromRequest, Request},
try_outcome, State,
};
use ruma::{DeviceId, ServerName, UserId}; use ruma::{DeviceId, ServerName, UserId};
use serde::Deserialize; use serde::{de::IgnoredAny, Deserialize};
use std::{ use std::{
collections::HashMap, collections::{BTreeMap, HashMap},
fs::{self, remove_dir_all}, fs::{self, remove_dir_all},
io::Write, io::Write,
ops::Deref,
sync::{Arc, RwLock}, sync::{Arc, RwLock},
}; };
use tokio::sync::Semaphore; use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore};
use self::proxy::ProxyConfig; use self::proxy::ProxyConfig;
@ -36,8 +42,16 @@ use self::proxy::ProxyConfig;
pub struct Config { pub struct Config {
server_name: Box<ServerName>, server_name: Box<ServerName>,
database_path: String, database_path: String,
#[serde(default = "default_cache_capacity")] #[serde(default = "default_db_cache_capacity")]
cache_capacity: u32, db_cache_capacity: u32,
#[serde(default = "default_sqlite_read_pool_size")]
sqlite_read_pool_size: usize,
#[serde(default = "false_fn")]
sqlite_wal_clean_timer: bool,
#[serde(default = "default_sqlite_wal_clean_second_interval")]
sqlite_wal_clean_second_interval: u32,
#[serde(default = "default_sqlite_wal_clean_second_timeout")]
sqlite_wal_clean_second_timeout: u32,
#[serde(default = "default_max_request_size")] #[serde(default = "default_max_request_size")]
max_request_size: u32, max_request_size: u32,
#[serde(default = "default_max_concurrent_requests")] #[serde(default = "default_max_concurrent_requests")]
@ -57,6 +71,29 @@ pub struct Config {
trusted_servers: Vec<Box<ServerName>>, trusted_servers: Vec<Box<ServerName>>,
#[serde(default = "default_log")] #[serde(default = "default_log")]
pub log: String, pub log: String,
#[serde(flatten)]
catchall: BTreeMap<String, IgnoredAny>,
}
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];
impl Config {
pub fn warn_deprecated(&self) {
let mut was_deprecated = false;
for key in self
.catchall
.keys()
.filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
{
log::warn!("Config parameter {} is deprecated", key);
was_deprecated = true;
}
if was_deprecated {
log::warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted");
}
}
} }
fn false_fn() -> bool { fn false_fn() -> bool {
@ -67,10 +104,22 @@ fn true_fn() -> bool {
true true
} }
fn default_cache_capacity() -> u32 { fn default_db_cache_capacity() -> u32 {
1024 * 1024 * 1024 1024 * 1024 * 1024
} }
fn default_sqlite_read_pool_size() -> usize {
num_cpus::get().max(1)
}
fn default_sqlite_wal_clean_second_interval() -> u32 {
60
}
fn default_sqlite_wal_clean_second_timeout() -> u32 {
2
}
fn default_max_request_size() -> u32 { fn default_max_request_size() -> u32 {
20 * 1024 * 1024 // Default to 20 MB 20 * 1024 * 1024 // Default to 20 MB
} }
@ -84,12 +133,16 @@ fn default_log() -> String {
} }
#[cfg(feature = "sled")] #[cfg(feature = "sled")]
pub type Engine = abstraction::SledEngine; pub type Engine = abstraction::sled::Engine;
#[cfg(feature = "rocksdb")] #[cfg(feature = "rocksdb")]
pub type Engine = abstraction::RocksDbEngine; pub type Engine = abstraction::rocksdb::Engine;
#[cfg(feature = "sqlite")]
pub type Engine = abstraction::sqlite::Engine;
pub struct Database { pub struct Database {
_db: Arc<Engine>,
pub globals: globals::Globals, pub globals: globals::Globals,
pub users: users::Users, pub users: users::Users,
pub uiaa: uiaa::Uiaa, pub uiaa: uiaa::Uiaa,
@ -118,7 +171,7 @@ impl Database {
} }
/// Load an existing database or create a new one. /// Load an existing database or create a new one.
pub async fn load_or_create(config: Config) -> Result<Arc<Self>> { pub async fn load_or_create(config: Config) -> Result<Arc<TokioRwLock<Self>>> {
let builder = Engine::open(&config)?; let builder = Engine::open(&config)?;
if config.max_request_size < 1024 { if config.max_request_size < 1024 {
@ -128,7 +181,8 @@ impl Database {
let (admin_sender, admin_receiver) = mpsc::unbounded(); let (admin_sender, admin_receiver) = mpsc::unbounded();
let (sending_sender, sending_receiver) = mpsc::unbounded(); let (sending_sender, sending_receiver) = mpsc::unbounded();
let db = Arc::new(Self { let db = Arc::new(TokioRwLock::from(Self {
_db: builder.clone(),
users: users::Users { users: users::Users {
userid_password: builder.open_tree("userid_password")?, userid_password: builder.open_tree("userid_password")?,
userid_displayname: builder.open_tree("userid_displayname")?, userid_displayname: builder.open_tree("userid_displayname")?,
@ -195,7 +249,7 @@ impl Database {
eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?,
prevevent_parent: builder.open_tree("prevevent_parent")?, prevevent_parent: builder.open_tree("prevevent_parent")?,
pdu_cache: RwLock::new(LruCache::new(1_000_000)), pdu_cache: RwLock::new(LruCache::new(10_000)),
}, },
account_data: account_data::AccountData { account_data: account_data::AccountData {
roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?,
@ -231,10 +285,12 @@ impl Database {
globals: globals::Globals::load( globals: globals::Globals::load(
builder.open_tree("global")?, builder.open_tree("global")?,
builder.open_tree("server_signingkeys")?, builder.open_tree("server_signingkeys")?,
config, config.clone(),
)?, )?,
}); }));
{
let db = db.read().await;
// MIGRATIONS // MIGRATIONS
// TODO: database versions of new dbs should probably not be 0 // TODO: database versions of new dbs should probably not be 0
if db.globals.database_version()? < 1 { if db.globals.database_version()? < 1 {
@ -319,12 +375,22 @@ impl Database {
println!("Migration: 3 -> 4 finished"); println!("Migration: 3 -> 4 finished");
} }
}
let guard = db.read().await;
// This data is probably outdated // This data is probably outdated
db.rooms.edus.presenceid_presence.clear()?; guard.rooms.edus.presenceid_presence.clear()?;
guard.admin.start_handler(Arc::clone(&db), admin_receiver);
guard
.sending
.start_handler(Arc::clone(&db), sending_receiver);
db.admin.start_handler(Arc::clone(&db), admin_receiver); drop(guard);
db.sending.start_handler(Arc::clone(&db), sending_receiver);
#[cfg(feature = "sqlite")]
Self::start_wal_clean_task(&db, &config).await;
Ok(db) Ok(db)
} }
@ -413,13 +479,113 @@ impl Database {
.watch_prefix(&userid_bytes), .watch_prefix(&userid_bytes),
); );
futures.push(Box::pin(self.globals.rotate.watch()));
// Wait until one of them finds something // Wait until one of them finds something
futures.next().await; futures.next().await;
} }
pub async fn flush(&self) -> Result<()> { pub async fn flush(&self) -> Result<()> {
// noop while we don't use sled 1.0 let start = std::time::Instant::now();
//self._db.flush_async().await?;
Ok(()) let res = self._db.flush();
log::debug!("flush: took {:?}", start.elapsed());
res
}
#[cfg(feature = "sqlite")]
pub fn flush_wal(&self) -> Result<()> {
self._db.flush_wal()
}
#[cfg(feature = "sqlite")]
pub async fn start_wal_clean_task(lock: &Arc<TokioRwLock<Self>>, config: &Config) {
use tokio::{
select,
signal::unix::{signal, SignalKind},
time::{interval, timeout},
};
use std::{
sync::Weak,
time::{Duration, Instant},
};
let weak: Weak<TokioRwLock<Database>> = Arc::downgrade(&lock);
let lock_timeout = Duration::from_secs(config.sqlite_wal_clean_second_timeout as u64);
let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64);
let do_timer = config.sqlite_wal_clean_timer;
tokio::spawn(async move {
let mut i = interval(timer_interval);
let mut s = signal(SignalKind::hangup()).unwrap();
loop {
select! {
_ = i.tick(), if do_timer => {
log::info!(target: "wal-trunc", "Timer ticked")
}
_ = s.recv() => {
log::info!(target: "wal-trunc", "Received SIGHUP")
}
};
if let Some(arc) = Weak::upgrade(&weak) {
log::info!(target: "wal-trunc", "Rotating sync helpers...");
// This actually creates a very small race condition between firing this and trying to acquire the subsequent write lock.
// Though it is not a huge deal if the write lock doesn't "catch", as it'll harmlessly time out.
arc.read().await.globals.rotate.fire();
log::info!(target: "wal-trunc", "Locking...");
let guard = {
if let Ok(guard) = timeout(lock_timeout, arc.write()).await {
guard
} else {
log::info!(target: "wal-trunc", "Lock failed in timeout, canceled.");
continue;
}
};
log::info!(target: "wal-trunc", "Locked, flushing...");
let start = Instant::now();
if let Err(e) = guard.flush_wal() {
log::error!(target: "wal-trunc", "Errored: {}", e);
} else {
log::info!(target: "wal-trunc", "Flushed in {:?}", start.elapsed());
}
} else {
break;
}
}
});
}
}
pub struct DatabaseGuard(OwnedRwLockReadGuard<Database>);
impl Deref for DatabaseGuard {
type Target = OwnedRwLockReadGuard<Database>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[rocket::async_trait]
impl<'r> FromRequest<'r> for DatabaseGuard {
type Error = ();
async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome<Self, ()> {
let db = try_outcome!(req.guard::<State<'_, Arc<TokioRwLock<Database>>>>().await);
Ok(DatabaseGuard(Arc::clone(&db).read_owned().await)).or_forward(())
}
}
impl Into<DatabaseGuard> for OwnedRwLockReadGuard<Database> {
fn into(self) -> DatabaseGuard {
DatabaseGuard(self)
} }
} }

297
src/database/abstraction.rs

@ -1,28 +1,21 @@
use super::Config; use super::Config;
use crate::{utils, Result}; use crate::Result;
use log::warn;
use std::{future::Future, pin::Pin, sync::Arc}; use std::{future::Future, pin::Pin, sync::Arc};
#[cfg(feature = "rocksdb")] #[cfg(feature = "rocksdb")]
use std::{collections::BTreeMap, sync::RwLock}; pub mod rocksdb;
#[cfg(feature = "sled")] #[cfg(feature = "sled")]
pub struct SledEngine(sled::Db); pub mod sled;
#[cfg(feature = "sled")]
pub struct SledEngineTree(sled::Tree);
#[cfg(feature = "rocksdb")] #[cfg(feature = "sqlite")]
pub struct RocksDbEngine(rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>); pub mod sqlite;
#[cfg(feature = "rocksdb")]
pub struct RocksDbEngineTree<'a> {
db: Arc<RocksDbEngine>,
name: &'a str,
watchers: RwLock<BTreeMap<Vec<u8>, Vec<tokio::sync::oneshot::Sender<()>>>>,
}
pub trait DatabaseEngine: Sized { pub trait DatabaseEngine: Sized {
fn open(config: &Config) -> Result<Arc<Self>>; fn open(config: &Config) -> Result<Arc<Self>>;
fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>>; fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>>;
fn flush(self: &Arc<Self>) -> Result<()>;
} }
pub trait Tree: Send + Sync { pub trait Tree: Send + Sync {
@ -32,20 +25,20 @@ pub trait Tree: Send + Sync {
fn remove(&self, key: &[u8]) -> Result<()>; fn remove(&self, key: &[u8]) -> Result<()>;
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + Sync + 'a>; fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a>;
fn iter_from<'a>( fn iter_from<'a>(
&'a self, &'a self,
from: &[u8], from: &[u8],
backwards: bool, backwards: bool,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a>; ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a>;
fn increment(&self, key: &[u8]) -> Result<Vec<u8>>; fn increment(&self, key: &[u8]) -> Result<Vec<u8>>;
fn scan_prefix<'a>( fn scan_prefix<'a>(
&'a self, &'a self,
prefix: Vec<u8>, prefix: Vec<u8>,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + 'a>; ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a>;
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>>; fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>>;
@ -57,273 +50,3 @@ pub trait Tree: Send + Sync {
Ok(()) Ok(())
} }
} }
#[cfg(feature = "sled")]
impl DatabaseEngine for SledEngine {
fn open(config: &Config) -> Result<Arc<Self>> {
Ok(Arc::new(SledEngine(
sled::Config::default()
.path(&config.database_path)
.cache_capacity(config.cache_capacity as u64)
.use_compression(true)
.open()?,
)))
}
fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
Ok(Arc::new(SledEngineTree(self.0.open_tree(name)?)))
}
}
#[cfg(feature = "sled")]
impl Tree for SledEngineTree {
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
Ok(self.0.get(key)?.map(|v| v.to_vec()))
}
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
self.0.insert(key, value)?;
Ok(())
}
fn remove(&self, key: &[u8]) -> Result<()> {
self.0.remove(key)?;
Ok(())
}
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + Sync + 'a> {
Box::new(
self.0
.iter()
.filter_map(|r| {
if let Err(e) = &r {
warn!("Error: {}", e);
}
r.ok()
})
.map(|(k, v)| (k.to_vec().into(), v.to_vec().into())),
)
}
fn iter_from(
&self,
from: &[u8],
backwards: bool,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)>> {
let iter = if backwards {
self.0.range(..from)
} else {
self.0.range(from..)
};
let iter = iter
.filter_map(|r| {
if let Err(e) = &r {
warn!("Error: {}", e);
}
r.ok()
})
.map(|(k, v)| (k.to_vec().into(), v.to_vec().into()));
if backwards {
Box::new(iter.rev())
} else {
Box::new(iter)
}
}
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
Ok(self
.0
.update_and_fetch(key, utils::increment)
.map(|o| o.expect("increment always sets a value").to_vec())?)
}
fn scan_prefix<'a>(
&'a self,
prefix: Vec<u8>,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + 'a> {
let iter = self
.0
.scan_prefix(prefix)
.filter_map(|r| {
if let Err(e) = &r {
warn!("Error: {}", e);
}
r.ok()
})
.map(|(k, v)| (k.to_vec().into(), v.to_vec().into()));
Box::new(iter)
}
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
let prefix = prefix.to_vec();
Box::pin(async move {
self.0.watch_prefix(prefix).await;
})
}
}
#[cfg(feature = "rocksdb")]
impl DatabaseEngine for RocksDbEngine {
fn open(config: &Config) -> Result<Arc<Self>> {
let mut db_opts = rocksdb::Options::default();
db_opts.create_if_missing(true);
db_opts.set_max_open_files(16);
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy);
db_opts.set_target_file_size_base(256 << 20);
db_opts.set_write_buffer_size(256 << 20);
let mut block_based_options = rocksdb::BlockBasedOptions::default();
block_based_options.set_block_size(512 << 10);
db_opts.set_block_based_table_factory(&block_based_options);
let cfs = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::list_cf(
&db_opts,
&config.database_path,
)
.unwrap_or_default();
let mut options = rocksdb::Options::default();
options.set_merge_operator_associative("increment", utils::increment_rocksdb);
let db = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::open_cf_descriptors(
&db_opts,
&config.database_path,
cfs.iter()
.map(|name| rocksdb::ColumnFamilyDescriptor::new(name, options.clone())),
)?;
Ok(Arc::new(RocksDbEngine(db)))
}
fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
let mut options = rocksdb::Options::default();
options.set_merge_operator_associative("increment", utils::increment_rocksdb);
// Create if it doesn't exist
let _ = self.0.create_cf(name, &options);
Ok(Arc::new(RocksDbEngineTree {
name,
db: Arc::clone(self),
watchers: RwLock::new(BTreeMap::new()),
}))
}
}
#[cfg(feature = "rocksdb")]
impl RocksDbEngineTree<'_> {
fn cf(&self) -> rocksdb::BoundColumnFamily<'_> {
self.db.0.cf_handle(self.name).unwrap()
}
}
#[cfg(feature = "rocksdb")]
impl Tree for RocksDbEngineTree<'_> {
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
Ok(self.db.0.get_cf(self.cf(), key)?)
}
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
let watchers = self.watchers.read().unwrap();
let mut triggered = Vec::new();
for length in 0..=key.len() {
if watchers.contains_key(&key[..length]) {
triggered.push(&key[..length]);
}
}
drop(watchers);
if !triggered.is_empty() {
let mut watchers = self.watchers.write().unwrap();
for prefix in triggered {
if let Some(txs) = watchers.remove(prefix) {
for tx in txs {
let _ = tx.send(());
}
}
}
}
Ok(self.db.0.put_cf(self.cf(), key, value)?)
}
fn remove(&self, key: &[u8]) -> Result<()> {
Ok(self.db.0.delete_cf(self.cf(), key)?)
}
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + Sync + 'a> {
Box::new(
self.db
.0
.iterator_cf(self.cf(), rocksdb::IteratorMode::Start),
)
}
fn iter_from<'a>(
&'a self,
from: &[u8],
backwards: bool,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
Box::new(self.db.0.iterator_cf(
self.cf(),
rocksdb::IteratorMode::From(
from,
if backwards {
rocksdb::Direction::Reverse
} else {
rocksdb::Direction::Forward
},
),
))
}
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.db.0]), None).unwrap();
dbg!(stats.mem_table_total);
dbg!(stats.mem_table_unflushed);
dbg!(stats.mem_table_readers_total);
dbg!(stats.cache_total);
// TODO: atomic?
let old = self.get(key)?;
let new = utils::increment(old.as_deref()).unwrap();
self.insert(key, &new)?;
Ok(new)
}
fn scan_prefix<'a>(
&'a self,
prefix: Vec<u8>,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + 'a> {
Box::new(
self.db
.0
.iterator_cf(
self.cf(),
rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
)
.take_while(move |(k, _)| k.starts_with(&prefix)),
)
}
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
let (tx, rx) = tokio::sync::oneshot::channel();
self.watchers
.write()
.unwrap()
.entry(prefix.to_vec())
.or_default()
.push(tx);
Box::pin(async move {
// Tx is never destroyed
rx.await.unwrap();
})
}
}

176
src/database/abstraction/rocksdb.rs

@ -0,0 +1,176 @@
use super::super::Config;
use crate::{utils, Result};
use std::{future::Future, pin::Pin, sync::Arc};
use super::{DatabaseEngine, Tree};
use std::{collections::BTreeMap, sync::RwLock};
/// RocksDB-backed database engine. Wraps a multi-threaded RocksDB handle;
/// each logical tree is stored as a column family.
pub struct Engine(rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>);

/// A single column family ("tree") of a RocksDB [`Engine`].
pub struct RocksDbEngineTree<'a> {
    // Shared handle to the owning engine; the column family handle is
    // re-resolved by name on every operation (see `cf()`).
    db: Arc<Engine>,
    // Column family name.
    name: &'a str,
    // Oneshot senders notified (and removed) when a key with a watched
    // prefix is written via `insert`.
    watchers: RwLock<BTreeMap<Vec<u8>, Vec<tokio::sync::oneshot::Sender<()>>>>,
}
impl DatabaseEngine for Engine {
    /// Opens (or creates) the RocksDB database at `config.database_path`,
    /// re-attaching every column family that already exists on disk.
    fn open(config: &Config) -> Result<Arc<Self>> {
        let mut db_opts = rocksdb::Options::default();
        db_opts.create_if_missing(true);
        db_opts.set_max_open_files(16);
        db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
        db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy);
        db_opts.set_target_file_size_base(256 << 20);
        db_opts.set_write_buffer_size(256 << 20);

        let mut block_based_options = rocksdb::BlockBasedOptions::default();
        block_based_options.set_block_size(512 << 10);
        db_opts.set_block_based_table_factory(&block_based_options);

        // Listing column families fails if the database does not exist yet;
        // in that case start with an empty list.
        let cfs = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::list_cf(
            &db_opts,
            &config.database_path,
        )
        .unwrap_or_default();

        let mut options = rocksdb::Options::default();
        options.set_merge_operator_associative("increment", utils::increment_rocksdb);

        let db = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::open_cf_descriptors(
            &db_opts,
            &config.database_path,
            cfs.iter()
                .map(|name| rocksdb::ColumnFamilyDescriptor::new(name, options.clone())),
        )?;

        Ok(Arc::new(Engine(db)))
    }

    fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
        let mut options = rocksdb::Options::default();
        options.set_merge_operator_associative("increment", utils::increment_rocksdb);

        // Create the column family if it doesn't exist; an error here most
        // likely means it already does, which is fine.
        let _ = self.0.create_cf(name, &options);

        Ok(Arc::new(RocksDbEngineTree {
            name,
            db: Arc::clone(self),
            watchers: RwLock::new(BTreeMap::new()),
        }))
    }

    /// Flushes all memtables to SST files so writes survive a crash.
    ///
    /// This method is required by `DatabaseEngine` (the sled and sqlite
    /// backends implement it) and was missing from this impl, which would
    /// fail to compile against the trait.
    fn flush(self: &Arc<Self>) -> Result<()> {
        self.0.flush()?;
        Ok(())
    }
}
impl RocksDbEngineTree<'_> {
    /// Resolves the column family handle for this tree by name.
    ///
    /// The unwrap cannot fire in practice: `open_tree` creates the column
    /// family before constructing a `RocksDbEngineTree` for it.
    fn cf(&self) -> rocksdb::BoundColumnFamily<'_> {
        self.db.0.cf_handle(self.name).unwrap()
    }
}
impl Tree for RocksDbEngineTree<'_> {
    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        Ok(self.db.0.get_cf(self.cf(), key)?)
    }

    /// Stores `key -> value`, then wakes every watcher registered for a
    /// prefix of `key`.
    ///
    /// The write is performed *before* the watchers fire (previously it was
    /// after), so a woken watcher observes the new value — matching the
    /// sqlite backend's ordering.
    fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
        self.db.0.put_cf(self.cf(), key, value)?;

        // Collect triggered prefixes under the cheap read lock first ...
        let watchers = self.watchers.read().unwrap();
        let mut triggered = Vec::new();
        for length in 0..=key.len() {
            if watchers.contains_key(&key[..length]) {
                triggered.push(&key[..length]);
            }
        }
        drop(watchers);

        // ... and take the write lock only if something actually fired.
        if !triggered.is_empty() {
            let mut watchers = self.watchers.write().unwrap();
            for prefix in triggered {
                if let Some(txs) = watchers.remove(prefix) {
                    for tx in txs {
                        // Receiver may already be gone; that's fine.
                        let _ = tx.send(());
                    }
                }
            }
        }

        Ok(())
    }

    fn remove(&self, key: &[u8]) -> Result<()> {
        Ok(self.db.0.delete_cf(self.cf(), key)?)
    }

    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a> {
        Box::new(
            self.db
                .0
                .iterator_cf(self.cf(), rocksdb::IteratorMode::Start)
                // The engine yields boxed slices; the Tree interface uses
                // Vec — `into_vec` converts without copying.
                .map(|(k, v)| (k.into_vec(), v.into_vec())),
        )
    }

    fn iter_from<'a>(
        &'a self,
        from: &[u8],
        backwards: bool,
    ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a> {
        Box::new(
            self.db
                .0
                .iterator_cf(
                    self.cf(),
                    rocksdb::IteratorMode::From(
                        from,
                        if backwards {
                            rocksdb::Direction::Reverse
                        } else {
                            rocksdb::Direction::Forward
                        },
                    ),
                )
                .map(|(k, v)| (k.into_vec(), v.into_vec())),
        )
    }

    /// Increments the counter stored at `key` and returns the new value.
    ///
    /// TODO: not atomic — a concurrent increment can be lost. The
    /// "increment" merge operator registered in `open` could make this
    /// atomic. (The old `dbg!` memory-usage prints were debug leftovers and
    /// have been removed.)
    fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
        let old = self.get(key)?;
        let new = utils::increment(old.as_deref()).expect("utils::increment always returns Some");
        self.insert(key, &new)?;
        Ok(new)
    }

    fn scan_prefix<'a>(
        &'a self,
        prefix: Vec<u8>,
    ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a> {
        Box::new(
            self.db
                .0
                .iterator_cf(
                    self.cf(),
                    rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
                )
                .take_while(move |(k, _)| k.starts_with(&prefix))
                .map(|(k, v)| (k.into_vec(), v.into_vec())),
        )
    }

    /// Returns a future that resolves once any key starting with `prefix`
    /// is written via `insert`.
    fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
        let (tx, rx) = tokio::sync::oneshot::channel();

        self.watchers
            .write()
            .unwrap()
            .entry(prefix.to_vec())
            .or_default()
            .push(tx);

        Box::pin(async move {
            // The sender stays in `watchers` until `insert` fires it, so
            // this await only fails if the tree itself is dropped.
            rx.await.unwrap();
        })
    }
}

119
src/database/abstraction/sled.rs

@ -0,0 +1,119 @@
use super::super::Config;
use crate::{utils, Result};
use log::warn;
use std::{future::Future, pin::Pin, sync::Arc};
use super::{DatabaseEngine, Tree};
/// Sled-backed database engine wrapping a [`sled::Db`].
pub struct Engine(sled::Db);

/// A single named [`sled::Tree`] belonging to the engine.
pub struct SledEngineTree(sled::Tree);
impl DatabaseEngine for Engine {
    /// Opens (or creates) the sled database at `config.database_path` with
    /// on-disk compression and the configured cache size (bytes).
    fn open(config: &Config) -> Result<Arc<Self>> {
        Ok(Arc::new(Engine(
            sled::Config::default()
                .path(&config.database_path)
                .cache_capacity(config.db_cache_capacity as u64)
                .use_compression(true)
                .open()?,
        )))
    }

    fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
        Ok(Arc::new(SledEngineTree(self.0.open_tree(name)?)))
    }

    /// Deliberate no-op: sled flushes to disk in the background on its own,
    /// so there is nothing extra to do here.
    fn flush(self: &Arc<Self>) -> Result<()> {
        Ok(()) // noop
    }
}
impl Tree for SledEngineTree {
    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        Ok(self.0.get(key)?.map(|v| v.to_vec()))
    }

    fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
        self.0.insert(key, value)?;
        Ok(())
    }

    fn remove(&self, key: &[u8]) -> Result<()> {
        self.0.remove(key)?;
        Ok(())
    }

    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a> {
        Box::new(
            self.0
                .iter()
                .filter_map(|r| {
                    // Log (but skip) corrupt entries instead of aborting the scan.
                    if let Err(e) = &r {
                        warn!("Error: {}", e);
                    }
                    r.ok()
                })
                // IVec -> Vec<u8>. The previous `.to_vec().into()` was a
                // redundant identity conversion (clippy: useless_conversion).
                .map(|(k, v)| (k.to_vec(), v.to_vec())),
        )
    }

    /// Iterates forwards starting at `from`, or backwards (still inclusive
    /// of `from`) when `backwards` is set — matching the inclusive
    /// semantics of the other backends.
    fn iter_from(
        &self,
        from: &[u8],
        backwards: bool,
    ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send> {
        let iter = if backwards {
            self.0.range(..=from)
        } else {
            self.0.range(from..)
        };

        let iter = iter
            .filter_map(|r| {
                if let Err(e) = &r {
                    warn!("Error: {}", e);
                }
                r.ok()
            })
            .map(|(k, v)| (k.to_vec(), v.to_vec()));

        if backwards {
            // sled iterators are double-ended, so `rev` walks down from `from`.
            Box::new(iter.rev())
        } else {
            Box::new(iter)
        }
    }

    /// Atomically increments the counter at `key` using sled's
    /// compare-and-swap `update_and_fetch` loop.
    fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
        Ok(self
            .0
            .update_and_fetch(key, utils::increment)
            .map(|o| o.expect("increment always sets a value").to_vec())?)
    }

    fn scan_prefix<'a>(
        &'a self,
        prefix: Vec<u8>,
    ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a> {
        let iter = self
            .0
            .scan_prefix(prefix)
            .filter_map(|r| {
                if let Err(e) = &r {
                    warn!("Error: {}", e);
                }
                r.ok()
            })
            .map(|(k, v)| (k.to_vec(), v.to_vec()));

        Box::new(iter)
    }

    /// Delegates to sled's native subscription mechanism.
    fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
        let prefix = prefix.to_vec();
        Box::pin(async move {
            self.0.watch_prefix(prefix).await;
        })
    }
}

438
src/database/abstraction/sqlite.rs

@ -0,0 +1,438 @@
use std::{
collections::BTreeMap,
future::Future,
ops::Deref,
path::{Path, PathBuf},
pin::Pin,
sync::Arc,
thread,
time::{Duration, Instant},
};
use crate::{database::Config, Result};
use super::{DatabaseEngine, Tree};
use log::debug;
use crossbeam::channel::{bounded, Sender as ChannelSender};
use parking_lot::{Mutex, MutexGuard, RwLock};
use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension};
use tokio::sync::oneshot::Sender;
// const SQL_CREATE_TABLE: &str =
// "CREATE TABLE IF NOT EXISTS {} {{ \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL }}";
// const SQL_SELECT: &str = "SELECT value FROM {} WHERE key = ?";
// const SQL_INSERT: &str = "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)";
// const SQL_DELETE: &str = "DELETE FROM {} WHERE key = ?";
// const SQL_SELECT_ITER: &str = "SELECT key, value FROM {}";
// const SQL_SELECT_PREFIX: &str = "SELECT key, value FROM {} WHERE key LIKE ?||'%' ORDER BY key ASC";
// const SQL_SELECT_ITER_FROM_FORWARDS: &str = "SELECT key, value FROM {} WHERE key >= ? ORDER BY ASC";
// const SQL_SELECT_ITER_FROM_BACKWARDS: &str =
// "SELECT key, value FROM {} WHERE key <= ? ORDER BY DESC";
/// A tiny SQLite connection pool: one writer plus a fixed set of reader
/// connections (SQLite permits only a single writer at a time).
struct Pool {
    // The sole connection used for writes; serializes all mutations.
    writer: Mutex<Connection>,
    // Connections handed out for reads via try_lock, first free wins.
    readers: Vec<Mutex<Connection>>,
    // The strong count of this Arc (minus the pool's own reference) tracks
    // how many temporary "spillover" reader connections currently exist.
    spill_tracker: Arc<()>,
    // Database file path, used to open spillover connections on demand.
    path: PathBuf,
}

/// Threshold above which slow statements are logged (see `insert` et al.).
pub const MILLI: Duration = Duration::from_millis(1);
/// A borrowed-or-owned connection: either a guard from the reader pool, or a
/// temporary spillover connection (the `Arc<()>` keeps the spill count alive
/// for as long as the connection is in use).
enum HoldingConn<'a> {
    FromGuard(MutexGuard<'a, Connection>),
    FromOwned(Connection, Arc<()>),
}
impl<'a> Deref for HoldingConn<'a> {
type Target = Connection;
fn deref(&self) -> &Self::Target {
match self {
HoldingConn::FromGuard(guard) => guard.deref(),
HoldingConn::FromOwned(conn, _) => conn,
}
}
}
impl Pool {
    /// Creates a pool with one writer and `num_readers` reader connections,
    /// each configured with `cache_size` KiB of page cache.
    fn new<P: AsRef<Path>>(path: P, num_readers: usize, cache_size: u32) -> Result<Self> {
        let writer = Mutex::new(Self::prepare_conn(&path, Some(cache_size))?);

        // Pre-size: we know exactly how many readers we'll push.
        let mut readers = Vec::with_capacity(num_readers);
        for _ in 0..num_readers {
            readers.push(Mutex::new(Self::prepare_conn(&path, Some(cache_size))?))
        }

        Ok(Self {
            writer,
            readers,
            spill_tracker: Arc::new(()),
            path: path.as_ref().to_path_buf(),
        })
    }

    /// Opens a connection in WAL mode with `synchronous=OFF`; durability is
    /// restored on demand by `Engine::flush`/`flush_wal`.
    fn prepare_conn<P: AsRef<Path>>(path: P, cache_size: Option<u32>) -> Result<Connection> {
        let conn = Connection::open(path)?;

        conn.pragma_update(Some(Main), "journal_mode", &"WAL".to_owned())?;
        conn.pragma_update(Some(Main), "synchronous", &"OFF".to_owned())?;

        if let Some(cache_kib) = cache_size {
            // A negative cache_size tells SQLite to interpret it as KiB
            // (positive would mean pages).
            conn.pragma_update(Some(Main), "cache_size", &(-Into::<i64>::into(cache_kib)))?;
        }

        Ok(conn)
    }

    fn write_lock(&self) -> MutexGuard<'_, Connection> {
        self.writer.lock()
    }

    /// Hands out the first free reader; if every reader is busy, opens a
    /// temporary "spillover" connection so reads never queue behind each
    /// other.
    fn read_lock(&self) -> HoldingConn<'_> {
        for r in &self.readers {
            if let Some(reader) = r.try_lock() {
                return HoldingConn::FromGuard(reader);
            }
        }

        // All readers busy: count existing spillovers via the tracker's
        // strong count, minus the one reference the pool itself holds.
        let spill_arc = self.spill_tracker.clone();
        let now_count = Arc::strong_count(&spill_arc) - 1;

        log::warn!("read_lock: all readers locked, creating spillover reader...");
        if now_count > 1 {
            log::warn!("read_lock: now {} spillover readers exist", now_count);
        }

        let spilled =
            Self::prepare_conn(&self.path, None).expect("failed to open spillover connection");
        HoldingConn::FromOwned(spilled, spill_arc)
    }
}
/// SQLite-backed database engine built on a small connection [`Pool`].
pub struct Engine {
    pool: Pool,
}
impl DatabaseEngine for Engine {
    /// Opens `conduit.db` inside the configured database directory and
    /// ensures the `_noop` helper table used by `flush` exists.
    fn open(config: &Config) -> Result<Arc<Self>> {
        let pool = Pool::new(
            Path::new(&config.database_path).join("conduit.db"),
            config.sqlite_read_pool_size,
            config.db_cache_capacity / 1024, // bytes -> kb
        )?;

        pool.write_lock()
            .execute("CREATE TABLE IF NOT EXISTS _noop (\"key\" INT)", params![])?;

        let arc = Arc::new(Engine { pool });

        Ok(arc)
    }

    /// Creates the backing table for a tree if needed.
    ///
    /// NOTE(review): `name` is interpolated into the DDL with `format!`.
    /// Tree names are static, code-defined strings (see the trait's
    /// `&'static str` declaration), so this is not an injection vector —
    /// but it would be if ever called with external input.
    fn open_tree(self: &Arc<Self>, name: &str) -> Result<Arc<dyn Tree>> {
        self.pool.write_lock().execute(format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name).as_str(), [])?;

        Ok(Arc::new(SqliteTable {
            engine: Arc::clone(self),
            name: name.to_owned(),
            watchers: RwLock::new(BTreeMap::new()),
        }))
    }

    /// Durably persists everything written so far: temporarily switches to
    /// `synchronous=FULL` and runs a no-op transaction, forcing a WAL sync,
    /// then returns to the fast `synchronous=OFF` mode.
    fn flush(self: &Arc<Self>) -> Result<()> {
        self.pool
            .write_lock()
            .execute_batch(
                "
PRAGMA synchronous=FULL;
BEGIN;
DELETE FROM _noop;
INSERT INTO _noop VALUES (1);
COMMIT;
PRAGMA synchronous=OFF;
",
            )
            .map_err(Into::into)
    }
}
impl Engine {
    /// Like `DatabaseEngine::flush`, but additionally checkpoints and
    /// truncates the WAL file so it does not grow without bound.
    pub fn flush_wal(self: &Arc<Self>) -> Result<()> {
        self.pool
            .write_lock()
            .execute_batch(
                "
PRAGMA synchronous=FULL; PRAGMA wal_checkpoint=TRUNCATE;
BEGIN;
DELETE FROM _noop;
INSERT INTO _noop VALUES (1);
COMMIT;
PRAGMA wal_checkpoint=PASSIVE; PRAGMA synchronous=OFF;
",
            )
            .map_err(Into::into)
    }
}
/// One logical key/value tree, stored as a two-column SQLite table.
pub struct SqliteTable {
    engine: Arc<Engine>,
    // Table name; interpolated into SQL (trusted, code-defined — see
    // `Engine::open_tree`).
    name: String,
    // Oneshot senders notified (and removed) when a watched prefix is
    // written via `insert`.
    watchers: RwLock<BTreeMap<Vec<u8>, Vec<Sender<()>>>>,
}

/// A (key, value) pair as raw bytes.
type TupleOfBytes = (Vec<u8>, Vec<u8>);
impl SqliteTable {
    /// Fetches the value for `key` using an already-acquired connection.
    fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result<Option<Vec<u8>>> {
        Ok(guard
            .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())?
            .query_row([key], |row| row.get(0))
            .optional()?)
    }

    /// Upserts `key -> value` using an already-acquired connection.
    fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> {
        guard.execute(
            format!(
                "INSERT INTO {} (key, value) VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value = excluded.value",
                self.name
            )
            .as_str(),
            [key, value],
        )?;
        Ok(())
    }

    /// Runs `f` (a query producer) on a dedicated thread holding its own
    /// reader connection, streaming rows back through a small bounded
    /// channel. This keeps long table scans off the caller's thread.
    fn _iter_from_thread<F>(&self, f: F) -> Box<dyn Iterator<Item = TupleOfBytes> + Send>
    where
        F: (for<'a> FnOnce(&'a Connection, ChannelSender<TupleOfBytes>)) + Send + 'static,
    {
        // Capacity 5 lets the producer run a few rows ahead without
        // buffering unbounded amounts of data.
        let (s, r) = bounded::<TupleOfBytes>(5);

        let engine = self.engine.clone();

        thread::spawn(move || {
            let _ = f(&engine.pool.read_lock(), s);
        });

        Box::new(r.into_iter())
    }
}
/// Expands to an `_iter_from_thread` call that prepares `$sql`, binds
/// `$param`, and streams every resulting (key, value) row through the
/// channel. A send error (receiver dropped) simply stops the producer early
/// via `try_for_each`.
macro_rules! iter_from_thread {
    ($self:expr, $sql:expr, $param:expr) => {
        $self._iter_from_thread(move |guard, s| {
            let _ = guard
                .prepare($sql)
                .unwrap()
                .query_map($param, |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                .unwrap()
                .map(|r| r.unwrap())
                .try_for_each(|bob| s.send(bob));
        })
    };
}
impl Tree for SqliteTable {
    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        let guard = self.engine.pool.read_lock();
        self.get_with_guard(&guard, key)
    }

    /// Upserts `key -> value`, then wakes every watcher registered for a
    /// prefix of `key`. Writes slower than [`MILLI`] are logged.
    fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
        let guard = self.engine.pool.write_lock();

        let start = Instant::now();
        self.insert_with_guard(&guard, key, value)?;
        let elapsed = start.elapsed();
        if elapsed > MILLI {
            debug!("insert: took {:012?} : {}", elapsed, &self.name);
        }
        // Release the writer connection before touching the watcher map.
        drop(guard);

        // Collect triggered prefixes under the cheap read lock first ...
        let watchers = self.watchers.read();
        let mut triggered = Vec::new();
        for length in 0..=key.len() {
            if watchers.contains_key(&key[..length]) {
                triggered.push(&key[..length]);
            }
        }
        drop(watchers);

        // ... and take the write lock only if something actually fired.
        if !triggered.is_empty() {
            let mut watchers = self.watchers.write();
            for prefix in triggered {
                if let Some(txs) = watchers.remove(prefix) {
                    for tx in txs {
                        // Receiver may already be gone; that's fine.
                        let _ = tx.send(());
                    }
                }
            }
        }

        Ok(())
    }

    fn remove(&self, key: &[u8]) -> Result<()> {
        let guard = self.engine.pool.write_lock();

        let start = Instant::now();
        guard.execute(
            format!("DELETE FROM {} WHERE key = ?", self.name).as_str(),
            [key],
        )?;
        let elapsed = start.elapsed();
        if elapsed > MILLI {
            debug!("remove: took {:012?} : {}", elapsed, &self.name);
        }

        Ok(())
    }

    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = TupleOfBytes> + Send + 'a> {
        // Clone the name so the query closure can be 'static for the
        // background reader thread.
        let name = self.name.clone();
        iter_from_thread!(
            self,
            format!("SELECT key, value FROM {}", name).as_str(),
            params![]
        )
    }

    /// Streams rows starting at `from` (inclusive), ascending by key, or
    /// descending when `backwards` is set.
    fn iter_from<'a>(
        &'a self,
        from: &[u8],
        backwards: bool,
    ) -> Box<dyn Iterator<Item = TupleOfBytes> + Send + 'a> {
        let name = self.name.clone();
        let from = from.to_vec(); // TODO change interface?
        if backwards {
            iter_from_thread!(
                self,
                format!(
                    "SELECT key, value FROM {} WHERE key <= ? ORDER BY key DESC",
                    name
                )
                .as_str(),
                [from]
            )
        } else {
            iter_from_thread!(
                self,
                format!(
                    "SELECT key, value FROM {} WHERE key >= ? ORDER BY key ASC",
                    name
                )
                .as_str(),
                [from]
            )
        }
    }

    /// Reads, increments and writes back the counter at `key` while holding
    /// the writer connection, so concurrent increments serialize on that
    /// lock.
    fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
        let guard = self.engine.pool.write_lock();

        let start = Instant::now();
        let old = self.get_with_guard(&guard, key)?;
        let new =
            crate::utils::increment(old.as_deref()).expect("utils::increment always returns Some");
        self.insert_with_guard(&guard, key, &new)?;
        let elapsed = start.elapsed();
        if elapsed > MILLI {
            debug!("increment: took {:012?} : {}", elapsed, &self.name);
        }

        Ok(new)
    }

    /// Implemented on top of `iter_from`: scan forward from `prefix` and
    /// stop at the first key that no longer starts with it.
    fn scan_prefix<'a>(
        &'a self,
        prefix: Vec<u8>,
    ) -> Box<dyn Iterator<Item = TupleOfBytes> + Send + 'a> {
        Box::new(
            self.iter_from(&prefix, false)
                .take_while(move |(key, _)| key.starts_with(&prefix)),
        )
    }

    /// Returns a future that resolves once any key starting with `prefix`
    /// is written via `insert`.
    fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
        let (tx, rx) = tokio::sync::oneshot::channel();

        self.watchers
            .write()
            .entry(prefix.to_vec())
            .or_default()
            .push(tx);

        Box::pin(async move {
            // The sender stays in `watchers` until `insert` fires it, so
            // this await only fails if the table itself is dropped.
            rx.await.unwrap();
        })
    }

    fn clear(&self) -> Result<()> {
        debug!("clear: running");
        self.engine
            .pool
            .write_lock()
            .execute(format!("DELETE FROM {}", self.name).as_str(), [])?;
        debug!("clear: ran");
        Ok(())
    }
}
// TODO
// struct Pool<const NUM_READERS: usize> {
// writer: Mutex<Connection>,
// readers: [Mutex<Connection>; NUM_READERS],
// }
// // then, to pick a reader:
// for r in &pool.readers {
// if let Ok(reader) = r.try_lock() {
// // use reader
// }
// }
// // none unlocked, pick the next reader
// pool.readers[pool.counter.fetch_add(1, Relaxed) % NUM_READERS].lock()

2
src/database/account_data.rs

@ -127,7 +127,7 @@ impl AccountData {
room_id: Option<&RoomId>, room_id: Option<&RoomId>,
user_id: &UserId, user_id: &UserId,
kind: &EventType, kind: &EventType,
) -> Result<Option<(Box<[u8]>, Box<[u8]>)>> { ) -> Result<Option<(Vec<u8>, Vec<u8>)>> {
let mut prefix = room_id let mut prefix = room_id
.map(|r| r.to_string()) .map(|r| r.to_string())
.unwrap_or_default() .unwrap_or_default()

55
src/database/admin.rs

@ -10,6 +10,7 @@ use ruma::{
events::{room::message, EventType}, events::{room::message, EventType},
UserId, UserId,
}; };
use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard};
pub enum AdminCommand { pub enum AdminCommand {
RegisterAppservice(serde_yaml::Value), RegisterAppservice(serde_yaml::Value),
@ -25,32 +26,43 @@ pub struct Admin {
impl Admin { impl Admin {
pub fn start_handler( pub fn start_handler(
&self, &self,
db: Arc<Database>, db: Arc<RwLock<Database>>,
mut receiver: mpsc::UnboundedReceiver<AdminCommand>, mut receiver: mpsc::UnboundedReceiver<AdminCommand>,
) { ) {
tokio::spawn(async move { tokio::spawn(async move {
// TODO: Use futures when we have long admin commands // TODO: Use futures when we have long admin commands
//let mut futures = FuturesUnordered::new(); //let mut futures = FuturesUnordered::new();
let conduit_user = UserId::try_from(format!("@conduit:{}", db.globals.server_name())) let guard = db.read().await;
let conduit_user =
UserId::try_from(format!("@conduit:{}", guard.globals.server_name()))
.expect("@conduit:server_name is valid"); .expect("@conduit:server_name is valid");
let conduit_room = db let conduit_room = guard
.rooms .rooms
.id_from_alias( .id_from_alias(
&format!("#admins:{}", db.globals.server_name()) &format!("#admins:{}", guard.globals.server_name())
.try_into() .try_into()
.expect("#admins:server_name is a valid room alias"), .expect("#admins:server_name is a valid room alias"),
) )
.unwrap(); .unwrap();
if conduit_room.is_none() { let conduit_room = match conduit_room {
None => {
warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this."); warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this.");
return;
} }
Some(r) => r,
};
let send_message = |message: message::MessageEventContent| { drop(guard);
if let Some(conduit_room) = &conduit_room {
db.rooms let send_message = |message: message::MessageEventContent,
guard: RwLockReadGuard<'_, Database>,
mutex_lock: &MutexGuard<'_, ()>| {
guard
.rooms
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMessage, event_type: EventType::RoomMessage,
@ -62,36 +74,49 @@ impl Admin {
}, },
&conduit_user, &conduit_user,
&conduit_room, &conduit_room,
&db, &guard,
mutex_lock,
) )
.unwrap(); .unwrap();
}
}; };
loop { loop {
tokio::select! { tokio::select! {
Some(event) = receiver.next() => { Some(event) = receiver.next() => {
let guard = db.read().await;
let mutex = Arc::clone(
guard.globals
.roomid_mutex
.write()
.unwrap()
.entry(conduit_room.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
match event { match event {
AdminCommand::RegisterAppservice(yaml) => { AdminCommand::RegisterAppservice(yaml) => {
db.appservice.register_appservice(yaml).unwrap(); // TODO handle error guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error
} }
AdminCommand::ListAppservices => { AdminCommand::ListAppservices => {
if let Ok(appservices) = db.appservice.iter_ids().map(|ids| ids.collect::<Vec<_>>()) { if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::<Vec<_>>()) {
let count = appservices.len(); let count = appservices.len();
let output = format!( let output = format!(
"Appservices ({}): {}", "Appservices ({}): {}",
count, count,
appservices.into_iter().filter_map(|r| r.ok()).collect::<Vec<_>>().join(", ") appservices.into_iter().filter_map(|r| r.ok()).collect::<Vec<_>>().join(", ")
); );
send_message(message::MessageEventContent::text_plain(output)); send_message(message::MessageEventContent::text_plain(output), guard, &mutex_lock);
} else { } else {
send_message(message::MessageEventContent::text_plain("Failed to get appservices.")); send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard, &mutex_lock);
} }
} }
AdminCommand::SendMessage(message) => { AdminCommand::SendMessage(message) => {
send_message(message); send_message(message, guard, &mutex_lock);
} }
} }
drop(mutex_lock);
} }
} }
} }

4
src/database/appservice.rs

@ -49,7 +49,7 @@ impl Appservice {
) )
} }
pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + Send + Sync + '_> { pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + Send + '_> {
Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { Ok(self.id_appserviceregistrations.iter().map(|(id, _)| {
utils::string_from_bytes(&id) utils::string_from_bytes(&id)
.map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations."))
@ -58,7 +58,7 @@ impl Appservice {
pub fn iter_all( pub fn iter_all(
&self, &self,
) -> Result<impl Iterator<Item = Result<(String, serde_yaml::Value)>> + '_ + Send + Sync> { ) -> Result<impl Iterator<Item = Result<(String, serde_yaml::Value)>> + '_ + Send> {
Ok(self.iter_ids()?.filter_map(|id| id.ok()).map(move |id| { Ok(self.iter_ids()?.filter_map(|id| id.ok()).map(move |id| {
Ok(( Ok((
id.clone(), id.clone(),

33
src/database/globals.rs

@ -5,17 +5,18 @@ use ruma::{
client::r0::sync::sync_events, client::r0::sync::sync_events,
federation::discovery::{ServerSigningKeys, VerifyKey}, federation::discovery::{ServerSigningKeys, VerifyKey},
}, },
DeviceId, EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, UserId, DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId,
}; };
use rustls::{ServerCertVerifier, WebPKIVerifier}; use rustls::{ServerCertVerifier, WebPKIVerifier};
use std::{ use std::{
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap},
fs, fs,
future::Future,
path::PathBuf, path::PathBuf,
sync::{Arc, RwLock}, sync::{Arc, RwLock},
time::{Duration, Instant}, time::{Duration, Instant},
}; };
use tokio::sync::Semaphore; use tokio::sync::{broadcast, Mutex, Semaphore};
use trust_dns_resolver::TokioAsyncResolver; use trust_dns_resolver::TokioAsyncResolver;
use super::abstraction::Tree; use super::abstraction::Tree;
@ -38,6 +39,8 @@ pub struct Globals {
pub bad_event_ratelimiter: Arc<RwLock<BTreeMap<EventId, RateLimitState>>>, pub bad_event_ratelimiter: Arc<RwLock<BTreeMap<EventId, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<BTreeMap<Vec<String>, RateLimitState>>>, pub bad_signature_ratelimiter: Arc<RwLock<BTreeMap<Vec<String>, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<BTreeMap<Box<ServerName>, Arc<Semaphore>>>>, pub servername_ratelimiter: Arc<RwLock<BTreeMap<Box<ServerName>, Arc<Semaphore>>>>,
pub roomid_mutex: RwLock<BTreeMap<RoomId, Arc<Mutex<()>>>>,
pub roomid_mutex_federation: RwLock<BTreeMap<RoomId, Arc<Mutex<()>>>>, // this lock will be held longer
pub sync_receivers: RwLock< pub sync_receivers: RwLock<
BTreeMap< BTreeMap<
(UserId, Box<DeviceId>), (UserId, Box<DeviceId>),
@ -47,6 +50,7 @@ pub struct Globals {
), // since, rx ), // since, rx
>, >,
>, >,
pub rotate: RotationHandler,
} }
struct MatrixServerVerifier { struct MatrixServerVerifier {
@ -82,6 +86,28 @@ impl ServerCertVerifier for MatrixServerVerifier {
} }
} }
pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>);
impl RotationHandler {
pub fn new() -> Self {
let (s, r) = broadcast::channel::<()>(1);
Self(s, r)
}
pub fn watch(&self) -> impl Future<Output = ()> {
let mut r = self.0.subscribe();
async move {
let _ = r.recv().await;
}
}
pub fn fire(&self) {
let _ = self.0.send(());
}
}
impl Globals { impl Globals {
pub fn load( pub fn load(
globals: Arc<dyn Tree>, globals: Arc<dyn Tree>,
@ -167,7 +193,10 @@ impl Globals {
bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())), servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
roomid_mutex: RwLock::new(BTreeMap::new()),
roomid_mutex_federation: RwLock::new(BTreeMap::new()),
sync_receivers: RwLock::new(BTreeMap::new()), sync_receivers: RwLock::new(BTreeMap::new()),
rotate: RotationHandler::new(),
}; };
fs::create_dir_all(s.get_media_folder())?; fs::create_dir_all(s.get_media_folder())?;

2
src/database/pusher.rs

@ -73,7 +73,7 @@ impl PushData {
pub fn get_pusher_senderkeys<'a>( pub fn get_pusher_senderkeys<'a>(
&'a self, &'a self,
sender: &UserId, sender: &UserId,
) -> impl Iterator<Item = Box<[u8]>> + 'a { ) -> impl Iterator<Item = Vec<u8>> + 'a {
let mut prefix = sender.as_bytes().to_vec(); let mut prefix = sender.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);

60
src/database/rooms.rs

@ -2,6 +2,7 @@ mod edus;
pub use edus::RoomEdus; pub use edus::RoomEdus;
use member::MembershipState; use member::MembershipState;
use tokio::sync::MutexGuard;
use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result}; use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result};
use log::{debug, error, warn}; use log::{debug, error, warn};
@ -21,7 +22,7 @@ use ruma::{
uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
}; };
use std::{ use std::{
collections::{BTreeMap, HashMap, HashSet}, collections::{BTreeMap, BTreeSet, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
mem, mem,
sync::{Arc, RwLock}, sync::{Arc, RwLock},
@ -89,7 +90,7 @@ pub struct Rooms {
impl Rooms { impl Rooms {
/// Builds a StateMap by iterating over all keys that start /// Builds a StateMap by iterating over all keys that start
/// with state_hash, this gives the full state for the given state_hash. /// with state_hash, this gives the full state for the given state_hash.
pub fn state_full_ids(&self, shortstatehash: u64) -> Result<Vec<EventId>> { pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeSet<EventId>> {
Ok(self Ok(self
.stateid_shorteventid .stateid_shorteventid
.scan_prefix(shortstatehash.to_be_bytes().to_vec()) .scan_prefix(shortstatehash.to_be_bytes().to_vec())
@ -668,11 +669,10 @@ impl Rooms {
&self, &self,
pdu: &PduEvent, pdu: &PduEvent,
mut pdu_json: CanonicalJsonObject, mut pdu_json: CanonicalJsonObject,
count: u64,
pdu_id: &[u8],
leaves: &[EventId], leaves: &[EventId],
db: &Database, db: &Database,
) -> Result<()> { ) -> Result<Vec<u8>> {
// returns pdu id
// Make unsigned fields correct. This is not properly documented in the spec, but state // Make unsigned fields correct. This is not properly documented in the spec, but state
// events need to have previous content in the unsigned field, so clients can easily // events need to have previous content in the unsigned field, so clients can easily
// interpret things like membership changes // interpret things like membership changes
@ -710,20 +710,30 @@ impl Rooms {
self.replace_pdu_leaves(&pdu.room_id, leaves)?; self.replace_pdu_leaves(&pdu.room_id, leaves)?;
let count1 = db.globals.next_count()?;
// Mark as read first so the sending client doesn't get a notification even if appending // Mark as read first so the sending client doesn't get a notification even if appending
// fails // fails
self.edus self.edus
.private_read_set(&pdu.room_id, &pdu.sender, count, &db.globals)?; .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?;
self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; self.reset_notification_counts(&pdu.sender, &pdu.room_id)?;
let count2 = db.globals.next_count()?;
let mut pdu_id = pdu.room_id.as_bytes().to_vec();
pdu_id.push(0xff);
pdu_id.extend_from_slice(&count2.to_be_bytes());
// There's a brief moment of time here where the count is updated but the pdu does not
// exist. This could theoretically lead to dropped pdus, but it's extremely rare
self.pduid_pdu.insert( self.pduid_pdu.insert(
pdu_id, &pdu_id,
&serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"),
)?; )?;
// This also replaces the eventid of any outliers with the correct // This also replaces the eventid of any outliers with the correct
// pduid, removing the place holder. // pduid, removing the place holder.
self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; self.eventid_pduid
.insert(pdu.event_id.as_bytes(), &pdu_id)?;
// See if the event matches any known pushers // See if the event matches any known pushers
for user in db for user in db
@ -911,7 +921,7 @@ impl Rooms {
_ => {} _ => {}
} }
Ok(()) Ok(pdu_id)
} }
pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
@ -1078,13 +1088,13 @@ impl Rooms {
.scan_prefix(old_shortstatehash.clone()) .scan_prefix(old_shortstatehash.clone())
// Chop the old_shortstatehash out leaving behind the short state key // Chop the old_shortstatehash out leaving behind the short state key
.map(|(k, v)| (k[old_shortstatehash.len()..].to_vec(), v)) .map(|(k, v)| (k[old_shortstatehash.len()..].to_vec(), v))
.collect::<HashMap<Vec<u8>, Box<[u8]>>>() .collect::<HashMap<Vec<u8>, Vec<u8>>>()
} else { } else {
HashMap::new() HashMap::new()
}; };
if let Some(state_key) = &new_pdu.state_key { if let Some(state_key) = &new_pdu.state_key {
let mut new_state: HashMap<Vec<u8>, Box<[u8]>> = old_state; let mut new_state: HashMap<Vec<u8>, Vec<u8>> = old_state;
let mut new_state_key = new_pdu.kind.as_ref().as_bytes().to_vec(); let mut new_state_key = new_pdu.kind.as_ref().as_bytes().to_vec();
new_state_key.push(0xff); new_state_key.push(0xff);
@ -1200,6 +1210,7 @@ impl Rooms {
sender: &UserId, sender: &UserId,
room_id: &RoomId, room_id: &RoomId,
db: &Database, db: &Database,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
) -> Result<EventId> { ) -> Result<EventId> {
let PduBuilder { let PduBuilder {
event_type, event_type,
@ -1208,7 +1219,7 @@ impl Rooms {
state_key, state_key,
redacts, redacts,
} = pdu_builder; } = pdu_builder;
// TODO: Make sure this isn't called twice in parallel
let prev_events = self let prev_events = self
.get_pdu_leaves(&room_id)? .get_pdu_leaves(&room_id)?
.into_iter() .into_iter()
@ -1356,11 +1367,9 @@ impl Rooms {
// pdu without it's state. This is okay because append_pdu can't fail. // pdu without it's state. This is okay because append_pdu can't fail.
let statehashid = self.append_to_state(&pdu, &db.globals)?; let statehashid = self.append_to_state(&pdu, &db.globals)?;
self.append_pdu( let pdu_id = self.append_pdu(
&pdu, &pdu,
pdu_json, pdu_json,
count,
&pdu_id,
// Since this PDU references all pdu_leaves we can update the leaves // Since this PDU references all pdu_leaves we can update the leaves
// of the room // of the room
&[pdu.event_id.clone()], &[pdu.event_id.clone()],
@ -1450,7 +1459,7 @@ impl Rooms {
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,
room_id: &RoomId, room_id: &RoomId,
) -> impl Iterator<Item = Result<(Box<[u8]>, PduEvent)>> + 'a { ) -> impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a {
self.pdus_since(user_id, room_id, 0) self.pdus_since(user_id, room_id, 0)
} }
@ -1462,7 +1471,7 @@ impl Rooms {
user_id: &UserId, user_id: &UserId,
room_id: &RoomId, room_id: &RoomId,
since: u64, since: u64,
) -> impl Iterator<Item = Result<(Box<[u8]>, PduEvent)>> + 'a { ) -> impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -1491,13 +1500,13 @@ impl Rooms {
user_id: &UserId, user_id: &UserId,
room_id: &RoomId, room_id: &RoomId,
until: u64, until: u64,
) -> impl Iterator<Item = Result<(Box<[u8]>, PduEvent)>> + 'a { ) -> impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a {
// Create the first part of the full pdu id // Create the first part of the full pdu id
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
let mut current = prefix.clone(); let mut current = prefix.clone();
current.extend_from_slice(&until.to_be_bytes()); current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until`
let current: &[u8] = &current; let current: &[u8] = &current;
@ -1523,7 +1532,7 @@ impl Rooms {
user_id: &UserId, user_id: &UserId,
room_id: &RoomId, room_id: &RoomId,
from: u64, from: u64,
) -> impl Iterator<Item = Result<(Box<[u8]>, PduEvent)>> + 'a { ) -> impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a {
// Create the first part of the full pdu id // Create the first part of the full pdu id
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -1784,6 +1793,16 @@ impl Rooms {
db, db,
)?; )?;
} else { } else {
let mutex = Arc::clone(
db.globals
.roomid_mutex
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let mut event = serde_json::from_value::<Raw<member::MemberEventContent>>( let mut event = serde_json::from_value::<Raw<member::MemberEventContent>>(
self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
@ -1811,6 +1830,7 @@ impl Rooms {
user_id, user_id,
room_id, room_id,
db, db,
&mutex_lock,
)?; )?;
} }

57
src/database/sending.rs

@ -30,7 +30,10 @@ use ruma::{
receipt::ReceiptType, receipt::ReceiptType,
MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId,
}; };
use tokio::{select, sync::Semaphore}; use tokio::{
select,
sync::{RwLock, Semaphore},
};
use super::abstraction::Tree; use super::abstraction::Tree;
@ -90,7 +93,11 @@ enum TransactionStatus {
} }
impl Sending { impl Sending {
pub fn start_handler(&self, db: Arc<Database>, mut receiver: mpsc::UnboundedReceiver<Vec<u8>>) { pub fn start_handler(
&self,
db: Arc<RwLock<Database>>,
mut receiver: mpsc::UnboundedReceiver<Vec<u8>>,
) {
tokio::spawn(async move { tokio::spawn(async move {
let mut futures = FuturesUnordered::new(); let mut futures = FuturesUnordered::new();
@ -98,8 +105,12 @@ impl Sending {
// Retry requests we could not finish yet // Retry requests we could not finish yet
let mut initial_transactions = HashMap::<OutgoingKind, Vec<SendingEventType>>::new(); let mut initial_transactions = HashMap::<OutgoingKind, Vec<SendingEventType>>::new();
let guard = db.read().await;
for (key, outgoing_kind, event) in for (key, outgoing_kind, event) in
db.sending guard
.sending
.servercurrentevents .servercurrentevents
.iter() .iter()
.filter_map(|(key, _)| { .filter_map(|(key, _)| {
@ -117,17 +128,23 @@ impl Sending {
"Dropping some current events: {:?} {:?} {:?}", "Dropping some current events: {:?} {:?} {:?}",
key, outgoing_kind, event key, outgoing_kind, event
); );
db.sending.servercurrentevents.remove(&key).unwrap(); guard.sending.servercurrentevents.remove(&key).unwrap();
continue; continue;
} }
entry.push(event); entry.push(event);
} }
drop(guard);
for (outgoing_kind, events) in initial_transactions { for (outgoing_kind, events) in initial_transactions {
current_transaction_status current_transaction_status
.insert(outgoing_kind.get_prefix(), TransactionStatus::Running); .insert(outgoing_kind.get_prefix(), TransactionStatus::Running);
futures.push(Self::handle_events(outgoing_kind.clone(), events, &db)); futures.push(Self::handle_events(
outgoing_kind.clone(),
events,
Arc::clone(&db),
));
} }
loop { loop {
@ -135,15 +152,17 @@ impl Sending {
Some(response) = futures.next() => { Some(response) = futures.next() => {
match response { match response {
Ok(outgoing_kind) => { Ok(outgoing_kind) => {
let guard = db.read().await;
let prefix = outgoing_kind.get_prefix(); let prefix = outgoing_kind.get_prefix();
for (key, _) in db.sending.servercurrentevents for (key, _) in guard.sending.servercurrentevents
.scan_prefix(prefix.clone()) .scan_prefix(prefix.clone())
{ {
db.sending.servercurrentevents.remove(&key).unwrap(); guard.sending.servercurrentevents.remove(&key).unwrap();
} }
// Find events that have been added since starting the last request // Find events that have been added since starting the last request
let new_events = db.sending.servernamepduids let new_events = guard.sending.servernamepduids
.scan_prefix(prefix.clone()) .scan_prefix(prefix.clone())
.map(|(k, _)| { .map(|(k, _)| {
SendingEventType::Pdu(k[prefix.len()..].to_vec()) SendingEventType::Pdu(k[prefix.len()..].to_vec())
@ -161,17 +180,19 @@ impl Sending {
SendingEventType::Pdu(b) | SendingEventType::Pdu(b) |
SendingEventType::Edu(b) => { SendingEventType::Edu(b) => {
current_key.extend_from_slice(&b); current_key.extend_from_slice(&b);
db.sending.servercurrentevents.insert(&current_key, &[]).unwrap(); guard.sending.servercurrentevents.insert(&current_key, &[]).unwrap();
db.sending.servernamepduids.remove(&current_key).unwrap(); guard.sending.servernamepduids.remove(&current_key).unwrap();
} }
} }
} }
drop(guard);
futures.push( futures.push(
Self::handle_events( Self::handle_events(
outgoing_kind.clone(), outgoing_kind.clone(),
new_events, new_events,
&db, Arc::clone(&db),
) )
); );
} else { } else {
@ -192,13 +213,15 @@ impl Sending {
}, },
Some(key) = receiver.next() => { Some(key) = receiver.next() => {
if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key) { if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key) {
let guard = db.read().await;
if let Ok(Some(events)) = Self::select_events( if let Ok(Some(events)) = Self::select_events(
&outgoing_kind, &outgoing_kind,
vec![(event, key)], vec![(event, key)],
&mut current_transaction_status, &mut current_transaction_status,
&db &guard
) { ) {
futures.push(Self::handle_events(outgoing_kind, events, &db)); futures.push(Self::handle_events(outgoing_kind, events, Arc::clone(&db)));
} }
} }
} }
@ -357,7 +380,7 @@ impl Sending {
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Box<[u8]>) -> Result<()> { pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Vec<u8>) -> Result<()> {
let mut key = b"$".to_vec(); let mut key = b"$".to_vec();
key.extend_from_slice(&senderkey); key.extend_from_slice(&senderkey);
key.push(0xff); key.push(0xff);
@ -403,8 +426,10 @@ impl Sending {
async fn handle_events( async fn handle_events(
kind: OutgoingKind, kind: OutgoingKind,
events: Vec<SendingEventType>, events: Vec<SendingEventType>,
db: &Database, db: Arc<RwLock<Database>>,
) -> std::result::Result<OutgoingKind, (OutgoingKind, Error)> { ) -> std::result::Result<OutgoingKind, (OutgoingKind, Error)> {
let db = db.read().await;
match &kind { match &kind {
OutgoingKind::Appservice(server) => { OutgoingKind::Appservice(server) => {
let mut pdu_jsons = Vec::new(); let mut pdu_jsons = Vec::new();
@ -543,7 +568,7 @@ impl Sending {
&pusher, &pusher,
rules_for_user, rules_for_user,
&pdu, &pdu,
db, &db,
) )
.await .await
.map(|_response| kind.clone()) .map(|_response| kind.clone())

9
src/database/users.rs

@ -726,10 +726,9 @@ impl Users {
json.insert("sender".to_owned(), sender.to_string().into()); json.insert("sender".to_owned(), sender.to_string().into());
json.insert("content".to_owned(), content); json.insert("content".to_owned(), content);
self.todeviceid_events.insert( let value = serde_json::to_vec(&json).expect("Map::to_vec always works");
&key,
&serde_json::to_vec(&json).expect("Map::to_vec always works"), self.todeviceid_events.insert(&key, &value)?;
)?;
Ok(()) Ok(())
} }
@ -774,7 +773,7 @@ impl Users {
for (key, _) in self for (key, _) in self
.todeviceid_events .todeviceid_events
.iter_from(&last, true) .iter_from(&last, true) // this includes last
.take_while(move |(k, _)| k.starts_with(&prefix)) .take_while(move |(k, _)| k.starts_with(&prefix))
.map(|(key, _)| { .map(|(key, _)| {
Ok::<_, Error>(( Ok::<_, Error>((

6
src/error.rs

@ -35,6 +35,12 @@ pub enum Error {
#[from] #[from]
source: rocksdb::Error, source: rocksdb::Error,
}, },
#[cfg(feature = "sqlite")]
#[error("There was a problem with the connection to the sqlite database: {source}")]
SqliteError {
#[from]
source: rusqlite::Error,
},
#[error("Could not generate an image.")] #[error("Could not generate an image.")]
ImageError { ImageError {
#[from] #[from]

27
src/main.rs

@ -30,10 +30,11 @@ use rocket::{
}, },
routes, Request, routes, Request,
}; };
use tokio::sync::RwLock;
use tracing::span; use tracing::span;
use tracing_subscriber::{prelude::*, Registry}; use tracing_subscriber::{prelude::*, Registry};
fn setup_rocket(config: Figment, data: Arc<Database>) -> rocket::Rocket<rocket::Build> { fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<rocket::Build> {
rocket::custom(config) rocket::custom(config)
.manage(data) .manage(data)
.mount( .mount(
@ -193,13 +194,14 @@ async fn main() {
) )
.merge(Env::prefixed("CONDUIT_").global()); .merge(Env::prefixed("CONDUIT_").global());
std::env::set_var("RUST_LOG", "warn");
let config = raw_config let config = raw_config
.extract::<Config>() .extract::<Config>()
.expect("It looks like your config is invalid. Please take a look at the error"); .expect("It looks like your config is invalid. Please take a look at the error");
let db = Database::load_or_create(config.clone()) let mut _span: Option<span::Span> = None;
.await let mut _enter: Option<span::Entered<'_>> = None;
.expect("config is valid");
if config.allow_jaeger { if config.allow_jaeger {
let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline() let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline()
@ -209,19 +211,22 @@ async fn main() {
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
Registry::default().with(telemetry).try_init().unwrap(); Registry::default().with(telemetry).try_init().unwrap();
let root = span!(tracing::Level::INFO, "app_start", work_units = 2); _span = Some(span!(tracing::Level::INFO, "app_start", work_units = 2));
let _enter = root.enter(); _enter = Some(_span.as_ref().unwrap().enter());
let rocket = setup_rocket(raw_config, db);
rocket.launch().await.unwrap();
} else { } else {
std::env::set_var("RUST_LOG", config.log); std::env::set_var("RUST_LOG", &config.log);
tracing_subscriber::fmt::init(); tracing_subscriber::fmt::init();
}
config.warn_deprecated();
let db = Database::load_or_create(config)
.await
.expect("config is valid");
let rocket = setup_rocket(raw_config, db); let rocket = setup_rocket(raw_config, db);
rocket.launch().await.unwrap(); rocket.launch().await.unwrap();
} }
}
#[catch(404)] #[catch(404)]
fn not_found_catcher(_: &Request<'_>) -> String { fn not_found_catcher(_: &Request<'_>) -> String {

10
src/ruma_wrapper.rs

@ -1,4 +1,4 @@
use crate::Error; use crate::{database::DatabaseGuard, Error};
use ruma::{ use ruma::{
api::{client::r0::uiaa::UiaaResponse, OutgoingResponse}, api::{client::r0::uiaa::UiaaResponse, OutgoingResponse},
identifiers::{DeviceId, UserId}, identifiers::{DeviceId, UserId},
@ -9,7 +9,7 @@ use std::ops::Deref;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use { use {
crate::{server_server, Database}, crate::server_server,
log::{debug, warn}, log::{debug, warn},
rocket::{ rocket::{
data::{self, ByteUnit, Data, FromData}, data::{self, ByteUnit, Data, FromData},
@ -17,13 +17,12 @@ use {
outcome::Outcome::*, outcome::Outcome::*,
response::{self, Responder}, response::{self, Responder},
tokio::io::AsyncReadExt, tokio::io::AsyncReadExt,
Request, State, Request,
}, },
ruma::api::{AuthScheme, IncomingRequest}, ruma::api::{AuthScheme, IncomingRequest},
std::collections::BTreeMap, std::collections::BTreeMap,
std::convert::TryFrom, std::convert::TryFrom,
std::io::Cursor, std::io::Cursor,
std::sync::Arc,
}; };
/// This struct converts rocket requests into ruma structs by converting them into http requests /// This struct converts rocket requests into ruma structs by converting them into http requests
@ -49,7 +48,7 @@ where
async fn from_data(request: &'a Request<'_>, data: Data) -> data::Outcome<Self, Self::Error> { async fn from_data(request: &'a Request<'_>, data: Data) -> data::Outcome<Self, Self::Error> {
let metadata = T::Incoming::METADATA; let metadata = T::Incoming::METADATA;
let db = request let db = request
.guard::<State<'_, Arc<Database>>>() .guard::<DatabaseGuard>()
.await .await
.expect("database was loaded"); .expect("database was loaded");
@ -318,6 +317,7 @@ where
}), }),
Err(e) => { Err(e) => {
warn!("{:?}", e); warn!("{:?}", e);
// Bad Json
Failure((Status::raw(583), ())) Failure((Status::raw(583), ()))
} }
} }

221
src/server_server.rs

@ -1,12 +1,13 @@
use crate::{ use crate::{
client_server::{self, claim_keys_helper, get_keys_helper}, client_server::{self, claim_keys_helper, get_keys_helper},
database::DatabaseGuard,
utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma,
}; };
use get_profile_information::v1::ProfileField; use get_profile_information::v1::ProfileField;
use http::header::{HeaderValue, AUTHORIZATION, HOST}; use http::header::{HeaderValue, AUTHORIZATION, HOST};
use log::{debug, error, info, trace, warn}; use log::{debug, error, info, trace, warn};
use regex::Regex; use regex::Regex;
use rocket::{response::content::Json, State}; use rocket::response::content::Json;
use ruma::{ use ruma::{
api::{ api::{
client::error::{Error as RumaError, ErrorKind}, client::error::{Error as RumaError, ErrorKind},
@ -45,7 +46,7 @@ use ruma::{
receipt::ReceiptType, receipt::ReceiptType,
serde::Raw, serde::Raw,
signatures::{CanonicalJsonObject, CanonicalJsonValue}, signatures::{CanonicalJsonObject, CanonicalJsonValue},
state_res::{self, Event, RoomVersion, StateMap}, state_res::{self, RoomVersion, StateMap},
to_device::DeviceIdOrAllDevices, to_device::DeviceIdOrAllDevices,
uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName,
ServerSigningKeyId, UserId, ServerSigningKeyId, UserId,
@ -432,7 +433,7 @@ pub async fn request_well_known(
#[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] #[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))]
#[tracing::instrument(skip(db))] #[tracing::instrument(skip(db))]
pub fn get_server_version_route( pub fn get_server_version_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
) -> ConduitResult<get_server_version::v1::Response> { ) -> ConduitResult<get_server_version::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled.")); return Err(Error::bad_config("Federation is disabled."));
@ -450,7 +451,7 @@ pub fn get_server_version_route(
// Response type for this endpoint is Json because we need to calculate a signature for the response // Response type for this endpoint is Json because we need to calculate a signature for the response
#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))]
#[tracing::instrument(skip(db))] #[tracing::instrument(skip(db))]
pub fn get_server_keys_route(db: State<'_, Arc<Database>>) -> Json<String> { pub fn get_server_keys_route(db: DatabaseGuard) -> Json<String> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
// TODO: Use proper types // TODO: Use proper types
return Json("Federation is disabled.".to_owned()); return Json("Federation is disabled.".to_owned());
@ -497,7 +498,7 @@ pub fn get_server_keys_route(db: State<'_, Arc<Database>>) -> Json<String> {
#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] #[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))]
#[tracing::instrument(skip(db))] #[tracing::instrument(skip(db))]
pub fn get_server_keys_deprecated_route(db: State<'_, Arc<Database>>) -> Json<String> { pub fn get_server_keys_deprecated_route(db: DatabaseGuard) -> Json<String> {
get_server_keys_route(db) get_server_keys_route(db)
} }
@ -507,7 +508,7 @@ pub fn get_server_keys_deprecated_route(db: State<'_, Arc<Database>>) -> Json<St
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_public_rooms_filtered_route( pub async fn get_public_rooms_filtered_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_public_rooms_filtered::v1::Request<'_>>, body: Ruma<get_public_rooms_filtered::v1::Request<'_>>,
) -> ConduitResult<get_public_rooms_filtered::v1::Response> { ) -> ConduitResult<get_public_rooms_filtered::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -551,7 +552,7 @@ pub async fn get_public_rooms_filtered_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn get_public_rooms_route( pub async fn get_public_rooms_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_public_rooms::v1::Request<'_>>, body: Ruma<get_public_rooms::v1::Request<'_>>,
) -> ConduitResult<get_public_rooms::v1::Response> { ) -> ConduitResult<get_public_rooms::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -595,7 +596,7 @@ pub async fn get_public_rooms_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn send_transaction_message_route( pub async fn send_transaction_message_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<send_transaction_message::v1::Request<'_>>, body: Ruma<send_transaction_message::v1::Request<'_>>,
) -> ConduitResult<send_transaction_message::v1::Response> { ) -> ConduitResult<send_transaction_message::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -624,13 +625,44 @@ pub async fn send_transaction_message_route(
} }
}; };
// 0. Check the server is in the room
let room_id = match value
.get("room_id")
.and_then(|id| RoomId::try_from(id.as_str()?).ok())
{
Some(id) => id,
None => {
// Event is invalid
resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_string()));
continue;
}
};
let mutex = Arc::clone(
db.globals
.roomid_mutex_federation
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let start_time = Instant::now(); let start_time = Instant::now();
resolved_map.insert( resolved_map.insert(
event_id.clone(), event_id.clone(),
handle_incoming_pdu(&body.origin, &event_id, value, true, &db, &pub_key_map) handle_incoming_pdu(
&body.origin,
&event_id,
&room_id,
value,
true,
&db,
&pub_key_map,
)
.await .await
.map(|_| ()), .map(|_| ()),
); );
drop(mutex_lock);
let elapsed = start_time.elapsed(); let elapsed = start_time.elapsed();
if elapsed > Duration::from_secs(1) { if elapsed > Duration::from_secs(1) {
@ -775,6 +807,8 @@ pub async fn send_transaction_message_route(
} }
} }
db.flush().await?;
Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into())
} }
@ -782,8 +816,8 @@ pub async fn send_transaction_message_route(
type AsyncRecursiveResult<'a, T, E> = Pin<Box<dyn Future<Output = StdResult<T, E>> + 'a + Send>>; type AsyncRecursiveResult<'a, T, E> = Pin<Box<dyn Future<Output = StdResult<T, E>> + 'a + Send>>;
/// When receiving an event one needs to: /// When receiving an event one needs to:
/// 0. Skip the PDU if we already know about it /// 0. Check the server is in the room
/// 1. Check the server is in the room /// 1. Skip the PDU if we already know about it
/// 2. Check signatures, otherwise drop /// 2. Check signatures, otherwise drop
/// 3. Check content hash, redact if doesn't match /// 3. Check content hash, redact if doesn't match
/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not /// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not
@ -808,6 +842,7 @@ type AsyncRecursiveResult<'a, T, E> = Pin<Box<dyn Future<Output = StdResult<T, E
pub fn handle_incoming_pdu<'a>( pub fn handle_incoming_pdu<'a>(
origin: &'a ServerName, origin: &'a ServerName,
event_id: &'a EventId, event_id: &'a EventId,
room_id: &'a RoomId,
value: BTreeMap<String, CanonicalJsonValue>, value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool, is_timeline_event: bool,
db: &'a Database, db: &'a Database,
@ -815,24 +850,6 @@ pub fn handle_incoming_pdu<'a>(
) -> AsyncRecursiveResult<'a, Option<Vec<u8>>, String> { ) -> AsyncRecursiveResult<'a, Option<Vec<u8>>, String> {
Box::pin(async move { Box::pin(async move {
// TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
// 0. Skip the PDU if we already have it as a timeline event
if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) {
return Ok(Some(pdu_id.to_vec()));
}
// 1. Check the server is in the room
let room_id = match value
.get("room_id")
.and_then(|id| RoomId::try_from(id.as_str()?).ok())
{
Some(id) => id,
None => {
// Event is invalid
return Err("Event needs a valid RoomId.".to_string());
}
};
match db.rooms.exists(&room_id) { match db.rooms.exists(&room_id) {
Ok(true) => {} Ok(true) => {}
_ => { _ => {
@ -840,6 +857,11 @@ pub fn handle_incoming_pdu<'a>(
} }
} }
// 1. Skip the PDU if we already have it as a timeline event
if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) {
return Ok(Some(pdu_id.to_vec()));
}
// We go through all the signatures we see on the value and fetch the corresponding signing // We go through all the signatures we see on the value and fetch the corresponding signing
// keys // keys
fetch_required_signing_keys(&value, &pub_key_map, db) fetch_required_signing_keys(&value, &pub_key_map, db)
@ -899,7 +921,7 @@ pub fn handle_incoming_pdu<'a>(
// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
// EDIT: Step 5 is not applied anymore because it failed too often // EDIT: Step 5 is not applied anymore because it failed too often
debug!("Fetching auth events for {}", incoming_pdu.event_id); debug!("Fetching auth events for {}", incoming_pdu.event_id);
fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, pub_key_map) fetch_and_handle_events(db, origin, &incoming_pdu.auth_events, &room_id, pub_key_map)
.await .await
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
@ -1000,13 +1022,13 @@ pub fn handle_incoming_pdu<'a>(
if incoming_pdu.prev_events.len() == 1 { if incoming_pdu.prev_events.len() == 1 {
let prev_event = &incoming_pdu.prev_events[0]; let prev_event = &incoming_pdu.prev_events[0];
let state_vec = db let state = db
.rooms .rooms
.pdu_shortstatehash(prev_event) .pdu_shortstatehash(prev_event)
.map_err(|_| "Failed talking to db".to_owned())? .map_err(|_| "Failed talking to db".to_owned())?
.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok()) .map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok())
.flatten(); .flatten();
if let Some(mut state_vec) = state_vec { if let Some(mut state) = state {
if db if db
.rooms .rooms
.get_pdu(prev_event) .get_pdu(prev_event)
@ -1016,10 +1038,16 @@ pub fn handle_incoming_pdu<'a>(
.state_key .state_key
.is_some() .is_some()
{ {
state_vec.push(prev_event.clone()); state.insert(prev_event.clone());
} }
state_at_incoming_event = Some( state_at_incoming_event = Some(
fetch_and_handle_events(db, origin, &state_vec, pub_key_map) fetch_and_handle_events(
db,
origin,
&state.into_iter().collect::<Vec<_>>(),
&room_id,
pub_key_map,
)
.await .await
.map_err(|_| "Failed to fetch state events locally".to_owned())? .map_err(|_| "Failed to fetch state events locally".to_owned())?
.into_iter() .into_iter()
@ -1057,8 +1085,14 @@ pub fn handle_incoming_pdu<'a>(
{ {
Ok(res) => { Ok(res) => {
debug!("Fetching state events at event."); debug!("Fetching state events at event.");
let state_vec = let state_vec = match fetch_and_handle_events(
match fetch_and_handle_events(&db, origin, &res.pdu_ids, pub_key_map).await &db,
origin,
&res.pdu_ids,
&room_id,
pub_key_map,
)
.await
{ {
Ok(state) => state, Ok(state) => state,
Err(_) => return Err("Failed to fetch state events.".to_owned()), Err(_) => return Err("Failed to fetch state events.".to_owned()),
@ -1088,7 +1122,13 @@ pub fn handle_incoming_pdu<'a>(
} }
debug!("Fetching auth chain events at event."); debug!("Fetching auth chain events at event.");
match fetch_and_handle_events(&db, origin, &res.auth_chain_ids, pub_key_map) match fetch_and_handle_events(
&db,
origin,
&res.auth_chain_ids,
&room_id,
pub_key_map,
)
.await .await
{ {
Ok(state) => state, Ok(state) => state,
@ -1217,18 +1257,10 @@ pub fn handle_incoming_pdu<'a>(
let mut auth_events = vec![]; let mut auth_events = vec![];
for map in &fork_states { for map in &fork_states {
let mut state_auth = vec![]; let state_auth = map
for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) { .values()
match fetch_and_handle_events(&db, origin, &[auth_id.clone()], pub_key_map) .flat_map(|pdu| pdu.auth_events.clone())
.await .collect();
{
// This should always contain exactly one element when Ok
Ok(events) => state_auth.extend_from_slice(&events),
Err(e) => {
debug!("Event was not present: {}", e);
}
}
}
auth_events.push(state_auth); auth_events.push(state_auth);
} }
@ -1243,10 +1275,7 @@ pub fn handle_incoming_pdu<'a>(
.collect::<StateMap<_>>() .collect::<StateMap<_>>()
}) })
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
auth_events auth_events,
.into_iter()
.map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect())
.collect(),
&|id| { &|id| {
let res = db.rooms.get_pdu(id); let res = db.rooms.get_pdu(id);
if let Err(e) = &res { if let Err(e) = &res {
@ -1280,11 +1309,13 @@ pub fn handle_incoming_pdu<'a>(
pdu_id = Some( pdu_id = Some(
append_incoming_pdu( append_incoming_pdu(
&db, &db,
&room_id,
&incoming_pdu, &incoming_pdu,
val, val,
extremities, extremities,
&state_at_incoming_event, &state_at_incoming_event,
) )
.await
.map_err(|_| "Failed to add pdu to db.".to_owned())?, .map_err(|_| "Failed to add pdu to db.".to_owned())?,
); );
debug!("Appended incoming pdu."); debug!("Appended incoming pdu.");
@ -1322,6 +1353,7 @@ pub(crate) fn fetch_and_handle_events<'a>(
db: &'a Database, db: &'a Database,
origin: &'a ServerName, origin: &'a ServerName,
events: &'a [EventId], events: &'a [EventId],
room_id: &'a RoomId,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>,
) -> AsyncRecursiveResult<'a, Vec<Arc<PduEvent>>, Error> { ) -> AsyncRecursiveResult<'a, Vec<Arc<PduEvent>>, Error> {
Box::pin(async move { Box::pin(async move {
@ -1375,6 +1407,7 @@ pub(crate) fn fetch_and_handle_events<'a>(
match handle_incoming_pdu( match handle_incoming_pdu(
origin, origin,
&event_id, &event_id,
&room_id,
value.clone(), value.clone(),
false, false,
db, db,
@ -1581,32 +1614,38 @@ pub(crate) async fn fetch_signing_keys(
/// Append the incoming event setting the state snapshot to the state from the /// Append the incoming event setting the state snapshot to the state from the
/// server that sent the event. /// server that sent the event.
#[tracing::instrument(skip(db))] #[tracing::instrument(skip(db))]
pub(crate) fn append_incoming_pdu( async fn append_incoming_pdu(
db: &Database, db: &Database,
room_id: &RoomId,
pdu: &PduEvent, pdu: &PduEvent,
pdu_json: CanonicalJsonObject, pdu_json: CanonicalJsonObject,
new_room_leaves: HashSet<EventId>, new_room_leaves: HashSet<EventId>,
state: &StateMap<Arc<PduEvent>>, state: &StateMap<Arc<PduEvent>>,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
let count = db.globals.next_count()?; let mutex = Arc::clone(
let mut pdu_id = pdu.room_id.as_bytes().to_vec(); db.globals
pdu_id.push(0xff); .roomid_mutex
pdu_id.extend_from_slice(&count.to_be_bytes()); .write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
// We append to state before appending the pdu, so we don't have a moment in time with the // We append to state before appending the pdu, so we don't have a moment in time with the
// pdu without it's state. This is okay because append_pdu can't fail. // pdu without it's state. This is okay because append_pdu can't fail.
db.rooms db.rooms
.set_event_state(&pdu.event_id, state, &db.globals)?; .set_event_state(&pdu.event_id, state, &db.globals)?;
db.rooms.append_pdu( let pdu_id = db.rooms.append_pdu(
pdu, pdu,
pdu_json, pdu_json,
count,
&pdu_id,
&new_room_leaves.into_iter().collect::<Vec<_>>(), &new_room_leaves.into_iter().collect::<Vec<_>>(),
&db, &db,
)?; )?;
drop(mutex_lock);
for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) { for appservice in db.appservice.iter_all()?.filter_map(|r| r.ok()) {
if let Some(namespaces) = appservice.1.get("namespaces") { if let Some(namespaces) = appservice.1.get("namespaces") {
let users = namespaces let users = namespaces
@ -1674,7 +1713,7 @@ pub(crate) fn append_incoming_pdu(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn get_event_route( pub fn get_event_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_event::v1::Request<'_>>, body: Ruma<get_event::v1::Request<'_>>,
) -> ConduitResult<get_event::v1::Response> { ) -> ConduitResult<get_event::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -1699,7 +1738,7 @@ pub fn get_event_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn get_missing_events_route( pub fn get_missing_events_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_missing_events::v1::Request<'_>>, body: Ruma<get_missing_events::v1::Request<'_>>,
) -> ConduitResult<get_missing_events::v1::Response> { ) -> ConduitResult<get_missing_events::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -1748,7 +1787,7 @@ pub fn get_missing_events_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn get_event_authorization_route( pub fn get_event_authorization_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_event_authorization::v1::Request<'_>>, body: Ruma<get_event_authorization::v1::Request<'_>>,
) -> ConduitResult<get_event_authorization::v1::Response> { ) -> ConduitResult<get_event_authorization::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -1792,7 +1831,7 @@ pub fn get_event_authorization_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn get_room_state_route( pub fn get_room_state_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_room_state::v1::Request<'_>>, body: Ruma<get_room_state::v1::Request<'_>>,
) -> ConduitResult<get_room_state::v1::Response> { ) -> ConduitResult<get_room_state::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -1855,7 +1894,7 @@ pub fn get_room_state_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn get_room_state_ids_route( pub fn get_room_state_ids_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_room_state_ids::v1::Request<'_>>, body: Ruma<get_room_state_ids::v1::Request<'_>>,
) -> ConduitResult<get_room_state_ids::v1::Response> { ) -> ConduitResult<get_room_state_ids::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -1870,7 +1909,11 @@ pub fn get_room_state_ids_route(
"Pdu state not found.", "Pdu state not found.",
))?; ))?;
let pdu_ids = db.rooms.state_full_ids(shortstatehash)?; let pdu_ids = db
.rooms
.state_full_ids(shortstatehash)?
.into_iter()
.collect();
let mut auth_chain_ids = BTreeSet::<EventId>::new(); let mut auth_chain_ids = BTreeSet::<EventId>::new();
let mut todo = BTreeSet::new(); let mut todo = BTreeSet::new();
@ -1907,7 +1950,7 @@ pub fn get_room_state_ids_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn create_join_event_template_route( pub fn create_join_event_template_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_join_event_template::v1::Request<'_>>, body: Ruma<create_join_event_template::v1::Request<'_>>,
) -> ConduitResult<create_join_event_template::v1::Response> { ) -> ConduitResult<create_join_event_template::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -2076,7 +2119,7 @@ pub fn create_join_event_template_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn create_join_event_route( pub async fn create_join_event_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_join_event::v2::Request<'_>>, body: Ruma<create_join_event::v2::Request<'_>>,
) -> ConduitResult<create_join_event::v2::Response> { ) -> ConduitResult<create_join_event::v2::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -2116,7 +2159,24 @@ pub async fn create_join_event_route(
) )
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?;
let pdu_id = handle_incoming_pdu(&origin, &event_id, value, true, &db, &pub_key_map) let mutex = Arc::clone(
db.globals
.roomid_mutex_federation
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let pdu_id = handle_incoming_pdu(
&origin,
&event_id,
&body.room_id,
value,
true,
&db,
&pub_key_map,
)
.await .await
.map_err(|_| { .map_err(|_| {
Error::BadRequest( Error::BadRequest(
@ -2128,6 +2188,7 @@ pub async fn create_join_event_route(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
"Could not accept incoming PDU as timeline event.", "Could not accept incoming PDU as timeline event.",
))?; ))?;
drop(mutex_lock);
let state_ids = db.rooms.state_full_ids(shortstatehash)?; let state_ids = db.rooms.state_full_ids(shortstatehash)?;
@ -2161,6 +2222,8 @@ pub async fn create_join_event_route(
db.sending.send_pdu(&server, &pdu_id)?; db.sending.send_pdu(&server, &pdu_id)?;
} }
db.flush().await?;
Ok(create_join_event::v2::Response { Ok(create_join_event::v2::Response {
room_state: RoomState { room_state: RoomState {
auth_chain: auth_chain_ids auth_chain: auth_chain_ids
@ -2184,7 +2247,7 @@ pub async fn create_join_event_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn create_invite_route( pub async fn create_invite_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<create_invite::v2::Request>, body: Ruma<create_invite::v2::Request>,
) -> ConduitResult<create_invite::v2::Response> { ) -> ConduitResult<create_invite::v2::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -2277,6 +2340,8 @@ pub async fn create_invite_route(
)?; )?;
} }
db.flush().await?;
Ok(create_invite::v2::Response { Ok(create_invite::v2::Response {
event: PduEvent::convert_to_outgoing_federation_event(signed_event), event: PduEvent::convert_to_outgoing_federation_event(signed_event),
} }
@ -2289,7 +2354,7 @@ pub async fn create_invite_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn get_devices_route( pub fn get_devices_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_devices::v1::Request<'_>>, body: Ruma<get_devices::v1::Request<'_>>,
) -> ConduitResult<get_devices::v1::Response> { ) -> ConduitResult<get_devices::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -2329,7 +2394,7 @@ pub fn get_devices_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn get_room_information_route( pub fn get_room_information_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_room_information::v1::Request<'_>>, body: Ruma<get_room_information::v1::Request<'_>>,
) -> ConduitResult<get_room_information::v1::Response> { ) -> ConduitResult<get_room_information::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -2357,7 +2422,7 @@ pub fn get_room_information_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn get_profile_information_route( pub fn get_profile_information_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_profile_information::v1::Request<'_>>, body: Ruma<get_profile_information::v1::Request<'_>>,
) -> ConduitResult<get_profile_information::v1::Response> { ) -> ConduitResult<get_profile_information::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -2390,8 +2455,8 @@ pub fn get_profile_information_route(
post("/_matrix/federation/v1/user/keys/query", data = "<body>") post("/_matrix/federation/v1/user/keys/query", data = "<body>")
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub fn get_keys_route( pub async fn get_keys_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<get_keys::v1::Request>, body: Ruma<get_keys::v1::Request>,
) -> ConduitResult<get_keys::v1::Response> { ) -> ConduitResult<get_keys::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {
@ -2405,6 +2470,8 @@ pub fn get_keys_route(
&db, &db,
)?; )?;
db.flush().await?;
Ok(get_keys::v1::Response { Ok(get_keys::v1::Response {
device_keys: result.device_keys, device_keys: result.device_keys,
master_keys: result.master_keys, master_keys: result.master_keys,
@ -2419,7 +2486,7 @@ pub fn get_keys_route(
)] )]
#[tracing::instrument(skip(db, body))] #[tracing::instrument(skip(db, body))]
pub async fn claim_keys_route( pub async fn claim_keys_route(
db: State<'_, Arc<Database>>, db: DatabaseGuard,
body: Ruma<claim_keys::v1::Request>, body: Ruma<claim_keys::v1::Request>,
) -> ConduitResult<claim_keys::v1::Response> { ) -> ConduitResult<claim_keys::v1::Response> {
if !db.globals.allow_federation() { if !db.globals.allow_federation() {

Loading…
Cancel
Save