Compare commits

..

12 Commits

Author SHA1 Message Date
Timo Kösters 9b57c89df6 Merge branch 'more-event-id-arcs' into 'next' 4 years ago
Jonas Platte 34d3f74f36
Use Arc for EventIds in PDUs 4 years ago
Timo Kösters 11a21fc136 Merge branch 'up-ruma' into 'next' 4 years ago
Jonas Platte 0183d003d0
Revert rename of Ruma<_> parameters 4 years ago
Jonas Platte f712455047
Reduce EventId copying 4 years ago
Jonas Platte 58ea081762
Use int! macro instead of Int::from 4 years ago
Jonas Platte bffddbd487
Simplify identifier parsing code 4 years ago
Jonas Platte 41fef1da64
Remove unnecessary .to_string() calls 4 years ago
Jonas Platte 892a0525f2
Upgrade Ruma 4 years ago
Jonas Platte 1fc616320a
Use struct init shorthand 4 years ago
Timo Kösters 14a178d783 Merge branch 'update-docker-base-image' into 'next' 4 years ago
Jonas Zohren 339a26f56c Update docker images 4 years ago
  1. 1
      .gitignore
  2. 19
      .gitlab-ci.yml
  3. 43
      Cargo.lock
  4. 4
      Cargo.toml
  5. 3
      Dockerfile
  6. 16
      book.toml
  7. 5
      docker/ci-binaries-packaging.Dockerfile
  8. 10
      docs/SUMMARY.md
  9. 29
      docs/index.md
  10. 1
      docs/installation.md
  11. 3
      docs/matrix-client-connection.svg
  12. 3
      docs/matrix-rough-overview.svg
  13. 3
      docs/matrix-with-homeserver-discovery.svg
  14. 1
      docs/setup.md
  15. 226
      docs/setup/configuration-options.md
  16. 82
      docs/setup/domain.md
  17. 232
      docs/setup/installation/binary.md
  18. 121
      docs/setup/installation/docker.md
  19. 28
      src/client_server/account.rs
  20. 6
      src/client_server/capabilities.rs
  21. 2
      src/client_server/directory.rs
  22. 22
      src/client_server/keys.rs
  23. 64
      src/client_server/membership.rs
  24. 16
      src/client_server/message.rs
  25. 10
      src/client_server/push.rs
  26. 4
      src/client_server/redact.rs
  27. 15
      src/client_server/report.rs
  28. 44
      src/client_server/room.rs
  29. 6
      src/client_server/state.rs
  30. 17
      src/client_server/sync.rs
  31. 6
      src/client_server/voip.rs
  32. 20
      src/database.rs
  33. 11
      src/database/admin.rs
  34. 14
      src/database/globals.rs
  35. 8
      src/database/key_backups.rs
  36. 4
      src/database/pusher.rs
  37. 151
      src/database/rooms.rs
  38. 35
      src/database/rooms/edus.rs
  39. 11
      src/database/sending.rs
  40. 32
      src/database/users.rs
  41. 35
      src/pdu.rs
  42. 10
      src/ruma_wrapper.rs
  43. 173
      src/server_server.rs

1
.gitignore vendored

@ -60,7 +60,6 @@ $RECYCLE.BIN/
Rocket.toml Rocket.toml
conduit.toml conduit.toml
conduit.db conduit.db
public/
# Etc. # Etc.
**/*.rs.bk **/*.rs.bk

19
.gitlab-ci.yml

@ -299,25 +299,6 @@ publish:package:
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"'
pages:
stage: deploy
image: rust
variables:
CARGO_HOME: $CI_PROJECT_DIR/cargo
before_script:
- export PATH="$PATH:$CARGO_HOME/bin"
- mdbook --version || cargo install mdbook
script:
- mdbook build -d public
rules:
- if: '$CI_COMMIT_REF_NAME == "master"'
artifacts:
paths:
- public
cache:
paths:
- $CARGO_HOME/bin
# Avoid duplicate pipelines # Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines # See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
workflow: workflow:

43
Cargo.lock generated

@ -1516,12 +1516,6 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "paste"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
[[package]] [[package]]
name = "pear" name = "pear"
version = "0.2.3" version = "0.2.3"
@ -1990,7 +1984,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma" name = "ruma"
version = "0.4.0" version = "0.4.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"assign", "assign",
"js_int", "js_int",
@ -2011,7 +2005,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-api" name = "ruma-api"
version = "0.18.5" version = "0.18.5"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"bytes", "bytes",
"http", "http",
@ -2027,7 +2021,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-api-macros" name = "ruma-api-macros"
version = "0.18.5" version = "0.18.5"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2038,7 +2032,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-appservice-api" name = "ruma-appservice-api"
version = "0.4.0" version = "0.4.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"ruma-api", "ruma-api",
"ruma-common", "ruma-common",
@ -2052,7 +2046,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-client-api" name = "ruma-client-api"
version = "0.12.3" version = "0.12.3"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"assign", "assign",
"bytes", "bytes",
@ -2072,7 +2066,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-common" name = "ruma-common"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"indexmap", "indexmap",
"js_int", "js_int",
@ -2087,7 +2081,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events" name = "ruma-events"
version = "0.24.6" version = "0.24.6"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"indoc", "indoc",
"js_int", "js_int",
@ -2103,7 +2097,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events-macros" name = "ruma-events-macros"
version = "0.24.6" version = "0.24.6"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2114,7 +2108,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-federation-api" name = "ruma-federation-api"
version = "0.3.1" version = "0.3.1"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2129,9 +2123,8 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers" name = "ruma-identifiers"
version = "0.20.0" version = "0.20.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"paste",
"percent-encoding", "percent-encoding",
"rand 0.8.4", "rand 0.8.4",
"ruma-identifiers-macros", "ruma-identifiers-macros",
@ -2144,7 +2137,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-macros" name = "ruma-identifiers-macros"
version = "0.20.0" version = "0.20.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"quote", "quote",
"ruma-identifiers-validation", "ruma-identifiers-validation",
@ -2154,7 +2147,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-validation" name = "ruma-identifiers-validation"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"thiserror", "thiserror",
] ]
@ -2162,7 +2155,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identity-service-api" name = "ruma-identity-service-api"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2175,7 +2168,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-push-gateway-api" name = "ruma-push-gateway-api"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2190,7 +2183,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-serde" name = "ruma-serde"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"bytes", "bytes",
"form_urlencoded", "form_urlencoded",
@ -2204,7 +2197,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-serde-macros" name = "ruma-serde-macros"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2215,7 +2208,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-signatures" name = "ruma-signatures"
version = "0.9.0" version = "0.9.0"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"base64 0.13.0", "base64 0.13.0",
"ed25519-dalek", "ed25519-dalek",
@ -2232,7 +2225,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-state-res" name = "ruma-state-res"
version = "0.4.1" version = "0.4.1"
source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"itertools 0.10.1", "itertools 0.10.1",
"js_int", "js_int",

4
Cargo.toml

@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request
# Used for matrix spec type definitions and helpers # Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "e7f01ca55a1eff437bad754bf0554cc09f44ec2a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } ruma = { git = "https://github.com/ruma/ruma", rev = "16f031fabb7871fcd738b0f25391193ee4ca28a9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
@ -40,7 +40,7 @@ serde_json = { version = "1.0.67", features = ["raw_value"] }
# Used for appservice registration files # Used for appservice registration files
serde_yaml = "0.8.20" serde_yaml = "0.8.20"
# Used for pdu definition # Used for pdu definition
serde = "1.0.130" serde = { version = "1.0.130", features = ["rc"] }
# Used for secure identifiers # Used for secure identifiers
rand = "0.8.4" rand = "0.8.4"
# Used to hash passwords # Used to hash passwords

3
Dockerfile

@ -32,7 +32,7 @@ RUN touch src/main.rs && touch src/lib.rs && cargo build --release
# --------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------
# Stuff below this line actually ends up in the resulting docker image # Stuff below this line actually ends up in the resulting docker image
# --------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------
FROM docker.io/alpine:3.14 AS runner FROM docker.io/alpine:3.15.0 AS runner
# Standard port on which Conduit launches. # Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose. # You still need to map the port when using the docker command or docker-compose.
@ -46,7 +46,6 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. # libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
RUN apk add --no-cache \ RUN apk add --no-cache \
ca-certificates \ ca-certificates \
curl \
libgcc libgcc

16
book.toml

@ -1,16 +0,0 @@
[book]
title = "Conduit Docs"
author = "The Conduit contributors"
description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol"
language = "en"
src = "docs"
[rust]
edition = "2018"
[build]
build-dir = "public"
create-missing = true
[output.html.search]
limit-results = 15

5
docker/ci-binaries-packaging.Dockerfile

@ -1,14 +1,13 @@
# syntax=docker/dockerfile:1 # syntax=docker/dockerfile:1
# --------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------
# This Dockerfile is intended to be built as part of Conduit's CI pipeline. # This Dockerfile is intended to be built as part of Conduit's CI pipeline.
# It does not build Conduit in Docker, but just copies the matching build artifact from the build job. # It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs.
# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary.
# #
# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching. # It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
# Credit's for the original Dockerfile: Weasy666. # Credit's for the original Dockerfile: Weasy666.
# --------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------
FROM docker.io/alpine:3.14 AS runner FROM docker.io/alpine:3.15.0 AS runner
# Standard port on which Conduit launches. # Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose. # You still need to map the port when using the docker command or docker-compose.

10
docs/SUMMARY.md

@ -1,10 +0,0 @@
# Conduit Matrix Server
[Introduction](index.md)
- [Setup](setup.md)
- [Domain](setup/domain.md)
- [Installation](installation.md)
- [Binary](setup/installation/binary.md)
- [Docker](setup/installation/docker.md)
- [Configuration](setup/configuration-options.md)

29
docs/index.md

@ -1,29 +0,0 @@
## What is Conduit?
**Conduit** is an efficient Matrix homeserver written in [Rust](https://rust-lang.org), that's easy to set up and just works.
You can install it on a mini-computer like the Raspberry Pi to host Matrix for your family, friends or company.
## Setup your own Conduit
1. [Choose your domain](setup/domain.md)
2. Choose your deployment type
- Binary
- Docker
3. Configure your Conduit
- Environment variables
- Config file
## Donate
- Liberapay: <https://liberapay.com/timokoesters/>\
- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
## Contribute
See [the git repository](https://gitlab.com/famedly/conduit/) for more info on how to work on Conduit's code.
## Licenses
Conduit's code and documentation is licensed under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0).
The [lightning bolt logo](https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg) is licensed under [Creative Commons Attribution 4.0 International (CC BY 4.0)](https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md)

1
docs/installation.md

@ -1 +0,0 @@
# Installation

3
docs/matrix-client-connection.svg

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 6.8 KiB

3
docs/matrix-rough-overview.svg

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 6.7 KiB

3
docs/matrix-with-homeserver-discovery.svg

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 12 KiB

1
docs/setup.md

@ -1 +0,0 @@
# Setup

226
docs/setup/configuration-options.md

@ -1,226 +0,0 @@
# Configuring Conduit
Conduit can be configured via a config file (conventionally called Conduit.toml) or environment variables.
If a config option is set in both the config file and as an environment variable, the environment variable takes precedence.
You absolutely need to set the environment variable `CONDUIT_CONFIG_FILE` to either point to a config file (
e.g. `CONDUIT_CONFIG_FILE=/etc/conduit/Conduit.toml`) or to an empty string (`CONDUIT_CONFIG_FILE=''`) if you want to
configure Conduit with just environment variables.
---
## Mandatory config options
Mandatory variables must be configured in order for Conduit to run properly.
### Server Name
- Config file key: `server_name`
- Environment variable: `CONDUIT_SERVER_NAME`
- Default value: _None, you will need to choose your own._
The server_name is the name of this server. It is used as a suffix for user and room ids. Example: If you set it
to `conduit.rs`, your usernames will look like `@somebody:conduit.rs`.
The Conduit server needs to be reachable at https://your.server.name/ on port 443 (client-server) and 8448 (
server-server) OR you can create /.well-known files to redirect requests. See
the [Client-Server specs](https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client) and
the [Server-Server specs](https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server) for more
information.
### Database Path
- Config file key: `database_path`
- Environment variable: `CONDUIT_DATABASE_PATH`
- Default value: _None, but many people like to use `/var/lib/conduit/`_.
A **directory** where Conduit stores its database and media files. This directory must exist, have enough free space and
be readable and writable by the user Conduit is running as.
What does _enough free space_ mean? It depends heavily on the amount of messages your Conduit server will see and the amount and
size of media files users on your Conduit server send. As a rule of thumb, you should have at least 10 GB of free space
left. You should be comfortable for quite some time with 50 GB.
### Port
- Config file key: `port`
- Environment variable: `CONDUIT_PORT`
- Default value: _None, but many people like to use `6167`_.
The TCP port Conduit will listen on for connections. The port needs to be free (no other program is listening on it).
Conduit does currently (2021-12) not offer HTTPS by itself. Only unencrypted HTTP requests will be accepted on this
port. Unless you know what you are doing, this port should not be exposed to the internet. Instead, use a reverse proxy
capable of doing TLS to offer your Conduit server to the internet via HTTPS.
---
## Optional configuration options
These config options come with defaults and don't need to be configured for Conduit to run. That said, you should still
check them to make sure that your Conduit server behaves like you want it to do.
### Maximum request size
- Config file key: `max_request_size`
- Environment variable: `CONDUIT_MAX_REQUEST_SIZE`
- Default value: `20_000_000` (~= 20 MB)
The maximum size in bytes for incoming requests to Conduit. You can use underscores to improve readability.
This will effectively limit the size for images, videos and other files users on your Conduit server can send.
### Allow Registration?
- Config file key: `allow_registration`
- Environment variable: `CONDUIT_ALLOW_REGISTRATION`
- Default value: `true`
- Possible values: `true`, `false`
If this is set to `false`, no new users can register accounts on your Conduit server. Already registered users will not
be affected by this setting and can continue to use your server.
The first user to ever register on your Conduit server will be considered the admin account and is automatically invited
into the admin room.
### Allow Encryption?
- Config file key: `allow_encryption`
- Environment variable: `CONDUIT_ALLOW_ENCRYPTION`
- Default value: `true`
- Possible values: `true`, `false`
If this is set to `false`, Conduit disables the ability for users to create encrypted chats. Existing encrypted chats
may continue to work.
### Allow federation?
- Config file key: `allow_federation`
- Environment variable: `CONDUIT_ALLOW_FEDERATION`
- Default value: `false`
- Possible values: `true`, `false`
Federation means that users from different Matrix servers can chat with each other. E.g. `@mathew:matrix.org` can chat
with `@timo:conduit.rs`.
If this option is set to `false`, users on your Conduit server can only talk with other users on your Conduit server.
Federation with other servers needs to happen over HTTPS, so make sure you have set up a reverse proxy.
You can use the [Federation Tester](https://federationtester.matrix.org/) to test your federation ability.
### Jaeger Tracing
- Config file key: `allow_jaeger`
- Environment variable: `CONDUIT_ALLOW_JAEGER`
- Default value: `false`
- Possible values: `true`, `false`
Enable Jaeger to support monitoring and troubleshooting through Jaeger.
If you don't know what Jaeger is, you can safely leave this set to `false`.
### Trusted servers
- Config file key: `trusted_servers`
- Environment variable: `CONDUIT_TRUSTED_SERVERS`
- Default value: `[]`
- Possible values: JSON-Array of server domains, e.g. `["matrix.org"]` or `["matrix.org", "conduit.rs"]`.
Matrix servers have so-called "server keys", which authenticate messages from their users. Because your Conduit server
might not know the server keys from every server it encounters, it can ask a _trusted server_ for them. This speeds
things up for rooms with people from a lot of different servers.
You should only set this to include trustworthy servers. Most people consider `["matrix.org"]` to be a good default.
Only relevant if you have federation enabled.
### Limit amount of concurrent requests
- Config file key: `max_concurrent_requests`
- Environment variable: `CONDUIT_MAX_CONCURRENT_REQUESTS`
- Default value: `100`
- Suggested values: `1` - `1000` (u16)
How many requests Conduit can make at the same time. This affects federation with other Matrix servers, push
notifications and app_services.
// TODO Timo: When does it make sense to change this?
### Configure logging
- Config file key: `log`
- Environment variable: `CONDUIT_LOG`
- Default value: `info,state_res=warn,rocket=off,_=off,sled=off`
Configures which kind of messages Conduit logs.
> // TODO: Better and more thorough explanation
### Worker threads
- Config file key: `workers`
- Environment variable: `CONDUIT_WORKERS`
- Default value: cpu core count \* 2
- Possible values: // TODO
> // TODO: Which thing exactly threads? What not?
### Listening address
- Config file key: `address`
- Environment variable: `CONDUIT_ADDRESS`
- Default value: `127.0.0.1`
- Possible values: Valid IP addresses.
Which IP address conduit is listening on. 127.0.0.1 means that Conduit can only be accessed from the same server or
through a reverse proxy on that server. If you want it to be accessible from any network interface (which you should
not, because other matrix servers should talk to your Conduit via a reverse proxy and not directly), you can set it
to `0.0.0.0`.
### Database cache capacity
- Config file key: `db_cache_capacity_mb`
- Environment variable: `CONDUIT_DB_CACHE_CAPACITY_MB`
- Default value: `200`
- Possible values: `true`, `false`
The total amount of memory (RAM) that the database cache will be able to use.
> // TODO: this needs clarification: In RAM or on disk and for what exactly?
### PDU cache capacity
- Config file key: `pdu_cache_capacity`
- Environment variable: `CONDUIT_PDU_CACHE_CAPACITY`
- Default value: `100_000`
- Suggested values: `1_000` - `1_000_000` (u32)
The total capacity (read: number of items) the pdu cache can hold in memory. Setting this to a lower number may slow
Conduit down, as it must fetch more data from the database. Increasing it will mean that Conduit will start to use more
memory as the cache slowly fills up.
### SQLite WAL clean interval
- Config file key: `sqlite_wal_clean_second_interval`
- Environment variable: `CONDUIT_SQLITE_WAL_CLEAN_SECOND_INTERVAL`
- Default value: `60` (every 60 seconds)
- Suggested values: `1` - `3600` (u32)
How often the WAL file should be cleaned up. The WAL file will be written to until cleaned up, after which it restarts
writing from the beginning.
The file's size will correspond to how long it could write to it in one go. (e.g. if conduit writes 100MB of data to the
database inbetween that period, the file will grow to 100MB). You can read more about that in
the [SQLite Docs](https://www.sqlite.org/draft/wal.html).
Reducing this down too much can offset the benefits of using a WAL at all. However, having this too high can result in a
large WAL file.
Only relevant when using SQLite as the database.
### Still undocumented config options
- `tracing_flame`
- `proxy`
- `jwt_secret`

82
docs/setup/domain.md

@ -1,82 +0,0 @@
# Domains?
## How Matrix (commonly) works
If you think about the way Matrix works, you will probably have this rough idea in your head:
![Sketch of two users, "@alice:a.com" and "@bob:b.com", each connected via an arrow to one server, "a.com" and "b.com". There is a connection arrow between those servers.](../matrix-rough-overview.svg)
So as @alice:a.com you are connected to your Matrix Homeserver which is a.com and if you chat with @bob:b.com, your Homeserver talks with Bob's Homeserver, b.com.
But how does your Matrix App know how to talk with your Homeserver, and how does your Homeserver know how to talk with other Homeservers?
## How your Matrix Client finds your Homeserver
1. You open your Matrix Client for the first time and type in your homeserver url: `example.com`.
2. Your Matrix client sends an http(s) request to `example.com/.well-known/matrix/client`.
3. The Webserver answers with a file like this:
```json
{
"m.homeserver": {
"base_url": "https://matrix.example.com"
}
}
```
4. Your Matrix client connects to `https://matrix.example.com` and checks if it is a Matrix home server.
5. You can register and log in as `@alice:example.com`
So the actual homeserver can have a different url (`matrix.example.com`) than the domain in your username (`example.com`). So the sketch from above gets a bit more complicated:
![Sketch of a user which as a first arrow going to a box labeled "Webserver A a.com" and a second arrow going to another box labeled "Homeserver A matrix.a.com"](../matrix-client-connection.svg)
## How your Homeserver finds other Homeservers
Now you want to message `@bob:b.com`. How does your Homeserver know how to talk to bob?
1. You send a message in a chat with `@bob:b.com`.
2. Your Homeserver sends an HTTP request to `b.com/.well-known/matrix/server`.
3. The webserver of `b.com` answers with a file like this:
```json
{
"m.server": "matrix.b.com:443"
}
```
4. Your homeserver connects to `https://matrix.b.com` on port 443 (the default port for HTTPS) and delivers your message to `@bob:b.com`.
Our diagram got even more complicated:
![](../matrix-with-homeserver-discovery.svg)
## Does this need to be this complicated?
The Matrix protocol is rather flexible to allow for big Homeservers with millions of users. This split between your domain (`a.com`) and the actual Homeserver url (`matrix.a.com`) allows you to run a website for `a.com` on one physical server and the Matrix Homeserver on another physical server, and enables other fancy but complicated setups.
## How to choose a setup for your own Homeserver
So what to do?
You will probably have a fancy domain, let's call it `example.com`.
Your users shall have names like `@chris:example.com` or `@danielle:example.com`.
In our guides, we assume you control the DNS settings for your domain and are able to setup a subdomain.
We also assume that you have a Linux server with a public IP address with ports 443 (and maybe 80) opened up in your firewall and some free disk space.
You should setup a reverse-proxying webserver like nginx, apache, traefik or caddy to
- Serve `https://example.com/.well-known/matrix/client`
- Serve `https://example.com/.well-known/matrix/server`
- Proxy `https://matrix.example.com/` to Conduit
(Again: Substitute example.com with your own, even better domain.)
## Alright, let's get started!
We documented a few routes to get to a working Conduit server. Choose which one you like:
- The manual way (best tested)
- Docker compose
- Docker + Reverse proxy

232
docs/setup/installation/binary.md

@ -1,232 +0,0 @@
## Installing Conduit
Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore
only offer Linux binaries.
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
| CPU Architecture | Download stable version |
| ------------------------------------------- | ------------------------------ |
| x86_64 / amd64 (Most servers and computers) | [Download][x86_64-musl-master] |
| armv6 | [Download][armv6-musl-master] |
| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] |
| armv8 / aarch64 | [Download][armv8-musl-master] |
[x86_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
$ sudo chmod +x /usr/local/bin/matrix-conduit
```
Alternatively, you may compile the binary yourself using
```bash
$ cargo build --release
```
Note that this currently requires Rust 1.50.
If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md).
## Adding a Conduit user
While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
you to make sure that the file permissions are correctly set up.
In Debian you can use this command to create a Conduit user:
```bash
sudo adduser --system conduit --no-create-home
```
## Setting up a systemd service
Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
server reboots. Simply paste the default systemd service you can find below into
`/etc/systemd/system/conduit.service`.
```systemd
[Unit]
Description=Conduit Matrix Server
After=network.target
[Service]
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
User=conduit
Group=nogroup
Restart=always
ExecStart=/usr/local/bin/matrix-conduit
[Install]
WantedBy=multi-user.target
```
Finally, run
```bash
$ sudo systemctl daemon-reload
```
## Creating the Conduit configuration file
Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
to read it. You need to change at least the server name.**
```toml
[global]
# The server_name is the name of this server. It is used as a suffix for user
# and room ids. Examples: matrix.org, conduit.rs
# The Conduit server needs to be reachable at https://your.server.name/ on port
# 443 (client-server) and 8448 (federation) OR you can create /.well-known
# files to redirect requests. See
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
# for more information
# YOU NEED TO EDIT THIS
#server_name = "your.server.name"
# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/conduit_db"
# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
# 443 and 8448 will be forwarded to the Conduit instance running on this port
port = 6167
# Max size for uploads
max_request_size = 20_000_000 # in bytes
# Enables registration. If set to false, no users can register on this server.
allow_registration = true
# Disable encryption, so no new encrypted rooms can be created
# Note: existing rooms will continue to work
allow_encryption = true
allow_federation = true
trusted_servers = ["matrix.org"]
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#workers = 4 # default: cpu core count * 2
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
# The total amount of memory that the database will use.
#db_cache_capacity_mb = 200
```
## Setting the correct file permissions
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
Debian:
```bash
sudo chown -R conduit:nogroup /etc/matrix-conduit
```
If you use the default database path you also need to run this:
```bash
sudo mkdir -p /var/lib/matrix-conduit/conduit_db
sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db
```
## Setting up the Reverse Proxy
This depends on whether you use Apache, Nginx or another web server.
### Apache
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
```apache
Listen 8448
<VirtualHost *:443 *:8448>
ServerName your.server.name # EDIT THIS
AllowEncodedSlashes NoDecode
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
</VirtualHost>
```
**You need to make some edits again.** When you are done, run
```bash
$ sudo systemctl reload apache2
```
### Nginx
If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`
```nginx
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
listen 8448 ssl http2;
listen [::]:8448 ssl http2;
server_name your.server.name; # EDIT THIS
merge_slashes off;
location /_matrix/ {
proxy_pass http://127.0.0.1:6167$request_uri;
proxy_set_header Host $http_host;
proxy_buffering off;
}
ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
ssl_certificate_key /etc/letsencrypt/live/your.server.name/privkey.pem; # EDIT THIS
ssl_trusted_certificate /etc/letsencrypt/live/your.server.name/chain.pem; # EDIT THIS
include /etc/letsencrypt/options-ssl-nginx.conf;
}
```
**You need to make some edits again.** When you are done, run
```bash
$ sudo systemctl reload nginx
```
## SSL Certificate
The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this:
```bash
$ sudo certbot -d your.server.name
```
## You're done!
Now you can start Conduit with:
```bash
$ sudo systemctl start conduit
```
Set it to start automatically when your system boots with:
```bash
$ sudo systemctl enable conduit
```
## How do I know it works?
You can open <https://app.element.io>, enter your homeserver and try to register.
You can also use these commands as a quick health check.
```bash
$ curl https://your.server.name/_matrix/client/versions
$ curl https://your.server.name:8448/_matrix/client/versions
```
If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md).

121
docs/setup/installation/docker.md

@ -1,121 +0,0 @@
# Deploy using Docker
> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.
>
> See the [Domain section](../domain.md) for more about this.
## Standalone Docker image
A typical way to start Conduit with Docker looks like this:
```bash
docker run \
--name "conduit" \
--detach \
--restart "unless-stopped" \
--env CONDUIT_CONFIG="" \
--env CONDUIT_SERVER_NAME="domain.tld" \
--env CONDUIT_ADDRESS="0.0.0.0" \
--env CONDUIT_ALLOW_REGISTRATION="true" \
--env CONDUIT_ALLOW_FEDERATION="true" \
--env CONDUIT_DATABASE_PATH="/srv/conduit/.local/share/conduit" \
--volume "/var/lib/conduit/:/srv/conduit/.local/share/conduit" \
--publish 6167:6167 \
matrixconduit/matrix-conduit:latest
```
<details>
<summary>Explanation of the above command</summary>
- `--name "conduit"` Create a container named "conduit"
- `--detach` Detach from current terminal and run in the background
- `--restart "unless-stopped"` Restart if Conduit crashes or after reboots
- `--env CONDUIT_CONFIG=""` Tell Conduit to only use environment variables (instead of a config file)
- `--env CONDUIT_ADDRESS="0.0.0.0" ` Answer to requests from outside of the container...
- `--publish 6167:6167` ... on port 6167
</details>
After a few seconds, your Conduit should be listening on port 6167.
If you have Element Desktop installed on the same machine, try creating an account on the server `localhost:6167`.
To check how your Conduit container is doing, you can use the commands `docker ps` and `docker logs conduit`.
### Next steps
For a functioning Matrix server which you can connect to from your phone and which federates with other Matrix servers, you still need to configure a reverse proxy to:
- Forward https traffic as http to the Conduit container on port 6167
- Serve .well-known files (see the [Domain section](../domain.md)) to tell Servers and clients where to find your Conduit
- Optionally serve a Matrix Web Client like Element Web or FluffyChat Web.
## Docker Compose
We also provide a `docker-compose.yaml` file, which includes everything you need to run a complete Matrix Homeserver:
- Conduit
- The reverse proxy
- Matrix Web Client
To get started:
1. Copy the `docker-compose.yaml` file to a new directory on your server.
2. Edit it and adjust your configuration.
3. Start it with
```bash
docker-compose up -d
```
### Use Traefik as Proxy
As a container user, you probably know about Traefik. It is an easy-to-use reverse proxy for making containerized apps and services available through the web. With the
two provided files, [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml), it is
equally easy to deploy and use Conduit, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is
the little caveat. Traefik is simply a proxy and load balancer and is not able to serve any kind of content, but for Conduit to federate, we need to either expose ports
`443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`.
With the service `well-known` we use a single `nginx` container that will serve those two files.
So...step by step:
1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) from the repository and remove `.traefik` from the filenames.
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
5. Create the files needed by the `well-known` service.
- `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping)
```nginx
server {
server_name <SUBDOMAIN>.<DOMAIN>;
listen 80 default_server;
location /.well-known/matrix/ {
root /var/www;
default_type application/json;
add_header Access-Control-Allow-Origin *;
}
}
```
- `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping)
```json
{
"m.homeserver": {
"base_url": "https://<SUBDOMAIN>.<DOMAIN>"
}
}
```
- `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping)
```json
{
"m.server": "<SUBDOMAIN>.<DOMAIN>:443"
}
```
6. Run `docker-compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.

28
src/client_server/account.rs

@ -1,8 +1,4 @@
use std::{ use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
};
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
@ -11,10 +7,9 @@ use ruma::{
error::ErrorKind, error::ErrorKind,
r0::{ r0::{
account::{ account::{
change_password, deactivate, get_username_availability, register, whoami, change_password, deactivate, get_3pids, get_username_availability, register,
ThirdPartyIdRemovalStatus, whoami, ThirdPartyIdRemovalStatus,
}, },
contact::get_contacts,
uiaa::{AuthFlow, AuthType, UiaaInfo}, uiaa::{AuthFlow, AuthType, UiaaInfo},
}, },
}, },
@ -282,7 +277,7 @@ pub async fn register_route(
let mut content = RoomCreateEventContent::new(conduit_user.clone()); let mut content = RoomCreateEventContent::new(conduit_user.clone());
content.federate = true; content.federate = true;
content.predecessor = None; content.predecessor = None;
content.room_version = RoomVersionId::Version6; content.room_version = RoomVersionId::V6;
// 1. The room create event // 1. The room create event
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
@ -311,6 +306,7 @@ pub async fn register_route(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -397,8 +393,7 @@ pub async fn register_route(
)?; )?;
// 6. Events implied by name and topic // 6. Events implied by name and topic
let room_name = let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name()))
Box::<RoomName>::try_from(format!("{} Admin Room", db.globals.server_name()))
.expect("Room name is valid"); .expect("Room name is valid");
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
@ -433,7 +428,7 @@ pub async fn register_route(
)?; )?;
// Room alias // Room alias
let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name()) let alias: Box<RoomAliasId> = format!("#admins:{}", db.globals.server_name())
.try_into() .try_into()
.expect("#admins:server_name is a valid alias name"); .expect("#admins:server_name is a valid alias name");
@ -469,6 +464,7 @@ pub async fn register_route(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -491,6 +487,7 @@ pub async fn register_route(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -707,6 +704,7 @@ pub async fn deactivate_route(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}; };
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
@ -757,9 +755,9 @@ pub async fn deactivate_route(
get("/_matrix/client/r0/account/3pid", data = "<body>") get("/_matrix/client/r0/account/3pid", data = "<body>")
)] )]
pub async fn third_party_route( pub async fn third_party_route(
body: Ruma<get_contacts::Request>, body: Ruma<get_3pids::Request>,
) -> ConduitResult<get_contacts::Response> { ) -> ConduitResult<get_3pids::Response> {
let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
Ok(get_contacts::Response::new(Vec::new()).into()) Ok(get_3pids::Response::new(Vec::new()).into())
} }

6
src/client_server/capabilities.rs

@ -22,12 +22,12 @@ pub async fn get_capabilities_route(
_body: Ruma<get_capabilities::Request>, _body: Ruma<get_capabilities::Request>,
) -> ConduitResult<get_capabilities::Response> { ) -> ConduitResult<get_capabilities::Response> {
let mut available = BTreeMap::new(); let mut available = BTreeMap::new();
available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); available.insert(RoomVersionId::V5, RoomVersionStability::Stable);
available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); available.insert(RoomVersionId::V6, RoomVersionStability::Stable);
let mut capabilities = Capabilities::new(); let mut capabilities = Capabilities::new();
capabilities.room_versions = RoomVersionsCapability { capabilities.room_versions = RoomVersionsCapability {
default: RoomVersionId::Version6, default: RoomVersionId::V6,
available, available,
}; };

2
src/client_server/directory.rs

@ -167,7 +167,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
other_server, other_server,
federation::directory::get_public_rooms_filtered::v1::Request { federation::directory::get_public_rooms_filtered::v1::Request {
limit, limit,
since: since.as_deref(), since,
filter: Filter { filter: Filter {
generic_search_term: filter.generic_search_term.as_deref(), generic_search_term: filter.generic_search_term.as_deref(),
}, },

22
src/client_server/keys.rs

@ -316,7 +316,7 @@ pub async fn get_key_changes_route(
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>( pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
sender_user: Option<&UserId>, sender_user: Option<&UserId>,
device_keys_input: &BTreeMap<UserId, Vec<Box<DeviceId>>>, device_keys_input: &BTreeMap<Box<UserId>, Vec<Box<DeviceId>>>,
allowed_signatures: F, allowed_signatures: F,
db: &Database, db: &Database,
) -> Result<get_keys::Response> { ) -> Result<get_keys::Response> {
@ -328,6 +328,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
let mut get_over_federation = HashMap::new(); let mut get_over_federation = HashMap::new();
for (user_id, device_ids) in device_keys_input { for (user_id, device_ids) in device_keys_input {
let user_id: &UserId = &**user_id;
if user_id.server_name() != db.globals.server_name() { if user_id.server_name() != db.globals.server_name() {
get_over_federation get_over_federation
.entry(user_id.server_name()) .entry(user_id.server_name())
@ -355,11 +357,11 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
container.insert(device_id, keys); container.insert(device_id, keys);
} }
} }
device_keys.insert(user_id.clone(), container); device_keys.insert(user_id.to_owned(), container);
} else { } else {
for device_id in device_ids { for device_id in device_ids {
let mut container = BTreeMap::new(); let mut container = BTreeMap::new();
if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), device_id)? { if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? {
let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or( let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or(
Error::BadRequest( Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
@ -371,24 +373,24 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
device_display_name: metadata.display_name, device_display_name: metadata.display_name,
}; };
container.insert(device_id.clone(), keys); container.insert(device_id.to_owned(), keys);
} }
device_keys.insert(user_id.clone(), container); device_keys.insert(user_id.to_owned(), container);
} }
} }
if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? { if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
master_keys.insert(user_id.clone(), master_key); master_keys.insert(user_id.to_owned(), master_key);
} }
if let Some(self_signing_key) = db if let Some(self_signing_key) = db
.users .users
.get_self_signing_key(user_id, &allowed_signatures)? .get_self_signing_key(user_id, &allowed_signatures)?
{ {
self_signing_keys.insert(user_id.clone(), self_signing_key); self_signing_keys.insert(user_id.to_owned(), self_signing_key);
} }
if Some(user_id) == sender_user { if Some(user_id) == sender_user {
if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? { if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
user_signing_keys.insert(user_id.clone(), user_signing_key); user_signing_keys.insert(user_id.to_owned(), user_signing_key);
} }
} }
} }
@ -400,7 +402,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
.map(|(server, vec)| async move { .map(|(server, vec)| async move {
let mut device_keys_input_fed = BTreeMap::new(); let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec { for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.clone(), keys.clone()); device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
} }
( (
server, server,
@ -440,7 +442,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
} }
pub(crate) async fn claim_keys_helper( pub(crate) async fn claim_keys_helper(
one_time_keys_input: &BTreeMap<UserId, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>, one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
db: &Database, db: &Database,
) -> Result<claim_keys::Response> { ) -> Result<claim_keys::Response> {
let mut one_time_keys = BTreeMap::new(); let mut one_time_keys = BTreeMap::new();

64
src/client_server/membership.rs

@ -31,6 +31,7 @@ use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use std::{ use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
iter,
sync::{Arc, RwLock}, sync::{Arc, RwLock},
time::{Duration, Instant}, time::{Duration, Instant},
}; };
@ -64,7 +65,7 @@ pub async fn join_room_by_id_route(
.filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
.filter_map(|sender| UserId::try_from(sender).ok()) .filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()) .map(|user| user.server_name().to_owned())
.collect(); .collect();
@ -72,7 +73,7 @@ pub async fn join_room_by_id_route(
let ret = join_room_by_id_helper( let ret = join_room_by_id_helper(
&db, &db,
body.sender_user.as_ref(), body.sender_user.as_deref(),
&body.room_id, &body.room_id,
&servers, &servers,
body.third_party_signed.as_ref(), body.third_party_signed.as_ref(),
@ -99,9 +100,10 @@ pub async fn join_room_by_id_or_alias_route(
db: DatabaseGuard, db: DatabaseGuard,
body: Ruma<join_room_by_id_or_alias::Request<'_>>, body: Ruma<join_room_by_id_or_alias::Request<'_>>,
) -> ConduitResult<join_room_by_id_or_alias::Response> { ) -> ConduitResult<join_room_by_id_or_alias::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_deref().expect("user is authenticated");
let body = body.body;
let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) { let (servers, room_id) = match Box::<RoomId>::try_from(body.room_id_or_alias) {
Ok(room_id) => { Ok(room_id) => {
let mut servers: HashSet<_> = db let mut servers: HashSet<_> = db
.rooms .rooms
@ -111,7 +113,7 @@ pub async fn join_room_by_id_or_alias_route(
.filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
.filter_map(|sender| UserId::try_from(sender).ok()) .filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()) .map(|user| user.server_name().to_owned())
.collect(); .collect();
@ -127,7 +129,7 @@ pub async fn join_room_by_id_or_alias_route(
let join_room_response = join_room_by_id_helper( let join_room_response = join_room_by_id_helper(
&db, &db,
body.sender_user.as_ref(), Some(sender_user),
&room_id, &room_id,
&servers, &servers,
body.third_party_signed.as_ref(), body.third_party_signed.as_ref(),
@ -284,6 +286,7 @@ pub async fn ban_user_route(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}), }),
|event| { |event| {
serde_json::from_str(event.content.get()) serde_json::from_str(event.content.get())
@ -531,7 +534,7 @@ async fn join_room_by_id_helper(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -551,7 +554,7 @@ async fn join_room_by_id_helper(
federation::membership::create_join_event_template::v1::Request { federation::membership::create_join_event_template::v1::Request {
room_id, room_id,
user_id: sender_user, user_id: sender_user,
ver: &[RoomVersionId::Version5, RoomVersionId::Version6], ver: &[RoomVersionId::V5, RoomVersionId::V6],
}, },
) )
.await; .await;
@ -567,8 +570,7 @@ async fn join_room_by_id_helper(
let room_version = match make_join_response.room_version { let room_version = match make_join_response.room_version {
Some(room_version) Some(room_version)
if room_version == RoomVersionId::Version5 if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 =>
|| room_version == RoomVersionId::Version6 =>
{ {
room_version room_version
} }
@ -603,6 +605,7 @@ async fn join_room_by_id_helper(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
); );
@ -620,11 +623,12 @@ async fn join_room_by_id_helper(
.expect("event is valid, we just created it"); .expect("event is valid, we just created it");
// Generate event id // Generate event id
let event_id = EventId::try_from(&*format!( let event_id = format!(
"${}", "${}",
ruma::signatures::reference_hash(&join_event_stub, &room_version) ruma::signatures::reference_hash(&join_event_stub, &room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
)) );
let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
// Add event_id back // Add event_id back
@ -643,7 +647,7 @@ async fn join_room_by_id_helper(
remote_server, remote_server,
federation::membership::create_join_event::v2::Request { federation::membership::create_join_event::v2::Request {
room_id, room_id,
event_id: &event_id, event_id,
pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()), pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()),
}, },
) )
@ -651,7 +655,7 @@ async fn join_room_by_id_helper(
db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; db.rooms.get_or_create_shortroomid(room_id, &db.globals)?;
let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) let pdu = PduEvent::from_id_val(event_id, join_event.clone())
.map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?;
let mut state = HashMap::new(); let mut state = HashMap::new();
@ -739,7 +743,7 @@ async fn join_room_by_id_helper(
db.rooms.append_pdu( db.rooms.append_pdu(
&pdu, &pdu,
utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"),
&[pdu.event_id.clone()], iter::once(&*pdu.event_id),
db, db,
)?; )?;
@ -755,6 +759,7 @@ async fn join_room_by_id_helper(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}; };
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
@ -776,7 +781,7 @@ async fn join_room_by_id_helper(
db.flush()?; db.flush()?;
Ok(join_room_by_id::Response::new(room_id.clone()).into()) Ok(join_room_by_id::Response::new(room_id.to_owned()).into())
} }
fn validate_and_add_event_id( fn validate_and_add_event_id(
@ -784,12 +789,12 @@ fn validate_and_add_event_id(
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
db: &Database, db: &Database,
) -> Result<(EventId, CanonicalJsonObject)> { ) -> Result<(Box<EventId>, CanonicalJsonObject)> {
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response") Error::BadServerResponse("Invalid PDU in server response")
})?; })?;
let event_id = EventId::try_from(&*format!( let event_id = EventId::parse(format!(
"${}", "${}",
ruma::signatures::reference_hash(&value, room_version) ruma::signatures::reference_hash(&value, room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
@ -856,7 +861,7 @@ pub(crate) async fn invite_helper<'a>(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -892,9 +897,7 @@ pub(crate) async fn invite_helper<'a>(
// If there was no create event yet, assume we are creating a version 6 room right now // If there was no create event yet, assume we are creating a version 6 room right now
let room_version_id = create_event_content let room_version_id = create_event_content
.map_or(RoomVersionId::Version6, |create_event| { .map_or(RoomVersionId::V6, |create_event| create_event.room_version);
create_event.room_version
});
let room_version = let room_version =
RoomVersion::new(&room_version_id).expect("room version is supported"); RoomVersion::new(&room_version_id).expect("room version is supported");
@ -906,6 +909,7 @@ pub(crate) async fn invite_helper<'a>(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("member event is valid value"); .expect("member event is valid value");
@ -939,9 +943,9 @@ pub(crate) async fn invite_helper<'a>(
} }
let pdu = PduEvent { let pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater"), event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: room_id.clone(), room_id: room_id.to_owned(),
sender: sender_user.clone(), sender: sender_user.to_owned(),
origin_server_ts: utils::millis_since_unix_epoch() origin_server_ts: utils::millis_since_unix_epoch()
.try_into() .try_into()
.expect("time is valid"), .expect("time is valid"),
@ -1014,11 +1018,12 @@ pub(crate) async fn invite_helper<'a>(
}; };
// Generate event id // Generate event id
let expected_event_id = EventId::try_from(&*format!( let expected_event_id = format!(
"${}", "${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id) ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
)) );
let expected_event_id = <&EventId>::try_from(expected_event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
let response = db let response = db
@ -1028,7 +1033,7 @@ pub(crate) async fn invite_helper<'a>(
user_id.server_name(), user_id.server_name(),
create_invite::v2::Request { create_invite::v2::Request {
room_id, room_id,
event_id: &expected_event_id, event_id: expected_event_id,
room_version: &room_version_id, room_version: &room_version_id,
event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()),
invite_room_state: &invite_room_state, invite_room_state: &invite_room_state,
@ -1100,7 +1105,7 @@ pub(crate) async fn invite_helper<'a>(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -1116,6 +1121,7 @@ pub(crate) async fn invite_helper<'a>(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(user_id)?, blurhash: db.users.blurhash(user_id)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,

16
src/client_server/message.rs

@ -5,13 +5,8 @@ use ruma::{
r0::message::{get_message_events, send_message_event}, r0::message::{get_message_events, send_message_event},
}, },
events::EventType, events::EventType,
EventId,
};
use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
}; };
use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::{get, put}; use rocket::{get, put};
@ -67,10 +62,9 @@ pub async fn send_message_event_route(
)); ));
} }
let event_id = EventId::try_from( let event_id = utils::string_from_bytes(&response)
utils::string_from_bytes(&response) .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?, .try_into()
)
.map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
return Ok(send_message_event::Response { event_id }.into()); return Ok(send_message_event::Response { event_id }.into());
} }
@ -104,7 +98,7 @@ pub async fn send_message_event_route(
db.flush()?; db.flush()?;
Ok(send_message_event::Response::new(event_id).into()) Ok(send_message_event::Response::new((*event_id).to_owned()).into())
} }
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages` /// # `GET /_matrix/client/r0/rooms/{roomId}/messages`

10
src/client_server/push.rs

@ -105,15 +105,15 @@ pub async fn get_pushrule_route(
/// Creates a single specified push rule for this user. /// Creates a single specified push rule for this user.
#[cfg_attr( #[cfg_attr(
feature = "conduit_bin", feature = "conduit_bin",
put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<req>") put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<body>")
)] )]
#[tracing::instrument(skip(db, req))] #[tracing::instrument(skip(db, body))]
pub async fn set_pushrule_route( pub async fn set_pushrule_route(
db: DatabaseGuard, db: DatabaseGuard,
req: Ruma<set_pushrule::Request<'_>>, body: Ruma<set_pushrule::Request<'_>>,
) -> ConduitResult<set_pushrule::Response> { ) -> ConduitResult<set_pushrule::Response> {
let sender_user = req.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = req.body; let body = body.body;
if body.scope != "global" { if body.scope != "global" {
return Err(Error::BadRequest( return Err(Error::BadRequest(

4
src/client_server/redact.rs

@ -25,6 +25,7 @@ pub async fn redact_event_route(
body: Ruma<redact_event::Request<'_>>, body: Ruma<redact_event::Request<'_>>,
) -> ConduitResult<redact_event::Response> { ) -> ConduitResult<redact_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = body.body;
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
db.globals db.globals
@ -45,7 +46,7 @@ pub async fn redact_event_route(
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: None, state_key: None,
redacts: Some(body.event_id.clone()), redacts: Some(body.event_id.into()),
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -57,5 +58,6 @@ pub async fn redact_event_route(
db.flush()?; db.flush()?;
let event_id = (*event_id).to_owned();
Ok(redact_event::Response { event_id }.into()) Ok(redact_event::Response { event_id }.into())
} }

15
src/client_server/report.rs

@ -1,8 +1,11 @@
use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma}; use crate::{
database::{admin::AdminCommand, DatabaseGuard},
ConduitResult, Error, Ruma,
};
use ruma::{ use ruma::{
api::client::{error::ErrorKind, r0::room::report_content}, api::client::{error::ErrorKind, r0::room::report_content},
events::room::message, events::room::message,
Int, int,
}; };
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
@ -33,7 +36,7 @@ pub async fn report_event_route(
} }
}; };
if body.score > Int::from(0) || body.score < Int::from(-100) { if body.score > int!(0) || body.score < int!(-100) {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
"Invalid score, must be within 0 to -100", "Invalid score, must be within 0 to -100",
@ -57,8 +60,7 @@ pub async fn report_event_route(
Report Score: {}\n\ Report Score: {}\n\
Report Reason: {}", Report Reason: {}",
sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
) ),
.to_owned(),
format!( format!(
"<details><summary>Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\ "<details><summary>Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\
</a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\ </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\
@ -72,8 +74,7 @@ pub async fn report_event_route(
pdu.sender, pdu.sender,
body.score, body.score,
RawStr::new(&body.reason).html_escape() RawStr::new(&body.reason).html_escape()
) ),
.to_owned(),
), ),
)); ));

44
src/client_server/room.rs

@ -22,16 +22,12 @@ use ruma::{
}, },
EventType, EventType,
}, },
int,
serde::{CanonicalJsonObject, JsonObject}, serde::{CanonicalJsonObject, JsonObject},
RoomAliasId, RoomId, RoomVersionId, RoomAliasId, RoomId, RoomVersionId,
}; };
use serde_json::{json, value::to_raw_value}; use serde_json::{json, value::to_raw_value};
use std::{ use std::{cmp::max, collections::BTreeMap, convert::TryInto, sync::Arc};
cmp::max,
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
};
use tracing::{info, warn}; use tracing::{info, warn};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
@ -88,14 +84,16 @@ pub async fn create_room_route(
)); ));
} }
let alias: Option<RoomAliasId> = let alias: Option<Box<RoomAliasId>> =
body.room_alias_name body.room_alias_name
.as_ref() .as_ref()
.map_or(Ok(None), |localpart| { .map_or(Ok(None), |localpart| {
// TODO: Check for invalid characters and maximum length // TODO: Check for invalid characters and maximum length
let alias = let alias =
RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name()))
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; .map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.")
})?;
if db.rooms.id_from_alias(&alias)?.is_some() { if db.rooms.id_from_alias(&alias)?.is_some() {
Err(Error::BadRequest( Err(Error::BadRequest(
@ -109,7 +107,7 @@ pub async fn create_room_route(
let room_version = match body.room_version.clone() { let room_version = match body.room_version.clone() {
Some(room_version) => { Some(room_version) => {
if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 {
room_version room_version
} else { } else {
return Err(Error::BadRequest( return Err(Error::BadRequest(
@ -118,7 +116,7 @@ pub async fn create_room_route(
)); ));
} }
} }
None => RoomVersionId::Version6, None => RoomVersionId::V6,
}; };
let content = match &body.creation_content { let content = match &body.creation_content {
@ -164,7 +162,7 @@ pub async fn create_room_route(
.get(), .get(),
); );
if let Err(_) = de_result { if de_result.is_err() {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::BadJson, ErrorKind::BadJson,
"Invalid creation content", "Invalid creation content",
@ -198,6 +196,7 @@ pub async fn create_room_route(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -223,11 +222,11 @@ pub async fn create_room_route(
}); });
let mut users = BTreeMap::new(); let mut users = BTreeMap::new();
users.insert(sender_user.clone(), 100.into()); users.insert(sender_user.clone(), int!(100));
if preset == create_room::RoomPreset::TrustedPrivateChat { if preset == create_room::RoomPreset::TrustedPrivateChat {
for invite_ in &body.invite { for invite_ in &body.invite {
users.insert(invite_.clone(), 100.into()); users.insert(invite_.clone(), int!(100));
} }
} }
@ -269,7 +268,7 @@ pub async fn create_room_route(
PduBuilder { PduBuilder {
event_type: EventType::RoomCanonicalAlias, event_type: EventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent { content: to_raw_value(&RoomCanonicalAliasEventContent {
alias: Some(room_alias_id.clone()), alias: Some(room_alias_id.to_owned()),
alt_aliases: vec![], alt_aliases: vec![],
}) })
.expect("We checked that alias earlier, it must be fine"), .expect("We checked that alias earlier, it must be fine"),
@ -505,10 +504,7 @@ pub async fn upgrade_room_route(
) -> ConduitResult<upgrade_room::Response> { ) -> ConduitResult<upgrade_room::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if !matches!( if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) {
body.new_version,
RoomVersionId::Version5 | RoomVersionId::Version6
) {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::UnsupportedRoomVersion, ErrorKind::UnsupportedRoomVersion,
"This server does not support that room version.", "This server does not support that room version.",
@ -575,7 +571,7 @@ pub async fn upgrade_room_route(
// Use the m.room.tombstone event as the predecessor // Use the m.room.tombstone event as the predecessor
let predecessor = Some(ruma::events::room::create::PreviousRoom::new( let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
body.room_id.clone(), body.room_id.clone(),
tombstone_event_id, (*tombstone_event_id).to_owned(),
)); ));
// Send a m.room.create event containing a predecessor field and the applicable room_version // Send a m.room.create event containing a predecessor field and the applicable room_version
@ -605,7 +601,7 @@ pub async fn upgrade_room_route(
.get(), .get(),
); );
if let Err(_) = de_result { if de_result.is_err() {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::BadJson, ErrorKind::BadJson,
"Error forming creation event", "Error forming creation event",
@ -639,6 +635,7 @@ pub async fn upgrade_room_route(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -703,10 +700,7 @@ pub async fn upgrade_room_route(
.map_err(|_| Error::bad_database("Invalid room event in database."))?; .map_err(|_| Error::bad_database("Invalid room event in database."))?;
// Setting events_default and invite to the greater of 50 and users_default + 1 // Setting events_default and invite to the greater of 50 and users_default + 1
let new_level = max( let new_level = max(int!(50), power_levels_event_content.users_default + int!(1));
50.into(),
power_levels_event_content.users_default + 1.into(),
);
power_levels_event_content.events_default = new_level; power_levels_event_content.events_default = new_level;
power_levels_event_content.invite = new_level; power_levels_event_content.invite = new_level;

6
src/client_server/state.rs

@ -52,6 +52,7 @@ pub async fn send_state_event_for_key_route(
db.flush()?; db.flush()?;
let event_id = (*event_id).to_owned();
Ok(send_state_event::Response { event_id }.into()) Ok(send_state_event::Response { event_id }.into())
} }
@ -93,6 +94,7 @@ pub async fn send_state_event_for_empty_key_route(
db.flush()?; db.flush()?;
let event_id = (*event_id).to_owned();
Ok(send_state_event::Response { event_id }.into()) Ok(send_state_event::Response { event_id }.into())
} }
@ -267,7 +269,7 @@ async fn send_state_event_for_key_helper(
event_type: EventType, event_type: EventType,
json: &Raw<AnyStateEventContent>, json: &Raw<AnyStateEventContent>,
state_key: String, state_key: String,
) -> Result<EventId> { ) -> Result<Arc<EventId>> {
let sender_user = sender; let sender_user = sender;
// TODO: Review this check, error if event is unparsable, use event type, allow alias if it // TODO: Review this check, error if event is unparsable, use event type, allow alias if it
@ -303,7 +305,7 @@ async fn send_state_event_for_key_helper(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;

17
src/client_server/sync.rs

@ -10,7 +10,7 @@ use ruma::{
}; };
use std::{ use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::TryInto,
sync::Arc, sync::Arc,
time::Duration, time::Duration,
}; };
@ -61,8 +61,9 @@ pub async fn sync_events_route(
db: DatabaseGuard, db: DatabaseGuard,
body: Ruma<sync_events::Request<'_>>, body: Ruma<sync_events::Request<'_>>,
) -> Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> { ) -> Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated");
let body = body.body;
let arc_db = Arc::new(db); let arc_db = Arc::new(db);
@ -132,7 +133,7 @@ pub async fn sync_events_route(
async fn sync_helper_wrapper( async fn sync_helper_wrapper(
db: Arc<DatabaseGuard>, db: Arc<DatabaseGuard>,
sender_user: UserId, sender_user: Box<UserId>,
sender_device: Box<DeviceId>, sender_device: Box<DeviceId>,
since: Option<String>, since: Option<String>,
full_state: bool, full_state: bool,
@ -176,7 +177,7 @@ async fn sync_helper_wrapper(
async fn sync_helper( async fn sync_helper(
db: Arc<DatabaseGuard>, db: Arc<DatabaseGuard>,
sender_user: UserId, sender_user: Box<UserId>,
sender_device: Box<DeviceId>, sender_device: Box<DeviceId>,
since: Option<String>, since: Option<String>,
full_state: bool, full_state: bool,
@ -296,7 +297,7 @@ async fn sync_helper(
})?; })?;
if let Some(state_key) = &pdu.state_key { if let Some(state_key) = &pdu.state_key {
let user_id = UserId::try_from(state_key.clone()).map_err(|_| { let user_id = UserId::parse(state_key.clone()).map_err(|_| {
Error::bad_database("Invalid UserId in member PDU.") Error::bad_database("Invalid UserId in member PDU.")
})?; })?;
@ -424,7 +425,7 @@ async fn sync_helper(
} }
if let Some(state_key) = &state_event.state_key { if let Some(state_key) = &state_event.state_key {
let user_id = UserId::try_from(state_key.clone()) let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == sender_user { if user_id == sender_user {
@ -793,7 +794,7 @@ fn share_encrypted_room(
) -> Result<bool> { ) -> Result<bool> {
Ok(db Ok(db
.rooms .rooms
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
.filter(|room_id| room_id != ignore_room) .filter(|room_id| room_id != ignore_room)
.filter_map(|other_room_id| { .filter_map(|other_room_id| {

6
src/client_server/voip.rs

@ -26,7 +26,7 @@ pub async fn turn_server_route(
let turn_secret = db.globals.turn_secret(); let turn_secret = db.globals.turn_secret();
let (username, password) = if turn_secret != "" { let (username, password) = if !turn_secret.is_empty() {
let expiry = SecondsSinceUnixEpoch::from_system_time( let expiry = SecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()), SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
) )
@ -49,8 +49,8 @@ pub async fn turn_server_route(
}; };
Ok(get_turn_server_info::Response { Ok(get_turn_server_info::Response {
username: username, username,
password: password, password,
uris: db.globals.turn_uris().to_vec(), uris: db.globals.turn_uris().to_vec(),
ttl: Duration::from_secs(db.globals.turn_ttl()), ttl: Duration::from_secs(db.globals.turn_ttl()),
} }

20
src/database.rs

@ -476,10 +476,9 @@ impl Database {
if db.globals.database_version()? < 6 { if db.globals.database_version()? < 6 {
// Set room member count // Set room member count
for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { for (roomid, _) in db.rooms.roomid_shortstatehash.iter() {
let room_id = let string = utils::string_from_bytes(&roomid).unwrap();
RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap(); let room_id = <&RoomId>::try_from(string.as_str()).unwrap();
db.rooms.update_joined_count(room_id, &db)?;
db.rooms.update_joined_count(&room_id, &db)?;
} }
db.globals.bump_database_version(6)?; db.globals.bump_database_version(6)?;
@ -489,7 +488,7 @@ impl Database {
if db.globals.database_version()? < 7 { if db.globals.database_version()? < 7 {
// Upgrade state store // Upgrade state store
let mut last_roomstates: HashMap<RoomId, u64> = HashMap::new(); let mut last_roomstates: HashMap<Box<RoomId>, u64> = HashMap::new();
let mut current_sstatehash: Option<u64> = None; let mut current_sstatehash: Option<u64> = None;
let mut current_room = None; let mut current_room = None;
let mut current_state = HashSet::new(); let mut current_state = HashSet::new();
@ -570,7 +569,7 @@ impl Database {
if let Some(current_sstatehash) = current_sstatehash { if let Some(current_sstatehash) = current_sstatehash {
handle_state( handle_state(
current_sstatehash, current_sstatehash,
current_room.as_ref().unwrap(), current_room.as_deref().unwrap(),
current_state, current_state,
&mut last_roomstates, &mut last_roomstates,
)?; )?;
@ -586,10 +585,9 @@ impl Database {
.get(&seventid) .get(&seventid)
.unwrap() .unwrap()
.unwrap(); .unwrap();
let event_id = let string = utils::string_from_bytes(&event_id).unwrap();
EventId::try_from(utils::string_from_bytes(&event_id).unwrap()) let event_id = <&EventId>::try_from(string.as_str()).unwrap();
.unwrap(); let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap();
let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap();
if Some(&pdu.room_id) != current_room.as_ref() { if Some(&pdu.room_id) != current_room.as_ref() {
current_room = Some(pdu.room_id.clone()); current_room = Some(pdu.room_id.clone());
@ -604,7 +602,7 @@ impl Database {
if let Some(current_sstatehash) = current_sstatehash { if let Some(current_sstatehash) = current_sstatehash {
handle_state( handle_state(
current_sstatehash, current_sstatehash,
current_room.as_ref().unwrap(), current_room.as_deref().unwrap(),
current_state, current_state,
&mut last_roomstates, &mut last_roomstates,
)?; )?;

11
src/database/admin.rs

@ -1,7 +1,4 @@
use std::{ use std::{convert::TryInto, sync::Arc};
convert::{TryFrom, TryInto},
sync::Arc,
};
use crate::{pdu::PduBuilder, Database}; use crate::{pdu::PduBuilder, Database};
use rocket::futures::{channel::mpsc, stream::StreamExt}; use rocket::futures::{channel::mpsc, stream::StreamExt};
@ -36,14 +33,14 @@ impl Admin {
let guard = db.read().await; let guard = db.read().await;
let conduit_user = let conduit_user = UserId::parse(format!("@conduit:{}", guard.globals.server_name()))
UserId::try_from(format!("@conduit:{}", guard.globals.server_name()))
.expect("@conduit:server_name is valid"); .expect("@conduit:server_name is valid");
let conduit_room = guard let conduit_room = guard
.rooms .rooms
.id_from_alias( .id_from_alias(
&format!("#admins:{}", guard.globals.server_name()) format!("#admins:{}", guard.globals.server_name())
.as_str()
.try_into() .try_into()
.expect("#admins:server_name is a valid room alias"), .expect("#admins:server_name is a valid room alias"),
) )

14
src/database/globals.rs

@ -40,13 +40,13 @@ pub struct Globals {
dns_resolver: TokioAsyncResolver, dns_resolver: TokioAsyncResolver,
jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>, jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>,
pub(super) server_signingkeys: Arc<dyn Tree>, pub(super) server_signingkeys: Arc<dyn Tree>,
pub bad_event_ratelimiter: Arc<RwLock<HashMap<EventId, RateLimitState>>>, pub bad_event_ratelimiter: Arc<RwLock<HashMap<Box<EventId>, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>, pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<HashMap<Box<ServerName>, Arc<Semaphore>>>>, pub servername_ratelimiter: Arc<RwLock<HashMap<Box<ServerName>, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<HashMap<(UserId, Box<DeviceId>), SyncHandle>>, pub sync_receivers: RwLock<HashMap<(Box<UserId>, Box<DeviceId>), SyncHandle>>,
pub roomid_mutex_insert: RwLock<HashMap<RoomId, Arc<Mutex<()>>>>, pub roomid_mutex_insert: RwLock<HashMap<Box<RoomId>, Arc<Mutex<()>>>>,
pub roomid_mutex_state: RwLock<HashMap<RoomId, Arc<TokioMutex<()>>>>, pub roomid_mutex_state: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<RoomId, Arc<TokioMutex<()>>>>, // this lock will be held longer pub roomid_mutex_federation: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>, // this lock will be held longer
pub rotate: RotationHandler, pub rotate: RotationHandler,
} }
@ -254,7 +254,7 @@ impl Globals {
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> { ) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> {
// Not atomic, but this is not critical // Not atomic, but this is not critical
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
@ -293,7 +293,7 @@ impl Globals {
pub fn signing_keys_for( pub fn signing_keys_for(
&self, &self,
origin: &ServerName, origin: &ServerName,
) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> { ) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> {
let signingkeys = self let signingkeys = self
.server_signingkeys .server_signingkeys
.get(origin.as_bytes())? .get(origin.as_bytes())?

8
src/database/key_backups.rs

@ -6,7 +6,7 @@ use ruma::{
}, },
RoomId, UserId, RoomId, UserId,
}; };
use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; use std::{collections::BTreeMap, sync::Arc};
use super::abstraction::Tree; use super::abstraction::Tree;
@ -209,13 +209,13 @@ impl KeyBackups {
&self, &self,
user_id: &UserId, user_id: &UserId,
version: &str, version: &str,
) -> Result<BTreeMap<RoomId, RoomKeyBackup>> { ) -> Result<BTreeMap<Box<RoomId>, RoomKeyBackup>> {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
prefix.extend_from_slice(version.as_bytes()); prefix.extend_from_slice(version.as_bytes());
prefix.push(0xff); prefix.push(0xff);
let mut rooms = BTreeMap::<RoomId, RoomKeyBackup>::new(); let mut rooms = BTreeMap::<Box<RoomId>, RoomKeyBackup>::new();
for result in self for result in self
.backupkeyid_backup .backupkeyid_backup
@ -231,7 +231,7 @@ impl KeyBackups {
Error::bad_database("backupkeyid_backup session_id is invalid.") Error::bad_database("backupkeyid_backup session_id is invalid.")
})?; })?;
let room_id = RoomId::try_from( let room_id = RoomId::parse(
utils::string_from_bytes(parts.next().ok_or_else(|| { utils::string_from_bytes(parts.next().ok_or_else(|| {
Error::bad_database("backupkeyid_backup key is invalid.") Error::bad_database("backupkeyid_backup key is invalid.")
})?) })?)

4
src/database/pusher.rs

@ -234,7 +234,7 @@ pub fn get_actions<'a>(
db: &Database, db: &Database,
) -> Result<&'a [Action]> { ) -> Result<&'a [Action]> {
let ctx = PushConditionRoomCtx { let ctx = PushConditionRoomCtx {
room_id: room_id.clone(), room_id: room_id.to_owned(),
member_count: 10_u32.into(), // TODO: get member count efficiently member_count: 10_u32.into(), // TODO: get member count efficiently
user_display_name: db user_display_name: db
.users .users
@ -277,7 +277,7 @@ async fn send_notice(
let mut data_minus_url = pusher.data.clone(); let mut data_minus_url = pusher.data.clone();
// The url must be stripped off according to spec // The url must be stripped off according to spec
data_minus_url.url = None; data_minus_url.url = None;
device.data = Some(data_minus_url); device.data = data_minus_url;
// Tweaks are only added if the format is NOT event_id_only // Tweaks are only added if the format is NOT event_id_only
if !event_id_only { if !event_id_only {

151
src/database/rooms.rs

@ -36,6 +36,8 @@ use std::{
borrow::Cow, borrow::Cow,
collections::{BTreeMap, HashMap, HashSet}, collections::{BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
fmt::Debug,
iter,
mem::size_of, mem::size_of,
sync::{Arc, Mutex, RwLock}, sync::{Arc, Mutex, RwLock},
time::Instant, time::Instant,
@ -107,14 +109,14 @@ pub struct Rooms {
/// RoomId + EventId -> Parent PDU EventId. /// RoomId + EventId -> Parent PDU EventId.
pub(super) referencedevents: Arc<dyn Tree>, pub(super) referencedevents: Arc<dyn Tree>,
pub(super) pdu_cache: Mutex<LruCache<EventId, Arc<PduEvent>>>, pub(super) pdu_cache: Mutex<LruCache<Box<EventId>, Arc<PduEvent>>>,
pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>, pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>, pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,
pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>, pub(super) eventidshort_cache: Mutex<LruCache<Box<EventId>, u64>>,
pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>, pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>,
pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>, pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>,
pub(super) our_real_users_cache: RwLock<HashMap<RoomId, Arc<HashSet<UserId>>>>, pub(super) our_real_users_cache: RwLock<HashMap<Box<RoomId>, Arc<HashSet<Box<UserId>>>>>,
pub(super) appservice_in_room_cache: RwLock<HashMap<RoomId, HashMap<String, bool>>>, pub(super) appservice_in_room_cache: RwLock<HashMap<Box<RoomId>, HashMap<String, bool>>>,
pub(super) stateinfo_cache: Mutex< pub(super) stateinfo_cache: Mutex<
LruCache< LruCache<
u64, u64,
@ -434,7 +436,7 @@ impl Rooms {
None => continue, None => continue,
}; };
let user_id = match UserId::try_from(state_key) { let user_id = match UserId::parse(state_key) {
Ok(id) => id, Ok(id) => id,
Err(_) => continue, Err(_) => continue,
}; };
@ -742,7 +744,7 @@ impl Rooms {
self.eventidshort_cache self.eventidshort_cache
.lock() .lock()
.unwrap() .unwrap()
.insert(event_id.clone(), short); .insert(event_id.to_owned(), short);
Ok(short) Ok(short)
} }
@ -871,12 +873,10 @@ impl Rooms {
.get(&shorteventid.to_be_bytes())? .get(&shorteventid.to_be_bytes())?
.ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?;
let event_id = Arc::new( let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| {
EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") Error::bad_database("EventID in shorteventid_eventid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?, .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?;
);
self.shorteventid_cache self.shorteventid_cache
.lock() .lock()
@ -1112,7 +1112,7 @@ impl Rooms {
self.pdu_cache self.pdu_cache
.lock() .lock()
.unwrap() .unwrap()
.insert(event_id.clone(), Arc::clone(&pdu)); .insert(event_id.to_owned(), Arc::clone(&pdu));
Ok(Some(pdu)) Ok(Some(pdu))
} else { } else {
Ok(None) Ok(None)
@ -1162,14 +1162,14 @@ impl Rooms {
/// Returns the leaf pdus of a room. /// Returns the leaf pdus of a room.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result<HashSet<EventId>> { pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result<HashSet<Arc<EventId>>> {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.roomid_pduleaves self.roomid_pduleaves
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(_, bytes)| { .map(|(_, bytes)| {
EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") Error::bad_database("EventID in roomid_pduleaves is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))
@ -1178,7 +1178,7 @@ impl Rooms {
} }
#[tracing::instrument(skip(self, room_id, event_ids))] #[tracing::instrument(skip(self, room_id, event_ids))]
pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc<EventId>]) -> Result<()> {
for prev in event_ids { for prev in event_ids {
let mut key = room_id.as_bytes().to_vec(); let mut key = room_id.as_bytes().to_vec();
key.extend_from_slice(prev.as_bytes()); key.extend_from_slice(prev.as_bytes());
@ -1193,7 +1193,11 @@ impl Rooms {
/// The provided `event_ids` become the new leaves, this allows a room to have multiple /// The provided `event_ids` become the new leaves, this allows a room to have multiple
/// `prev_events`. /// `prev_events`.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { pub fn replace_pdu_leaves<'a>(
&self,
room_id: &RoomId,
event_ids: impl IntoIterator<Item = &'a EventId> + Debug,
) -> Result<()> {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -1257,11 +1261,11 @@ impl Rooms {
/// ///
/// Returns pdu id /// Returns pdu id
#[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))]
pub fn append_pdu( pub fn append_pdu<'a>(
&self, &self,
pdu: &PduEvent, pdu: &PduEvent,
mut pdu_json: CanonicalJsonObject, mut pdu_json: CanonicalJsonObject,
leaves: &[EventId], leaves: impl IntoIterator<Item = &'a EventId> + Debug,
db: &Database, db: &Database,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists");
@ -1420,7 +1424,7 @@ impl Rooms {
} }
// if the state_key fails // if the state_key fails
let target_user_id = UserId::try_from(state_key.clone()) let target_user_id = UserId::parse(state_key.clone())
.expect("This state_key was previously validated"); .expect("This state_key was previously validated");
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get()) let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
@ -1476,8 +1480,9 @@ impl Rooms {
if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name()))
&& self && self
.id_from_alias( .id_from_alias(
&format!("#admins:{}", db.globals.server_name()) <&RoomAliasId>::try_from(
.try_into() format!("#admins:{}", db.globals.server_name()).as_str(),
)
.expect("#admins:server_name is a valid room alias"), .expect("#admins:server_name is a valid room alias"),
)? )?
.as_ref() .as_ref()
@ -1528,7 +1533,7 @@ impl Rooms {
} }
"get_auth_chain" => { "get_auth_chain" => {
if args.len() == 1 { if args.len() == 1 {
if let Ok(event_id) = EventId::try_from(args[0]) { if let Ok(event_id) = EventId::parse_arc(args[0]) {
if let Some(event) = db.rooms.get_pdu_json(&event_id)? { if let Some(event) = db.rooms.get_pdu_json(&event_id)? {
let room_id_str = event let room_id_str = event
.get("room_id") .get("room_id")
@ -1539,12 +1544,12 @@ impl Rooms {
) )
})?; })?;
let room_id = RoomId::try_from(room_id_str) let room_id = <&RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
let start = Instant::now(); let start = Instant::now();
let count = server_server::get_auth_chain( let count = server_server::get_auth_chain(
&room_id, room_id,
vec![Arc::new(event_id)], vec![event_id],
db, db,
)? )?
.count(); .count();
@ -1567,12 +1572,12 @@ impl Rooms {
let string = body[1..body.len() - 1].join("\n"); let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) { match serde_json::from_str(&string) {
Ok(value) => { Ok(value) => {
let event_id = EventId::try_from(&*format!( let event_id = EventId::parse(format!(
"${}", "${}",
// Anything higher than version3 behaves the same // Anything higher than version3 behaves the same
ruma::signatures::reference_hash( ruma::signatures::reference_hash(
&value, &value,
&RoomVersionId::Version6 &RoomVersionId::V6
) )
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
)) ))
@ -1622,7 +1627,7 @@ impl Rooms {
} }
"get_pdu" => { "get_pdu" => {
if args.len() == 1 { if args.len() == 1 {
if let Ok(event_id) = EventId::try_from(args[0]) { if let Ok(event_id) = EventId::parse(args[0]) {
let mut outlier = false; let mut outlier = false;
let mut pdu_json = let mut pdu_json =
db.rooms.get_non_outlier_pdu_json(&event_id)?; db.rooms.get_non_outlier_pdu_json(&event_id)?;
@ -1948,7 +1953,7 @@ impl Rooms {
room_id: &RoomId, room_id: &RoomId,
db: &Database, db: &Database,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
) -> Result<EventId> { ) -> Result<Arc<EventId>> {
let PduBuilder { let PduBuilder {
event_type, event_type,
content, content,
@ -1985,9 +1990,7 @@ impl Rooms {
// If there was no create event yet, assume we are creating a version 6 room right now // If there was no create event yet, assume we are creating a version 6 room right now
let room_version_id = create_event_content let room_version_id = create_event_content
.map_or(RoomVersionId::Version6, |create_event| { .map_or(RoomVersionId::V6, |create_event| create_event.room_version);
create_event.room_version
});
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
let auth_events = let auth_events =
@ -2016,9 +2019,9 @@ impl Rooms {
} }
let mut pdu = PduEvent { let mut pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater"), event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: room_id.clone(), room_id: room_id.to_owned(),
sender: sender.clone(), sender: sender.to_owned(),
origin_server_ts: utils::millis_since_unix_epoch() origin_server_ts: utils::millis_since_unix_epoch()
.try_into() .try_into()
.expect("time is valid"), .expect("time is valid"),
@ -2083,7 +2086,7 @@ impl Rooms {
.expect("event is valid, we just created it"); .expect("event is valid, we just created it");
// Generate event id // Generate event id
pdu.event_id = EventId::try_from(&*format!( pdu.event_id = EventId::parse_arc(format!(
"${}", "${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id) ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
@ -2107,7 +2110,7 @@ impl Rooms {
pdu_json, pdu_json,
// Since this PDU references all pdu_leaves we can update the leaves // Since this PDU references all pdu_leaves we can update the leaves
// of the room // of the room
&[pdu.event_id.clone()], iter::once(&*pdu.event_id),
db, db,
)?; )?;
@ -2206,7 +2209,7 @@ impl Rooms {
let mut first_pdu_id = prefix.clone(); let mut first_pdu_id = prefix.clone();
first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes());
let user_id = user_id.clone(); let user_id = user_id.to_owned();
Ok(self Ok(self
.pduid_pdu .pduid_pdu
@ -2243,7 +2246,7 @@ impl Rooms {
let current: &[u8] = &current; let current: &[u8] = &current;
let user_id = user_id.clone(); let user_id = user_id.to_owned();
Ok(self Ok(self
.pduid_pdu .pduid_pdu
@ -2280,7 +2283,7 @@ impl Rooms {
let current: &[u8] = &current; let current: &[u8] = &current;
let user_id = user_id.clone(); let user_id = user_id.to_owned();
Ok(self Ok(self
.pduid_pdu .pduid_pdu
@ -2412,7 +2415,7 @@ impl Rooms {
for room_ids in direct_event.content.0.values_mut() { for room_ids in direct_event.content.0.values_mut() {
if room_ids.iter().any(|r| r == &predecessor.room_id) { if room_ids.iter().any(|r| r == &predecessor.room_id) {
room_ids.push(room_id.clone()); room_ids.push(room_id.to_owned());
room_ids_updated = true; room_ids_updated = true;
} }
} }
@ -2451,7 +2454,11 @@ impl Rooms {
EventType::IgnoredUserList, EventType::IgnoredUserList,
)? )?
.map_or(false, |ignored| { .map_or(false, |ignored| {
ignored.content.ignored_users.contains(sender) ignored
.content
.ignored_users
.iter()
.any(|user| user == sender)
}); });
if is_ignored { if is_ignored {
@ -2537,7 +2544,7 @@ impl Rooms {
self.our_real_users_cache self.our_real_users_cache
.write() .write()
.unwrap() .unwrap()
.insert(room_id.clone(), Arc::new(real_users)); .insert(room_id.to_owned(), Arc::new(real_users));
for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) {
if !joined_servers.remove(&old_joined_server) { if !joined_servers.remove(&old_joined_server) {
@ -2582,7 +2589,7 @@ impl Rooms {
&self, &self,
room_id: &RoomId, room_id: &RoomId,
db: &Database, db: &Database,
) -> Result<Arc<HashSet<UserId>>> { ) -> Result<Arc<HashSet<Box<UserId>>>> {
let maybe = self let maybe = self
.our_real_users_cache .our_real_users_cache
.read() .read()
@ -2650,7 +2657,7 @@ impl Rooms {
self.appservice_in_room_cache self.appservice_in_room_cache
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default() .or_default()
.insert(appservice.0.clone(), in_room); .insert(appservice.0.clone(), in_room);
@ -2694,7 +2701,7 @@ impl Rooms {
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -2754,7 +2761,7 @@ impl Rooms {
.filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
.filter_map(|sender| UserId::try_from(sender).ok()) .filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()) .map(|user| user.server_name().to_owned())
.collect(); .collect();
@ -2778,9 +2785,7 @@ impl Rooms {
let (make_leave_response, remote_server) = make_leave_response_and_server?; let (make_leave_response, remote_server) = make_leave_response_and_server?;
let room_version_id = match make_leave_response.room_version { let room_version_id = match make_leave_response.room_version {
Some(version) Some(version) if version == RoomVersionId::V5 || version == RoomVersionId::V6 => {
if version == RoomVersionId::Version5 || version == RoomVersionId::Version6 =>
{
version version
} }
_ => return Err(Error::BadServerResponse("Room version is not supported")), _ => return Err(Error::BadServerResponse("Room version is not supported")),
@ -2817,7 +2822,7 @@ impl Rooms {
.expect("event is valid, we just created it"); .expect("event is valid, we just created it");
// Generate event id // Generate event id
let event_id = EventId::try_from(&*format!( let event_id = EventId::parse(format!(
"${}", "${}",
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
@ -2902,11 +2907,11 @@ impl Rooms {
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result<Option<RoomId>> { pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result<Option<Box<RoomId>>> {
self.alias_roomid self.alias_roomid
.get(alias.alias().as_bytes())? .get(alias.alias().as_bytes())?
.map(|bytes| { .map(|bytes| {
RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Room ID in alias_roomid is invalid unicode.") Error::bad_database("Room ID in alias_roomid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid."))
@ -2918,7 +2923,7 @@ impl Rooms {
pub fn room_aliases<'a>( pub fn room_aliases<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
) -> impl Iterator<Item = Result<RoomAliasId>> + 'a { ) -> impl Iterator<Item = Result<Box<RoomAliasId>>> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -2947,9 +2952,9 @@ impl Rooms {
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn public_rooms(&self) -> impl Iterator<Item = Result<RoomId>> + '_ { pub fn public_rooms(&self) -> impl Iterator<Item = Result<Box<RoomId>>> + '_ {
self.publicroomids.iter().map(|(bytes, _)| { self.publicroomids.iter().map(|(bytes, _)| {
RoomId::try_from( RoomId::parse(
utils::string_from_bytes(&bytes).map_err(|_| { utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Room ID in publicroomids is invalid unicode.") Error::bad_database("Room ID in publicroomids is invalid unicode.")
})?, })?,
@ -3009,8 +3014,8 @@ impl Rooms {
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn get_shared_rooms<'a>( pub fn get_shared_rooms<'a>(
&'a self, &'a self,
users: Vec<UserId>, users: Vec<Box<UserId>>,
) -> Result<impl Iterator<Item = Result<RoomId>> + 'a> { ) -> Result<impl Iterator<Item = Result<Box<RoomId>>> + 'a> {
let iterators = users.into_iter().map(move |user_id| { let iterators = users.into_iter().map(move |user_id| {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -3037,7 +3042,7 @@ impl Rooms {
Ok(utils::common_elements(iterators, Ord::cmp) Ok(utils::common_elements(iterators, Ord::cmp)
.expect("users is not empty") .expect("users is not empty")
.map(|bytes| { .map(|bytes| {
RoomId::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| {
Error::bad_database("Invalid RoomId bytes in userroomid_joined") Error::bad_database("Invalid RoomId bytes in userroomid_joined")
})?) })?)
.map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined."))
@ -3054,7 +3059,7 @@ impl Rooms {
prefix.push(0xff); prefix.push(0xff);
self.roomserverids.scan_prefix(prefix).map(|(key, _)| { self.roomserverids.scan_prefix(prefix).map(|(key, _)| {
Box::<ServerName>::try_from( ServerName::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3082,12 +3087,12 @@ impl Rooms {
pub fn server_rooms<'a>( pub fn server_rooms<'a>(
&'a self, &'a self,
server: &ServerName, server: &ServerName,
) -> impl Iterator<Item = Result<RoomId>> + 'a { ) -> impl Iterator<Item = Result<Box<RoomId>>> + 'a {
let mut prefix = server.as_bytes().to_vec(); let mut prefix = server.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.serverroomids.scan_prefix(prefix).map(|(key, _)| { self.serverroomids.scan_prefix(prefix).map(|(key, _)| {
RoomId::try_from( RoomId::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3104,12 +3109,12 @@ impl Rooms {
pub fn room_members<'a>( pub fn room_members<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
) -> impl Iterator<Item = Result<UserId>> + 'a { ) -> impl Iterator<Item = Result<Box<UserId>>> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| {
UserId::try_from( UserId::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3150,14 +3155,14 @@ impl Rooms {
pub fn room_useroncejoined<'a>( pub fn room_useroncejoined<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
) -> impl Iterator<Item = Result<UserId>> + 'a { ) -> impl Iterator<Item = Result<Box<UserId>>> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.roomuseroncejoinedids self.roomuseroncejoinedids
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(key, _)| { .map(|(key, _)| {
UserId::try_from( UserId::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3176,14 +3181,14 @@ impl Rooms {
pub fn room_members_invited<'a>( pub fn room_members_invited<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
) -> impl Iterator<Item = Result<UserId>> + 'a { ) -> impl Iterator<Item = Result<Box<UserId>>> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.roomuserid_invitecount self.roomuserid_invitecount
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(key, _)| { .map(|(key, _)| {
UserId::try_from( UserId::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3232,11 +3237,11 @@ impl Rooms {
pub fn rooms_joined<'a>( pub fn rooms_joined<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,
) -> impl Iterator<Item = Result<RoomId>> + 'a { ) -> impl Iterator<Item = Result<Box<RoomId>>> + 'a {
self.userroomid_joined self.userroomid_joined
.scan_prefix(user_id.as_bytes().to_vec()) .scan_prefix(user_id.as_bytes().to_vec())
.map(|(key, _)| { .map(|(key, _)| {
RoomId::try_from( RoomId::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3255,14 +3260,14 @@ impl Rooms {
pub fn rooms_invited<'a>( pub fn rooms_invited<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,
) -> impl Iterator<Item = Result<(RoomId, Vec<Raw<AnyStrippedStateEvent>>)>> + 'a { ) -> impl Iterator<Item = Result<(Box<RoomId>, Vec<Raw<AnyStrippedStateEvent>>)>> + 'a {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.userroomid_invitestate self.userroomid_invitestate
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(key, state)| { .map(|(key, state)| {
let room_id = RoomId::try_from( let room_id = RoomId::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3328,14 +3333,14 @@ impl Rooms {
pub fn rooms_left<'a>( pub fn rooms_left<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,
) -> impl Iterator<Item = Result<(RoomId, Vec<Raw<AnySyncStateEvent>>)>> + 'a { ) -> impl Iterator<Item = Result<(Box<RoomId>, Vec<Raw<AnySyncStateEvent>>)>> + 'a {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.userroomid_leftstate self.userroomid_leftstate
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(key, state)| { .map(|(key, state)| {
let room_id = RoomId::try_from( let room_id = RoomId::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()

35
src/database/rooms/edus.rs

@ -11,7 +11,7 @@ use ruma::{
}; };
use std::{ use std::{
collections::{HashMap, HashSet}, collections::{HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::TryInto,
mem, mem,
sync::Arc, sync::Arc,
}; };
@ -76,8 +76,13 @@ impl RoomEdus {
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
since: u64, since: u64,
) -> impl Iterator<Item = Result<(UserId, u64, Raw<ruma::events::AnySyncEphemeralRoomEvent>)>> + 'a ) -> impl Iterator<
{ Item = Result<(
Box<UserId>,
u64,
Raw<ruma::events::AnySyncEphemeralRoomEvent>,
)>,
> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
let prefix2 = prefix.clone(); let prefix2 = prefix.clone();
@ -92,7 +97,7 @@ impl RoomEdus {
let count = let count =
utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::<u64>()]) utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::<u64>()])
.map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?;
let user_id = UserId::try_from( let user_id = UserId::parse(
utils::string_from_bytes(&k[prefix.len() + mem::size_of::<u64>() + 1..]) utils::string_from_bytes(&k[prefix.len() + mem::size_of::<u64>() + 1..])
.map_err(|_| { .map_err(|_| {
Error::bad_database("Invalid readreceiptid userid bytes in db.") Error::bad_database("Invalid readreceiptid userid bytes in db.")
@ -305,17 +310,13 @@ impl RoomEdus {
let mut user_ids = HashSet::new(); let mut user_ids = HashSet::new();
for user_id in self for (_, user_id) in self.typingid_userid.scan_prefix(prefix) {
.typingid_userid let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| {
.scan_prefix(prefix)
.map(|(_, user_id)| {
UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| {
Error::bad_database("User ID in typingid_userid is invalid unicode.") Error::bad_database("User ID in typingid_userid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("User ID in typingid_userid is invalid.")) .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?;
})
{ user_ids.insert(user_id);
user_ids.insert(user_id?);
} }
Ok(SyncEphemeralRoomEvent { Ok(SyncEphemeralRoomEvent {
@ -449,7 +450,7 @@ impl RoomEdus {
{ {
// Send new presence events to set the user offline // Send new presence events to set the user offline
let count = globals.next_count()?.to_be_bytes(); let count = globals.next_count()?.to_be_bytes();
let user_id = utils::string_from_bytes(&user_id_bytes) let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes)
.map_err(|_| { .map_err(|_| {
Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.")
})? })?
@ -475,7 +476,7 @@ impl RoomEdus {
presence: PresenceState::Offline, presence: PresenceState::Offline,
status_msg: None, status_msg: None,
}, },
sender: user_id.clone(), sender: user_id.to_owned(),
}) })
.expect("PresenceEvent can be serialized"), .expect("PresenceEvent can be serialized"),
)?; )?;
@ -498,7 +499,7 @@ impl RoomEdus {
since: u64, since: u64,
_rooms: &super::Rooms, _rooms: &super::Rooms,
_globals: &super::super::globals::Globals, _globals: &super::super::globals::Globals,
) -> Result<HashMap<UserId, PresenceEvent>> { ) -> Result<HashMap<Box<UserId>, PresenceEvent>> {
//self.presence_maintain(rooms, globals)?; //self.presence_maintain(rooms, globals)?;
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
@ -513,7 +514,7 @@ impl RoomEdus {
.iter_from(&*first_possible_edu, false) .iter_from(&*first_possible_edu, false)
.take_while(|(key, _)| key.starts_with(&prefix)) .take_while(|(key, _)| key.starts_with(&prefix))
{ {
let user_id = UserId::try_from( let user_id = UserId::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()

11
src/database/sending.rs

@ -1,6 +1,6 @@
use std::{ use std::{
collections::{BTreeMap, HashMap, HashSet}, collections::{BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::TryInto,
fmt::Debug, fmt::Debug,
sync::Arc, sync::Arc,
time::{Duration, Instant}, time::{Duration, Instant},
@ -397,7 +397,7 @@ impl Sending {
// Because synapse resyncs, we can just insert dummy data // Because synapse resyncs, we can just insert dummy data
let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { let edu = Edu::DeviceListUpdate(DeviceListUpdateContent {
user_id, user_id,
device_id: device_id!("dummy"), device_id: device_id!("dummy").to_owned(),
device_display_name: Some("Dummy".to_owned()), device_display_name: Some("Dummy".to_owned()),
stream_id: uint!(1), stream_id: uint!(1),
prev_id: Vec::new(), prev_id: Vec::new(),
@ -583,8 +583,7 @@ impl Sending {
} }
} }
let userid = let userid = UserId::parse(utils::string_from_bytes(user).map_err(|_| {
UserId::try_from(utils::string_from_bytes(user).map_err(|_| {
( (
kind.clone(), kind.clone(),
Error::bad_database("Invalid push user string in db."), Error::bad_database("Invalid push user string in db."),
@ -732,7 +731,7 @@ impl Sending {
})?; })?;
( (
OutgoingKind::Appservice(Box::<ServerName>::try_from(server).map_err(|_| { OutgoingKind::Appservice(ServerName::parse(server).map_err(|_| {
Error::bad_database("Invalid server string in server_currenttransaction") Error::bad_database("Invalid server string in server_currenttransaction")
})?), })?),
if value.is_empty() { if value.is_empty() {
@ -771,7 +770,7 @@ impl Sending {
})?; })?;
( (
OutgoingKind::Normal(Box::<ServerName>::try_from(server).map_err(|_| { OutgoingKind::Normal(ServerName::parse(server).map_err(|_| {
Error::bad_database("Invalid server string in server_currenttransaction") Error::bad_database("Invalid server string in server_currenttransaction")
})?), })?),
if value.is_empty() { if value.is_empty() {

32
src/database/users.rs

@ -8,7 +8,7 @@ use ruma::{
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt,
UserId, UserId,
}; };
use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc}; use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc};
use tracing::warn; use tracing::warn;
use super::abstraction::Tree; use super::abstraction::Tree;
@ -62,12 +62,11 @@ impl Users {
rooms: &super::rooms::Rooms, rooms: &super::rooms::Rooms,
globals: &super::globals::Globals, globals: &super::globals::Globals,
) -> Result<bool> { ) -> Result<bool> {
let admin_room_alias_id = let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name()))
RoomAliasId::try_from(format!("#admins:{}", globals.server_name()))
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap();
Ok(rooms.is_joined(user_id, &admin_room_id)?) rooms.is_joined(user_id, &admin_room_id)
} }
/// Create a new user account on this homeserver. /// Create a new user account on this homeserver.
@ -85,7 +84,7 @@ impl Users {
/// Find out which user an access token belongs to. /// Find out which user an access token belongs to.
#[tracing::instrument(skip(self, token))] #[tracing::instrument(skip(self, token))]
pub fn find_from_token(&self, token: &str) -> Result<Option<(UserId, String)>> { pub fn find_from_token(&self, token: &str) -> Result<Option<(Box<UserId>, String)>> {
self.token_userdeviceid self.token_userdeviceid
.get(token.as_bytes())? .get(token.as_bytes())?
.map_or(Ok(None), |bytes| { .map_or(Ok(None), |bytes| {
@ -98,7 +97,7 @@ impl Users {
})?; })?;
Ok(Some(( Ok(Some((
UserId::try_from(utils::string_from_bytes(user_bytes).map_err(|_| { UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| {
Error::bad_database("User ID in token_userdeviceid is invalid unicode.") Error::bad_database("User ID in token_userdeviceid is invalid unicode.")
})?) })?)
.map_err(|_| { .map_err(|_| {
@ -113,9 +112,9 @@ impl Users {
/// Returns an iterator over all users on this homeserver. /// Returns an iterator over all users on this homeserver.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn iter(&self) -> impl Iterator<Item = Result<UserId>> + '_ { pub fn iter(&self) -> impl Iterator<Item = Result<Box<UserId>>> + '_ {
self.userid_password.iter().map(|(bytes, _)| { self.userid_password.iter().map(|(bytes, _)| {
UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in userid_password is invalid unicode.") Error::bad_database("User ID in userid_password is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) .map_err(|_| Error::bad_database("User ID in userid_password is invalid."))
@ -181,20 +180,21 @@ impl Users {
/// Get the avatar_url of a user. /// Get the avatar_url of a user.
#[tracing::instrument(skip(self, user_id))] #[tracing::instrument(skip(self, user_id))]
pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<MxcUri>> { pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<Box<MxcUri>>> {
self.userid_avatarurl self.userid_avatarurl
.get(user_id.as_bytes())? .get(user_id.as_bytes())?
.map(|bytes| { .map(|bytes| {
let s = utils::string_from_bytes(&bytes) let s = utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
MxcUri::try_from(s).map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) s.try_into()
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
}) })
.transpose() .transpose()
} }
/// Sets a new avatar_url or removes it if avatar_url is None. /// Sets a new avatar_url or removes it if avatar_url is None.
#[tracing::instrument(skip(self, user_id, avatar_url))] #[tracing::instrument(skip(self, user_id, avatar_url))]
pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<MxcUri>) -> Result<()> { pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<Box<MxcUri>>) -> Result<()> {
if let Some(avatar_url) = avatar_url { if let Some(avatar_url) = avatar_url {
self.userid_avatarurl self.userid_avatarurl
.insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?;
@ -409,7 +409,7 @@ impl Users {
device_id: &DeviceId, device_id: &DeviceId,
key_algorithm: &DeviceKeyAlgorithm, key_algorithm: &DeviceKeyAlgorithm,
globals: &super::globals::Globals, globals: &super::globals::Globals,
) -> Result<Option<(DeviceKeyId, OneTimeKey)>> { ) -> Result<Option<(Box<DeviceKeyId>, OneTimeKey)>> {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
prefix.extend_from_slice(device_id.as_bytes()); prefix.extend_from_slice(device_id.as_bytes());
@ -459,7 +459,7 @@ impl Users {
.scan_prefix(userdeviceid) .scan_prefix(userdeviceid)
.map(|(bytes, _)| { .map(|(bytes, _)| {
Ok::<_, Error>( Ok::<_, Error>(
serde_json::from_slice::<DeviceKeyId>( serde_json::from_slice::<Box<DeviceKeyId>>(
&*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| {
Error::bad_database("OneTimeKey ID in db is invalid.") Error::bad_database("OneTimeKey ID in db is invalid.")
})?, })?,
@ -632,7 +632,7 @@ impl Users {
.ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))?
.as_object_mut() .as_object_mut()
.ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))?
.entry(sender_id.clone()) .entry(sender_id.to_owned())
.or_insert_with(|| serde_json::Map::new().into()); .or_insert_with(|| serde_json::Map::new().into());
signatures signatures
@ -657,7 +657,7 @@ impl Users {
user_or_room_id: &str, user_or_room_id: &str,
from: u64, from: u64,
to: Option<u64>, to: Option<u64>,
) -> impl Iterator<Item = Result<UserId>> + 'a { ) -> impl Iterator<Item = Result<Box<UserId>>> + 'a {
let mut prefix = user_or_room_id.as_bytes().to_vec(); let mut prefix = user_or_room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -683,7 +683,7 @@ impl Users {
} }
}) })
.map(|(_, bytes)| { .map(|(_, bytes)| {
UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid."))

35
src/pdu.rs

@ -13,7 +13,7 @@ use serde_json::{
json, json,
value::{to_raw_value, RawValue as RawJsonValue}, value::{to_raw_value, RawValue as RawJsonValue},
}; };
use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom}; use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, sync::Arc};
use tracing::warn; use tracing::warn;
/// Content hashes of a PDU. /// Content hashes of a PDU.
@ -25,20 +25,20 @@ pub struct EventHash {
#[derive(Clone, Deserialize, Serialize, Debug)] #[derive(Clone, Deserialize, Serialize, Debug)]
pub struct PduEvent { pub struct PduEvent {
pub event_id: EventId, pub event_id: Arc<EventId>,
pub room_id: RoomId, pub room_id: Box<RoomId>,
pub sender: UserId, pub sender: Box<UserId>,
pub origin_server_ts: UInt, pub origin_server_ts: UInt,
#[serde(rename = "type")] #[serde(rename = "type")]
pub kind: EventType, pub kind: EventType,
pub content: Box<RawJsonValue>, pub content: Box<RawJsonValue>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub state_key: Option<String>, pub state_key: Option<String>,
pub prev_events: Vec<EventId>, pub prev_events: Vec<Arc<EventId>>,
pub depth: UInt, pub depth: UInt,
pub auth_events: Vec<EventId>, pub auth_events: Vec<Arc<EventId>>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub redacts: Option<EventId>, pub redacts: Option<Arc<EventId>>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub unsigned: Option<Box<RawJsonValue>>, pub unsigned: Option<Box<RawJsonValue>>,
pub hashes: EventHash, pub hashes: EventHash,
@ -266,7 +266,9 @@ impl PduEvent {
} }
impl state_res::Event for PduEvent { impl state_res::Event for PduEvent {
fn event_id(&self) -> &EventId { type Id = Arc<EventId>;
fn event_id(&self) -> &Self::Id {
&self.event_id &self.event_id
} }
@ -294,15 +296,15 @@ impl state_res::Event for PduEvent {
self.state_key.as_deref() self.state_key.as_deref()
} }
fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + '_> { fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_> {
Box::new(self.prev_events.iter()) Box::new(self.prev_events.iter())
} }
fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + '_> { fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_> {
Box::new(self.auth_events.iter()) Box::new(self.auth_events.iter())
} }
fn redacts(&self) -> Option<&EventId> { fn redacts(&self) -> Option<&Self::Id> {
self.redacts.as_ref() self.redacts.as_ref()
} }
} }
@ -331,18 +333,19 @@ impl Ord for PduEvent {
/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap<String, CanonicalJsonValue>`. /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap<String, CanonicalJsonValue>`.
pub(crate) fn gen_event_id_canonical_json( pub(crate) fn gen_event_id_canonical_json(
pdu: &RawJsonValue, pdu: &RawJsonValue,
) -> crate::Result<(EventId, CanonicalJsonObject)> { ) -> crate::Result<(Box<EventId>, CanonicalJsonObject)> {
let value = serde_json::from_str(pdu.get()).map_err(|e| { let value = serde_json::from_str(pdu.get()).map_err(|e| {
warn!("Error parsing incoming event {:?}: {:?}", pdu, e); warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response") Error::BadServerResponse("Invalid PDU in server response")
})?; })?;
let event_id = EventId::try_from(&*format!( let event_id = format!(
"${}", "${}",
// Anything higher than version3 behaves the same // Anything higher than version3 behaves the same
ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) ruma::signatures::reference_hash(&value, &RoomVersionId::V6)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
)) )
.try_into()
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
Ok((event_id, value)) Ok((event_id, value))
@ -356,7 +359,7 @@ pub struct PduBuilder {
pub content: Box<RawJsonValue>, pub content: Box<RawJsonValue>,
pub unsigned: Option<BTreeMap<String, serde_json::Value>>, pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
pub state_key: Option<String>, pub state_key: Option<String>,
pub redacts: Option<EventId>, pub redacts: Option<Arc<EventId>>,
} }
/// Direct conversion prevents loss of the empty `state_key` that ruma requires. /// Direct conversion prevents loss of the empty `state_key` that ruma requires.

10
src/ruma_wrapper.rs

@ -20,7 +20,6 @@ use {
}, },
ruma::api::{AuthScheme, IncomingRequest}, ruma::api::{AuthScheme, IncomingRequest},
std::collections::BTreeMap, std::collections::BTreeMap,
std::convert::TryFrom,
std::io::Cursor, std::io::Cursor,
tracing::{debug, warn}, tracing::{debug, warn},
}; };
@ -29,7 +28,7 @@ use {
/// first. /// first.
pub struct Ruma<T: Outgoing> { pub struct Ruma<T: Outgoing> {
pub body: T::Incoming, pub body: T::Incoming,
pub sender_user: Option<UserId>, pub sender_user: Option<Box<UserId>>,
pub sender_device: Option<Box<DeviceId>>, pub sender_device: Option<Box<DeviceId>>,
pub sender_servername: Option<Box<ServerName>>, pub sender_servername: Option<Box<ServerName>>,
// This is None when body is not a valid string // This is None when body is not a valid string
@ -86,7 +85,7 @@ where
registration registration
.get("as_token") .get("as_token")
.and_then(|as_token| as_token.as_str()) .and_then(|as_token| as_token.as_str())
.map_or(false, |as_token| token.as_deref() == Some(as_token)) .map_or(false, |as_token| token == Some(as_token))
}) { }) {
match metadata.authentication { match metadata.authentication {
AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
@ -103,8 +102,7 @@ where
.unwrap() .unwrap()
}, },
|string| { |string| {
UserId::try_from(string.expect("parsing to string always works")) UserId::parse(string.expect("parsing to string always works")).unwrap()
.unwrap()
}, },
); );
@ -171,7 +169,7 @@ where
} }
}; };
let origin = match Box::<ServerName>::try_from(origin_str) { let origin = match ServerName::parse(origin_str) {
Ok(s) => s, Ok(s) => s,
_ => { _ => {
warn!( warn!(

173
src/server_server.rs

@ -64,6 +64,7 @@ use std::{
future::Future, future::Future,
mem, mem,
net::{IpAddr, SocketAddr}, net::{IpAddr, SocketAddr},
ops::Deref,
pin::Pin, pin::Pin,
sync::{Arc, RwLock, RwLockWriteGuard}, sync::{Arc, RwLock, RwLockWriteGuard},
time::{Duration, Instant, SystemTime}, time::{Duration, Instant, SystemTime},
@ -396,10 +397,7 @@ async fn find_actual_destination(
} }
if let Some(port) = force_port { if let Some(port) = force_port {
FedDest::Named( FedDest::Named(delegated_hostname, format!(":{}", port))
delegated_hostname,
format!(":{}", port.to_string()),
)
} else { } else {
add_port_to_hostname(&delegated_hostname) add_port_to_hostname(&delegated_hostname)
} }
@ -432,10 +430,7 @@ async fn find_actual_destination(
} }
if let Some(port) = force_port { if let Some(port) = force_port {
FedDest::Named( FedDest::Named(hostname.clone(), format!(":{}", port))
hostname.clone(),
format!(":{}", port.to_string()),
)
} else { } else {
add_port_to_hostname(&hostname) add_port_to_hostname(&hostname)
} }
@ -550,11 +545,10 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json<String> {
return Json("Federation is disabled.".to_owned()); return Json("Federation is disabled.".to_owned());
} }
let mut verify_keys = BTreeMap::new(); let mut verify_keys: BTreeMap<Box<ServerSigningKeyId>, VerifyKey> = BTreeMap::new();
verify_keys.insert( verify_keys.insert(
ServerSigningKeyId::try_from( format!("ed25519:{}", db.globals.keypair().version())
format!("ed25519:{}", db.globals.keypair().version()).as_str(), .try_into()
)
.expect("found invalid server signing keys in DB"), .expect("found invalid server signing keys in DB"),
VerifyKey { VerifyKey {
key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD),
@ -736,7 +730,7 @@ pub async fn send_transaction_message_route(
// 0. Check the server is in the room // 0. Check the server is in the room
let room_id = match value let room_id = match value
.get("room_id") .get("room_id")
.and_then(|id| RoomId::try_from(id.as_str()?).ok()) .and_then(|id| RoomId::parse(id.as_str()?).ok())
{ {
Some(id) => id, Some(id) => id,
None => { None => {
@ -1001,14 +995,9 @@ pub(crate) async fn handle_incoming_pdu<'a>(
} }
// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events
let mut graph = HashMap::new(); let mut graph: HashMap<Arc<EventId>, _> = HashMap::new();
let mut eventid_info = HashMap::new(); let mut eventid_info = HashMap::new();
let mut todo_outlier_stack: Vec<_> = incoming_pdu let mut todo_outlier_stack: Vec<Arc<EventId>> = incoming_pdu.prev_events.clone();
.prev_events
.iter()
.cloned()
.map(Arc::new)
.collect();
let mut amount = 0; let mut amount = 0;
@ -1027,7 +1016,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
if amount > 100 { if amount > 100 {
// Max limit reached // Max limit reached
warn!("Max prev event limit reached!"); warn!("Max prev event limit reached!");
graph.insert((*prev_event_id).clone(), HashSet::new()); graph.insert(prev_event_id.clone(), HashSet::new());
continue; continue;
} }
@ -1038,27 +1027,27 @@ pub(crate) async fn handle_incoming_pdu<'a>(
amount += 1; amount += 1;
for prev_prev in &pdu.prev_events { for prev_prev in &pdu.prev_events {
if !graph.contains_key(prev_prev) { if !graph.contains_key(prev_prev) {
todo_outlier_stack.push(dbg!(Arc::new(prev_prev.clone()))); todo_outlier_stack.push(dbg!(prev_prev.clone()));
} }
} }
graph.insert( graph.insert(
(*prev_event_id).clone(), prev_event_id.clone(),
pdu.prev_events.iter().cloned().collect(), pdu.prev_events.iter().cloned().collect(),
); );
} else { } else {
// Time based check failed // Time based check failed
graph.insert((*prev_event_id).clone(), HashSet::new()); graph.insert(prev_event_id.clone(), HashSet::new());
} }
eventid_info.insert(prev_event_id.clone(), (pdu, json)); eventid_info.insert(prev_event_id.clone(), (pdu, json));
} else { } else {
// Get json failed // Get json failed
graph.insert((*prev_event_id).clone(), HashSet::new()); graph.insert(prev_event_id.clone(), HashSet::new());
} }
} else { } else {
// Fetch and handle failed // Fetch and handle failed
graph.insert((*prev_event_id).clone(), HashSet::new()); graph.insert(prev_event_id.clone(), HashSet::new());
} }
} }
@ -1074,7 +1063,6 @@ pub(crate) async fn handle_incoming_pdu<'a>(
.get(event_id) .get(event_id)
.map_or_else(|| uint!(0), |info| info.0.origin_server_ts), .map_or_else(|| uint!(0), |info| info.0.origin_server_ts),
), ),
ruma::event_id!("$notimportant"),
)) ))
}) })
.map_err(|_| "Error sorting prev events".to_owned())?; .map_err(|_| "Error sorting prev events".to_owned())?;
@ -1084,7 +1072,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
if errors >= 5 { if errors >= 5 {
break; break;
} }
if let Some((pdu, json)) = eventid_info.remove(&prev_id) { if let Some((pdu, json)) = eventid_info.remove(&*prev_id) {
if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts {
continue; continue;
} }
@ -1200,8 +1188,7 @@ fn handle_outlier_pdu<'a>(
&incoming_pdu &incoming_pdu
.auth_events .auth_events
.iter() .iter()
.cloned() .map(|x| Arc::from(&**x))
.map(Arc::new)
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
create_event, create_event,
room_id, room_id,
@ -1331,7 +1318,7 @@ async fn upgrade_outlier_to_timeline_pdu(
let mut state_at_incoming_event = None; let mut state_at_incoming_event = None;
if incoming_pdu.prev_events.len() == 1 { if incoming_pdu.prev_events.len() == 1 {
let prev_event = &incoming_pdu.prev_events[0]; let prev_event = &*incoming_pdu.prev_events[0];
let prev_event_sstatehash = db let prev_event_sstatehash = db
.rooms .rooms
.pdu_shortstatehash(prev_event) .pdu_shortstatehash(prev_event)
@ -1353,7 +1340,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals) .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
state.insert(shortstatekey, Arc::new(prev_event.clone())); state.insert(shortstatekey, Arc::from(prev_event));
// Now it's the state after the pdu // Now it's the state after the pdu
} }
@ -1397,7 +1384,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.rooms .rooms
.get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id));
// Now it's the state after the pdu // Now it's the state after the pdu
} }
@ -1410,14 +1397,13 @@ async fn upgrade_outlier_to_timeline_pdu(
.get_statekey_from_short(k) .get_statekey_from_short(k)
.map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?;
state.insert(k, (*id).clone()); state.insert(k, id.clone());
starting_events.push(id); starting_events.push(id);
} }
auth_chain_sets.push( auth_chain_sets.push(
get_auth_chain(room_id, starting_events, db) get_auth_chain(room_id, starting_events, db)
.map_err(|_| "Failed to load auth chain.".to_owned())? .map_err(|_| "Failed to load auth chain.".to_owned())?
.map(|event_id| (*event_id).clone())
.collect(), .collect(),
); );
@ -1444,7 +1430,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.rooms .rooms
.get_or_create_shortstatekey(&event_type, &state_key, &db.globals) .get_or_create_shortstatekey(&event_type, &state_key, &db.globals)
.map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?;
Ok((shortstatekey, Arc::new(event_id))) Ok((shortstatekey, event_id))
}) })
.collect::<Result<_, String>>()?, .collect::<Result<_, String>>()?,
), ),
@ -1479,8 +1465,7 @@ async fn upgrade_outlier_to_timeline_pdu(
origin, origin,
&res.pdu_ids &res.pdu_ids
.iter() .iter()
.cloned() .map(|x| Arc::from(&**x))
.map(Arc::new)
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
create_event, create_event,
room_id, room_id,
@ -1488,7 +1473,7 @@ async fn upgrade_outlier_to_timeline_pdu(
) )
.await; .await;
let mut state = BTreeMap::new(); let mut state: BTreeMap<_, Arc<EventId>> = BTreeMap::new();
for (pdu, _) in state_vec { for (pdu, _) in state_vec {
let state_key = pdu let state_key = pdu
.state_key .state_key
@ -1502,7 +1487,7 @@ async fn upgrade_outlier_to_timeline_pdu(
match state.entry(shortstatekey) { match state.entry(shortstatekey) {
btree_map::Entry::Vacant(v) => { btree_map::Entry::Vacant(v) => {
v.insert(Arc::new(pdu.event_id.clone())); v.insert(Arc::from(&*pdu.event_id));
} }
btree_map::Entry::Occupied(_) => return Err( btree_map::Entry::Occupied(_) => return Err(
"State event's type and state_key combination exists multiple times." "State event's type and state_key combination exists multiple times."
@ -1577,7 +1562,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -1647,7 +1632,7 @@ async fn upgrade_outlier_to_timeline_pdu(
db, db,
&incoming_pdu, &incoming_pdu,
val, val,
extremities, extremities.iter().map(Deref::deref),
state_ids_compressed, state_ids_compressed,
soft_fail, soft_fail,
&state_lock, &state_lock,
@ -1715,7 +1700,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.rooms .rooms
.get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals) .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
leaf_state.insert(shortstatekey, Arc::new(leaf_pdu.event_id.clone())); leaf_state.insert(shortstatekey, Arc::from(&*leaf_pdu.event_id));
// Now it's the state after the pdu // Now it's the state after the pdu
} }
@ -1730,7 +1715,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals) .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
state_after.insert(shortstatekey, Arc::new(incoming_pdu.event_id.clone())); state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id));
} }
fork_states.push(state_after); fork_states.push(state_after);
@ -1762,7 +1747,6 @@ async fn upgrade_outlier_to_timeline_pdu(
db, db,
) )
.map_err(|_| "Failed to load auth chain.".to_owned())? .map_err(|_| "Failed to load auth chain.".to_owned())?
.map(|event_id| (*event_id).clone())
.collect(), .collect(),
); );
} }
@ -1771,11 +1755,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.into_iter() .into_iter()
.map(|map| { .map(|map| {
map.into_iter() map.into_iter()
.map(|(k, id)| { .map(|(k, id)| db.rooms.get_statekey_from_short(k).map(|k| (k, id)))
db.rooms
.get_statekey_from_short(k)
.map(|k| (k, (*id).clone()))
})
.collect::<Result<StateMap<_>>>() .collect::<Result<StateMap<_>>>()
}) })
.collect::<Result<_>>() .collect::<Result<_>>()
@ -1832,7 +1812,7 @@ async fn upgrade_outlier_to_timeline_pdu(
db, db,
&incoming_pdu, &incoming_pdu,
val, val,
extremities, extremities.iter().map(Deref::deref),
state_ids_compressed, state_ids_compressed,
soft_fail, soft_fail,
&state_lock, &state_lock,
@ -1874,7 +1854,8 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
let mut pdus = vec![]; let mut pdus = vec![];
for id in events { for id in events {
if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(id) { if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id)
{
// Exponential backoff // Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
@ -1914,7 +1895,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
match crate::pdu::gen_event_id_canonical_json(&res.pdu) { match crate::pdu::gen_event_id_canonical_json(&res.pdu) {
Ok(t) => t, Ok(t) => t,
Err(_) => { Err(_) => {
back_off((**id).clone()); back_off((**id).to_owned());
continue; continue;
} }
}; };
@ -1939,14 +1920,14 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
Ok((pdu, json)) => (pdu, Some(json)), Ok((pdu, json)) => (pdu, Some(json)),
Err(e) => { Err(e) => {
warn!("Authentication of event {} failed: {:?}", id, e); warn!("Authentication of event {} failed: {:?}", id, e);
back_off((**id).clone()); back_off((**id).to_owned());
continue; continue;
} }
} }
} }
Err(_) => { Err(_) => {
warn!("Failed to fetch event: {}", id); warn!("Failed to fetch event: {}", id);
back_off((**id).clone()); back_off((**id).to_owned());
continue; continue;
} }
} }
@ -2124,11 +2105,11 @@ pub(crate) async fn fetch_signing_keys(
/// Append the incoming event setting the state snapshot to the state from the /// Append the incoming event setting the state snapshot to the state from the
/// server that sent the event. /// server that sent the event.
#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))] #[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))]
fn append_incoming_pdu( fn append_incoming_pdu<'a>(
db: &Database, db: &Database,
pdu: &PduEvent, pdu: &PduEvent,
pdu_json: CanonicalJsonObject, pdu_json: CanonicalJsonObject,
new_room_leaves: HashSet<EventId>, new_room_leaves: impl IntoIterator<Item = &'a EventId> + Clone + Debug,
state_ids_compressed: HashSet<CompressedStateEvent>, state_ids_compressed: HashSet<CompressedStateEvent>,
soft_fail: bool, soft_fail: bool,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
@ -2145,19 +2126,11 @@ fn append_incoming_pdu(
if soft_fail { if soft_fail {
db.rooms db.rooms
.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
db.rooms.replace_pdu_leaves( db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?;
&pdu.room_id,
&new_room_leaves.into_iter().collect::<Vec<_>>(),
)?;
return Ok(None); return Ok(None);
} }
let pdu_id = db.rooms.append_pdu( let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?;
pdu,
pdu_json,
&new_room_leaves.into_iter().collect::<Vec<_>>(),
db,
)?;
for appservice in db.appservice.all()? { for appservice in db.appservice.all()? {
if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? { if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? {
@ -2298,13 +2271,13 @@ fn get_auth_chain_inner(
event_id: &EventId, event_id: &EventId,
db: &Database, db: &Database,
) -> Result<HashSet<u64>> { ) -> Result<HashSet<u64>> {
let mut todo = vec![event_id.clone()]; let mut todo = vec![Arc::from(event_id)];
let mut found = HashSet::new(); let mut found = HashSet::new();
while let Some(event_id) = todo.pop() { while let Some(event_id) = todo.pop() {
match db.rooms.get_pdu(&event_id) { match db.rooms.get_pdu(&event_id) {
Ok(Some(pdu)) => { Ok(Some(pdu)) => {
if &pdu.room_id != room_id { if pdu.room_id != room_id {
return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db"));
} }
for auth_event in &pdu.auth_events { for auth_event in &pdu.auth_events {
@ -2363,10 +2336,10 @@ pub fn get_event_route(
.and_then(|val| val.as_str()) .and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?; .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let room_id = RoomId::try_from(room_id_str) let room_id = <&RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
if !db.rooms.server_in_room(sender_servername, &room_id)? { if !db.rooms.server_in_room(sender_servername, room_id)? {
return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found."));
} }
@ -2417,7 +2390,7 @@ pub fn get_missing_events_route(
.and_then(|val| val.as_str()) .and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?; .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let event_room_id = RoomId::try_from(room_id_str) let event_room_id = <&RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
if event_room_id != body.room_id { if event_room_id != body.room_id {
@ -2436,7 +2409,7 @@ pub fn get_missing_events_route(
continue; continue;
} }
queued_events.extend_from_slice( queued_events.extend_from_slice(
&serde_json::from_value::<Vec<EventId>>( &serde_json::from_value::<Vec<Box<EventId>>>(
serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| {
Error::bad_database("Event in db has no prev_events field.") Error::bad_database("Event in db has no prev_events field.")
})?) })?)
@ -2485,14 +2458,14 @@ pub fn get_event_authorization_route(
.and_then(|val| val.as_str()) .and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?; .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let room_id = RoomId::try_from(room_id_str) let room_id = <&RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
if !db.rooms.server_in_room(sender_servername, &room_id)? { if !db.rooms.server_in_room(sender_servername, room_id)? {
return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found."));
} }
let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::new(body.event_id.clone())], &db)?; let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?;
Ok(get_event_authorization::v1::Response { Ok(get_event_authorization::v1::Response {
auth_chain: auth_chain_ids auth_chain: auth_chain_ids
@ -2550,7 +2523,7 @@ pub fn get_room_state_route(
}) })
.collect(); .collect();
let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?;
Ok(get_room_state::v1::Response { Ok(get_room_state::v1::Response {
auth_chain: auth_chain_ids auth_chain: auth_chain_ids
@ -2606,13 +2579,13 @@ pub fn get_room_state_ids_route(
.rooms .rooms
.state_full_ids(shortstatehash)? .state_full_ids(shortstatehash)?
.into_iter() .into_iter()
.map(|(_, id)| (*id).clone()) .map(|(_, id)| (*id).to_owned())
.collect(); .collect();
let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?;
Ok(get_room_state_ids::v1::Response { Ok(get_room_state_ids::v1::Response {
auth_chain_ids: auth_chain_ids.map(|id| (*id).clone()).collect(), auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(),
pdu_ids, pdu_ids,
} }
.into()) .into())
@ -2671,9 +2644,8 @@ pub fn create_join_event_template_route(
}; };
// If there was no create event yet, assume we are creating a version 6 room right now // If there was no create event yet, assume we are creating a version 6 room right now
let room_version_id = create_event_content.map_or(RoomVersionId::Version6, |create_event| { let room_version_id =
create_event.room_version create_event_content.map_or(RoomVersionId::V6, |create_event| create_event.room_version);
});
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
if !body.ver.contains(&room_version_id) { if !body.ver.contains(&room_version_id) {
@ -2693,6 +2665,7 @@ pub fn create_join_event_template_route(
membership: MembershipState::Join, membership: MembershipState::Join,
third_party_invite: None, third_party_invite: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("member event is valid value"); .expect("member event is valid value");
@ -2726,7 +2699,7 @@ pub fn create_join_event_template_route(
} }
let pdu = PduEvent { let pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater"), event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: body.room_id.clone(), room_id: body.room_id.clone(),
sender: body.user_id.clone(), sender: body.user_id.clone(),
origin_server_ts: utils::millis_since_unix_epoch() origin_server_ts: utils::millis_since_unix_epoch()
@ -2838,7 +2811,7 @@ async fn create_join_event(
.roomid_mutex_federation .roomid_mutex_federation
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let mutex_lock = mutex.lock().await; let mutex_lock = mutex.lock().await;
@ -2937,8 +2910,7 @@ pub async fn create_invite_route(
return Err(Error::bad_config("Federation is disabled.")); return Err(Error::bad_config("Federation is disabled."));
} }
if body.room_version != RoomVersionId::Version5 && body.room_version != RoomVersionId::Version6 if body.room_version != RoomVersionId::V5 && body.room_version != RoomVersionId::V6 {
{
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::IncompatibleRoomVersion { ErrorKind::IncompatibleRoomVersion {
room_version: body.room_version.clone(), room_version: body.room_version.clone(),
@ -2959,7 +2931,7 @@ pub async fn create_invite_route(
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?;
// Generate event id // Generate event id
let event_id = EventId::try_from(&*format!( let event_id = EventId::parse(format!(
"${}", "${}",
ruma::signatures::reference_hash(&signed_event, &body.room_version) ruma::signatures::reference_hash(&signed_event, &body.room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
@ -2972,7 +2944,7 @@ pub async fn create_invite_route(
CanonicalJsonValue::String(event_id.into()), CanonicalJsonValue::String(event_id.into()),
); );
let sender = serde_json::from_value( let sender: Box<_> = serde_json::from_value(
signed_event signed_event
.get("sender") .get("sender")
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
@ -2984,7 +2956,7 @@ pub async fn create_invite_route(
) )
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?;
let invited_user = serde_json::from_value( let invited_user: Box<_> = serde_json::from_value(
signed_event signed_event
.get("state_key") .get("state_key")
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
@ -3235,7 +3207,7 @@ pub(crate) async fn fetch_required_signing_keys(
let fetch_res = fetch_signing_keys( let fetch_res = fetch_signing_keys(
db, db,
&Box::<ServerName>::try_from(&**signature_server).map_err(|_| { signature_server.as_str().try_into().map_err(|_| {
Error::BadServerResponse("Invalid servername in signatures of server response pdu.") Error::BadServerResponse("Invalid servername in signatures of server response pdu.")
})?, })?,
signature_ids, signature_ids,
@ -3263,7 +3235,7 @@ pub(crate) async fn fetch_required_signing_keys(
// the PDUs and either cache the key or add it to the list that needs to be retrieved. // the PDUs and either cache the key or add it to the list that needs to be retrieved.
fn get_server_keys_from_cache( fn get_server_keys_from_cache(
pdu: &RawJsonValue, pdu: &RawJsonValue,
servers: &mut BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, QueryCriteria>>, servers: &mut BTreeMap<Box<ServerName>, BTreeMap<Box<ServerSigningKeyId>, QueryCriteria>>,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, String>>>,
db: &Database, db: &Database,
@ -3273,11 +3245,12 @@ fn get_server_keys_from_cache(
Error::BadServerResponse("Invalid PDU in server response") Error::BadServerResponse("Invalid PDU in server response")
})?; })?;
let event_id = EventId::try_from(&*format!( let event_id = format!(
"${}", "${}",
ruma::signatures::reference_hash(&value, room_version) ruma::signatures::reference_hash(&value, room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
)) );
let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
if let Some((time, tries)) = db if let Some((time, tries)) = db
@ -3285,7 +3258,7 @@ fn get_server_keys_from_cache(
.bad_event_ratelimiter .bad_event_ratelimiter
.read() .read()
.unwrap() .unwrap()
.get(&event_id) .get(event_id)
{ {
// Exponential backoff // Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
@ -3319,7 +3292,7 @@ fn get_server_keys_from_cache(
let contains_all_ids = let contains_all_ids =
|keys: &BTreeMap<String, String>| signature_ids.iter().all(|id| keys.contains_key(id)); |keys: &BTreeMap<String, String>| signature_ids.iter().all(|id| keys.contains_key(id));
let origin = &Box::<ServerName>::try_from(&**signature_server).map_err(|_| { let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| {
Error::BadServerResponse("Invalid servername in signatures of server response pdu.") Error::BadServerResponse("Invalid servername in signatures of server response pdu.")
})?; })?;
@ -3338,7 +3311,7 @@ fn get_server_keys_from_cache(
if !contains_all_ids(&result) { if !contains_all_ids(&result) {
trace!("Signing key not loaded for {}", origin); trace!("Signing key not loaded for {}", origin);
servers.insert(origin.clone(), BTreeMap::new()); servers.insert(origin.to_owned(), BTreeMap::new());
} }
pub_key_map.insert(origin.to_string(), result); pub_key_map.insert(origin.to_string(), result);
@ -3353,7 +3326,7 @@ pub(crate) async fn fetch_join_signing_keys(
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
db: &Database, db: &Database,
) -> Result<()> { ) -> Result<()> {
let mut servers: BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, QueryCriteria>> = let mut servers: BTreeMap<Box<ServerName>, BTreeMap<Box<ServerSigningKeyId>, QueryCriteria>> =
BTreeMap::new(); BTreeMap::new();
{ {
@ -3387,10 +3360,6 @@ pub(crate) async fn fetch_join_signing_keys(
server, server,
get_remote_server_keys_batch::v2::Request { get_remote_server_keys_batch::v2::Request {
server_keys: servers.clone(), server_keys: servers.clone(),
minimum_valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(60),
)
.expect("time is valid"),
}, },
) )
.await .await

Loading…
Cancel
Save