Compare commits


No commits in common. 'next' and 'caches' have entirely different histories.
Comparing: `next` ... `caches`

83 changed files:

| File | Changes |
| --- | ---: |
| .dockerignore | 2 |
| .github/ISSUE_TEMPLATE/Issue.md | 11 |
| .gitignore | 1 |
| .gitlab-ci.yml | 319 |
| .gitlab/issue_templates/Bug Report.md | 19 |
| .gitlab/issue_templates/Feature Request.md | 17 |
| .gitlab/issue_templates/Issue Template.md | 15 |
| .gitlab/merge_request_templates/MR.md | 8 |
| APPSERVICES.md | 54 |
| Cargo.lock | 1196 |
| Cargo.toml | 87 |
| DEPLOY.md | 84 |
| Dockerfile | 127 |
| README.md | 89 |
| conduit-example.toml | 13 |
| debian/postinst | 8 |
| docker-compose.yml | 30 |
| docker/README.md | 113 |
| docker/ci-binaries-packaging.Dockerfile | 78 |
| docker/docker-compose.override.traefik.yml | 23 |
| docker/docker-compose.traefik.yml | 26 |
| docker/healthcheck.sh | 13 |
| rust-toolchain | 2 |
| src/appservice_server.rs | 12 |
| src/client_server/account.rs | 310 |
| src/client_server/alias.rs | 54 |
| src/client_server/backup.rs | 183 |
| src/client_server/capabilities.rs | 20 |
| src/client_server/config.rs | 47 |
| src/client_server/context.rs | 33 |
| src/client_server/device.rs | 108 |
| src/client_server/directory.rs | 300 |
| src/client_server/filter.rs | 6 |
| src/client_server/keys.rs | 225 |
| src/client_server/media.rs | 37 |
| src/client_server/membership.rs | 767 |
| src/client_server/message.rs | 88 |
| src/client_server/mod.rs | 11 |
| src/client_server/presence.rs | 32 |
| src/client_server/profile.rs | 203 |
| src/client_server/push.rs | 173 |
| src/client_server/read_marker.rs | 40 |
| src/client_server/redact.rs | 39 |
| src/client_server/report.rs | 84 |
| src/client_server/room.rs | 514 |
| src/client_server/search.rs | 21 |
| src/client_server/session.rs | 66 |
| src/client_server/state.rs | 103 |
| src/client_server/sync.rs | 550 |
| src/client_server/tag.rs | 57 |
| src/client_server/thirdparty.rs | 3 |
| src/client_server/to_device.rs | 54 |
| src/client_server/typing.rs | 16 |
| src/client_server/unversioned.rs | 2 |
| src/client_server/user_directory.rs | 36 |
| src/client_server/voip.rs | 58 |
| src/database.rs | 753 |
| src/database/abstraction.rs | 303 |
| src/database/abstraction/heed.rs | 240 |
| src/database/abstraction/sled.rs | 128 |
| src/database/abstraction/sqlite.rs | 392 |
| src/database/account_data.rs | 89 |
| src/database/admin.rs | 97 |
| src/database/appservice.rs | 23 |
| src/database/globals.rs | 186 |
| src/database/key_backups.rs | 66 |
| src/database/media.rs | 16 |
| src/database/proxy.rs | 146 |
| src/database/pusher.rs | 95 |
| src/database/rooms.rs | 2410 |
| src/database/rooms/edus.rs | 100 |
| src/database/sending.rs | 350 |
| src/database/uiaa.rs | 189 |
| src/database/users.rs | 197 |
| src/error.rs | 16 |
| src/lib.rs | 10 |
| src/main.rs | 130 |
| src/pdu.rs | 165 |
| src/ruma_wrapper.rs | 71 |
| src/server_server.rs | 2565 |
| src/utils.rs | 44 |
| tests/sytest/sytest-whitelist | 1 |
| tests/test-config.toml | 15 |

.dockerignore (2 changes)

@@ -14,8 +14,6 @@ docker-compose*
# Git folder
.git
.gitea
.gitlab
.github
# Dot files
.env

.github/ISSUE_TEMPLATE/Issue.md (11 changes)

@@ -1,11 +0,0 @@
---
name: "Issue with / Feature Request for Conduit"
about: "Please file issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new"
title: "CLOSE ME"
---
**⚠ Conduit development does not happen on GitHub. Issues opened here will not be addressed**
Please open issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new

.gitignore (vendored, 1 change)

@@ -59,7 +59,6 @@ $RECYCLE.BIN/
# Conduit
Rocket.toml
conduit.toml
conduit.db
# Etc.
**/*.rs.bk

.gitlab-ci.yml (319 changes)

@@ -1,309 +1,28 @@
stages:
- build
- build docker image
- test
- upload artifacts
image: "rust:latest"
variables:
GIT_SUBMODULE_STRATEGY: recursive
FF_USE_FASTZIP: 1
CACHE_COMPRESSION_LEVEL: fastest
# --------------------------------------------------------------------- #
# Cargo: Compiling for different architectures #
# --------------------------------------------------------------------- #
.build-cargo-shared-settings:
stage: "build"
needs: []
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
- if: '$CI_COMMIT_BRANCH == "next"'
- if: "$CI_COMMIT_TAG"
interruptible: true
image: "rust:latest"
tags: ["docker"]
cache:
paths:
- cargohome
- target/
key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--release"
variables:
CARGO_PROFILE_RELEASE_LTO: "true"
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1"
before_script:
- 'echo "Building for target $TARGET"'
- 'mkdir -p cargohome && CARGOHOME="cargohome"'
- "rustc --version && cargo --version && rustup show" # Print version info for debugging
- "rustup target add $TARGET"
script:
- time cargo build --target $TARGET --release
- 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"'
artifacts:
expire_in: never
build:release:cargo:x86_64-unknown-linux-musl-with-debug:
extends: .build-cargo-shared-settings
image: messense/rust-musl-cross:x86_64-musl
variables:
CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling
TARGET: "x86_64-unknown-linux-musl"
after_script:
- "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug"
artifacts:
name: "conduit-x86_64-unknown-linux-musl-with-debug"
paths:
- "conduit-x86_64-unknown-linux-musl-with-debug"
expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug"
default:
tags: [docker]
build:release:cargo:x86_64-unknown-linux-musl:
extends: .build-cargo-shared-settings
image: messense/rust-musl-cross:x86_64-musl
variables:
TARGET: "x86_64-unknown-linux-musl"
artifacts:
name: "conduit-x86_64-unknown-linux-musl"
paths:
- "conduit-x86_64-unknown-linux-musl"
expose_as: "Conduit for x86_64-unknown-linux-musl"
cache:
paths:
- target
- cargohome
build:release:cargo:arm-unknown-linux-musleabihf:
extends: .build-cargo-shared-settings
image: messense/rust-musl-cross:arm-musleabihf
variables:
TARGET: "arm-unknown-linux-musleabihf"
artifacts:
name: "conduit-arm-unknown-linux-musleabihf"
paths:
- "conduit-arm-unknown-linux-musleabihf"
expose_as: "Conduit for arm-unknown-linux-musleabihf"
build:release:cargo:armv7-unknown-linux-musleabihf:
extends: .build-cargo-shared-settings
image: messense/rust-musl-cross:armv7-musleabihf
variables:
TARGET: "armv7-unknown-linux-musleabihf"
artifacts:
name: "conduit-armv7-unknown-linux-musleabihf"
paths:
- "conduit-armv7-unknown-linux-musleabihf"
expose_as: "Conduit for armv7-unknown-linux-musleabihf"
build:release:cargo:aarch64-unknown-linux-musl:
extends: .build-cargo-shared-settings
image: messense/rust-musl-cross:aarch64-musl
variables:
TARGET: "aarch64-unknown-linux-musl"
artifacts:
name: "conduit-aarch64-unknown-linux-musl"
paths:
- "conduit-aarch64-unknown-linux-musl"
expose_as: "Conduit for aarch64-unknown-linux-musl"
.cargo-debug-shared-settings:
extends: ".build-cargo-shared-settings"
rules:
- if: '$CI_COMMIT_BRANCH != "master"'
cache:
key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug"
script:
- "time cargo build --target $TARGET"
- 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"'
artifacts:
expire_in: 4 weeks
build:debug:cargo:x86_64-unknown-linux-musl:
extends: ".cargo-debug-shared-settings"
image: messense/rust-musl-cross:x86_64-musl
variables:
TARGET: "x86_64-unknown-linux-musl"
artifacts:
name: "conduit-debug-x86_64-unknown-linux-musl"
paths:
- "conduit-debug-x86_64-unknown-linux-musl"
expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl"
# --------------------------------------------------------------------- #
# Create and publish docker image #
# --------------------------------------------------------------------- #
.docker-shared-settings:
stage: "build docker image"
image: jdrouet/docker-with-buildx:stable
tags: ["docker"]
services:
- docker:dind
needs:
- "build:release:cargo:x86_64-unknown-linux-musl"
- "build:release:cargo:arm-unknown-linux-musleabihf"
- "build:release:cargo:armv7-unknown-linux-musleabihf"
- "build:release:cargo:aarch64-unknown-linux-musl"
variables:
DOCKER_HOST: tcp://docker:2375/
DOCKER_TLS_CERTDIR: ""
DOCKER_DRIVER: overlay2
PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64"
DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile"
cache:
paths:
- docker_cache
key: "$CI_JOB_NAME"
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
# Only log in to Dockerhub if the credentials are given:
- if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
script:
# Prepare buildx to build multiarch stuff:
- docker context create 'ci-context'
- docker buildx create --name 'multiarch-builder' --use 'ci-context'
# Copy binaries to their docker arch path
- mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64
- mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6
- mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7
- mv ./conduit-aarch64-unknown-linux-musl linux/arm64
- 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"'
# Build and push image:
- >
docker buildx build
--pull
--push
--cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache
--cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache
--build-arg CREATED=$CREATED
--build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
--build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
--platform "$PLATFORMS"
--tag "$TAG"
--tag "$TAG-alpine"
--tag "$TAG-commit-$CI_COMMIT_SHORT_SHA"
--file "$DOCKER_FILE" .
docker:next:gitlab:
extends: .docker-shared-settings
rules:
- if: '$CI_COMMIT_BRANCH == "next"'
variables:
TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
docker:next:dockerhub:
extends: .docker-shared-settings
rules:
- if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB'
variables:
TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
docker:master:gitlab:
extends: .docker-shared-settings
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
variables:
TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
docker:master:dockerhub:
extends: .docker-shared-settings
rules:
- if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB'
variables:
TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
variables:
GIT_SUBMODULE_STRATEGY: recursive
CARGO_HOME: "cargohome"
FF_USE_FASTZIP: 1
# --------------------------------------------------------------------- #
# Run tests #
# --------------------------------------------------------------------- #
before_script:
- mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps"
- apt-get update -yqq
- apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config
- rustup component add clippy rustfmt
test:cargo:
stage: "test"
needs: []
image: "rust:latest"
tags: ["docker"]
variables:
CARGO_HOME: "cargohome"
cache:
paths:
- target
- cargohome
key: test_cache
interruptible: true
before_script:
- mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps"
- apt-get update -yqq
- apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget
- rustup component add clippy rustfmt
- wget "https://faulty-storage.de/gitlab-report"
- chmod +x ./gitlab-report
script:
- rustc --version && cargo --version # Print version info for debugging
- rustc --version && cargo --version # Print version info for debugging
- cargo test --workspace --verbose --locked
- cargo fmt --all -- --check
- "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | ./gitlab-report -p test > $CI_PROJECT_DIR/report.xml"
- "cargo clippy --color always --verbose --message-format=json | ./gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
artifacts:
when: always
reports:
junit: report.xml
codequality: gl-code-quality-report.json
test:sytest:
stage: "test"
allow_failure: true
needs:
- "build:debug:cargo:x86_64-unknown-linux-musl"
image:
name: "valkum/sytest-conduit:latest"
entrypoint: [""]
tags: ["docker"]
variables:
PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz"
before_script:
- "mkdir -p /app"
- "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit"
- "chmod +x /app/conduit"
- "rm -rf /src && ln -s $CI_PROJECT_DIR/ /src"
- "mkdir -p /work/server-0/database/ && mkdir -p /work/server-1/database/ && mkdir -p /work/server-2/database/"
- "cd /"
script:
- "SYTEST_EXIT_CODE=0"
- "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1"
- 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap'
- "exit $SYTEST_EXIT_CODE"
artifacts:
when: always
paths:
- "$CI_PROJECT_DIR/sytest.xml"
- "$CI_PROJECT_DIR/results.tap"
reports:
junit: "$CI_PROJECT_DIR/sytest.xml"
# --------------------------------------------------------------------- #
# Store binaries as package so they have download urls #
# --------------------------------------------------------------------- #
publish:package:
stage: "upload artifacts"
needs:
- "build:release:cargo:x86_64-unknown-linux-musl"
- "build:release:cargo:arm-unknown-linux-musleabihf"
- "build:release:cargo:armv7-unknown-linux-musleabihf"
- "build:release:cargo:aarch64-unknown-linux-musl"
# - "build:cargo-deb:x86_64-unknown-linux-gnu"
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
- if: '$CI_COMMIT_BRANCH == "next"'
- if: "$CI_COMMIT_TAG"
image: curlimages/curl:latest
tags: ["docker"]
variables:
GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
script:
- 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"'
# Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
workflow:
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
when: never
- if: "$CI_COMMIT_BRANCH"
- cargo clippy

.gitlab/issue_templates/Bug Report.md (19 changes)

@@ -1,19 +0,0 @@
<!--
If you're requesting a new feature, that isn't part of this project yet,
then please consider filling out a "Feature Request" instead!
If you need a hand setting up your conduit server, feel free to ask for help in the
Conduit Matrix Chat: https://matrix.to/#/#conduit:fachschaften.org.
-->
### Description
<!-- What did you do and what happened? Why is that bad? -->
### System Configuration
<!-- Other data that might help us debug this issue, like os, conduit version, database backend -->
Conduit Version:
Database backend (default is sqlite): sqlite
/label ~conduit

.gitlab/issue_templates/Feature Request.md (17 changes)

@@ -1,17 +0,0 @@
<!--
If you want to report a bug or an error,
then please consider filling out a "Bug Report" instead!
-->
### Is your feature request related to a problem? Please describe.
<!-- Eg. I'm always frustrated when [...] -->
### Describe the solution you'd like
/label ~conduit

.gitlab/issue_templates/Issue Template.md (15 changes)

@@ -0,0 +1,15 @@
# Headline
### Description
/label ~conduit

.gitlab/merge_request_templates/MR.md (8 changes)

@@ -1,8 +0,0 @@
<!-- Please describe your changes here -->
-----------------------------------------------------------------------------
- [ ] I ran `cargo fmt` and `cargo test`
- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license

APPSERVICES.md (54 changes)

@@ -4,7 +4,14 @@
If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
## Set up the appservice - general instructions
## Tested appservices
Here are some appservices we tested and that work with Conduit:
- matrix-appservice-discord
- mautrix-hangouts
- mautrix-telegram
## Set up the appservice
Follow whatever instructions are given by the appservice. This usually includes
downloading, changing its config (setting domain, homeserver url, port etc.)
@@ -39,48 +46,3 @@ Then you are done. Conduit will send messages to the appservices and the
appservice can send requests to the homeserver. You don't need to restart
Conduit, but if it doesn't work, restarting while the appservice is running
could help.
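For orientation, the file you register with Conduit is a standard Matrix appservice registration YAML. A minimal sketch (every value here is illustrative; the real file is generated by, or written for, your specific appservice):

```yaml
id: telegram                    # unique name for this appservice
url: http://localhost:29317     # where the homeserver can reach the appservice
as_token: <long random string>  # token the appservice uses against the homeserver
hs_token: <long random string>  # token the homeserver uses against the appservice
sender_localpart: telegrambot   # localpart of the appservice's own user
namespaces:
  users:                        # user IDs this appservice manages
    - exclusive: true
      regex: '@telegram_.*'
  aliases: []
  rooms: []
```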
## Appservice-specific instructions
### Tested appservices
These appservices have been tested and work with Conduit without any extra steps:
- [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord)
- [mautrix-hangouts](https://github.com/mautrix/hangouts/)
- [mautrix-telegram](https://github.com/mautrix/telegram/)
### [mautrix-signal](https://github.com/mautrix/signal)
There are a few things you need to do, in order for the Signal bridge (at least
up to version `0.2.0`) to work. How you do this depends on whether you use
Docker or `virtualenv` to run it. In either case you need to modify
[portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py).
Do this **before** following the bridge installation guide.
1. **Create a copy of `portal.py`**. Go to
[portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py)
at [mautrix-signal](https://github.com/mautrix/signal) (make sure you change to
the correct commit/version of mautrix-signal you're using) and copy its
content. Create a new `portal.py` on your system and paste the content in.
2. **Patch the copy**. Exact line numbers may be slightly different, look nearby if they don't match:
- [Line 1020](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1020)
```diff
--- levels.users[self.main_intent.mxid] = 9001 if is_initial else 100
+++ levels.users[self.main_intent.mxid] = 100 if is_initial else 100
```
- [Between lines 1041 and 1042](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1041-L1042) add a new line:
```diff
"type": str(EventType.ROOM_POWER_LEVELS),
+++ "state_key": "",
"content": power_levels.serialize(),
```
3. **Deploy the patch**. This is different depending on how you have `mautrix-signal` deployed:
- [*If using virtualenv*] Copy your patched `portal.py` to `./lib/python3.7/site-packages/mautrix_signal/portal.py` (the exact version of Python may be different on your system).
- [*If using Docker*] Map the patched `portal.py` into the `mautrix-signal` container:
```yaml
volumes:
- ./your/path/on/host/portal.py:/usr/lib/python3.9/site-packages/mautrix_signal/portal.py
```
4. Now continue with the [bridge installation instructions ](https://docs.mau.fi/bridges/index.html) and the general bridge notes above.

Cargo.lock (generated, 1196 changes)

File diff suppressed because it is too large.

Cargo.toml (87 changes)

@@ -6,53 +6,54 @@ authors = ["timokoesters <timo@koesters.xyz>"]
homepage = "https://conduit.rs"
repository = "https://gitlab.com/famedly/conduit"
readme = "README.md"
version = "0.2.0"
version = "0.1.0"
edition = "2018"
rust = "1.50"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# Used to handle requests
# TODO: This can become optional as soon as proper configs are supported
# rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests
rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests
rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests
# Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "16f031fabb7871fcd738b0f25391193ee4ca28a9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "5a7e2cddcf257e367465cced51442c91e8f557c9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
# Used for long polling and federation sender, should be the same as rocket::tokio
tokio = "1.11.0"
tokio = "1.2.0"
# Used for storing data permanently
sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true }
rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true }
#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
# Used for the http request / response body type for Ruma endpoints used with reqwest
bytes = "1.1.0"
bytes = "1.0.1"
# Used for emitting log entries
log = "0.4.14"
# Used for rocket<->ruma conversions
http = "0.2.4"
http = "0.2.3"
# Used to find data directory for default db path
directories = "3.0.2"
directories = "3.0.1"
# Used for ruma wrapper
serde_json = { version = "1.0.67", features = ["raw_value"] }
serde_json = { version = "1.0.64", features = ["raw_value"] }
# Used for appservice registration files
serde_yaml = "0.8.20"
serde_yaml = "0.8.17"
# Used for pdu definition
serde = { version = "1.0.130", features = ["rc"] }
serde = "1.0.123"
# Used for secure identifiers
rand = "0.8.4"
rand = "0.8.3"
# Used to hash passwords
rust-argon2 = "0.8.3"
# Used to send requests
reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
reqwest = { version = "0.11.3", default-features = false, features = ["rustls-tls-native-roots"] }
# Custom TLS verifier
rustls = { version = "0.19.1", features = ["dangerous_configuration"] }
rustls = { version = "0.19", features = ["dangerous_configuration"] }
rustls-native-certs = "0.5.0"
webpki = "0.22.0"
webpki = "0.21.0"
# Used for conduit::Error type
thiserror = "1.0.28"
thiserror = "1.0.24"
# Used to generate thumbnails for images
image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] }
# Used to encode server public key
@@ -60,35 +61,24 @@ base64 = "0.13.0"
# Used when hashing the state
ring = "0.16.20"
# Used when querying the SRV record of other servers
trust-dns-resolver = "0.20.3"
trust-dns-resolver = "0.20.0"
# Used to find matching events for appservices
regex = "1.5.4"
regex = "1.4.3"
# jwt jsonwebtokens
jsonwebtoken = "7.2.0"
# Performance measurements
tracing = { version = "0.1.26", features = ["release_max_level_warn"] }
tracing-subscriber = "0.2.20"
tracing-flame = "0.1.0"
opentelemetry = { version = "0.16.0", features = ["rt-tokio"] }
opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] }
tracing = "0.1.25"
opentelemetry = "0.12.0"
tracing-subscriber = "0.2.16"
tracing-opentelemetry = "0.11.0"
opentelemetry-jaeger = "0.11.0"
pretty_env_logger = "0.4.0"
lru-cache = "0.1.2"
rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] }
parking_lot = { version = "0.11.2", optional = true }
crossbeam = { version = "0.8.1", optional = true }
num_cpus = "1.13.0"
threadpool = "1.8.1"
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
thread_local = "1.1.3"
# used for TURN server authentication
hmac = "0.11.0"
sha-1 = "0.9.8"
[features]
default = ["conduit_bin", "backend_sqlite"]
default = ["conduit_bin", "backend_sled"]
backend_sled = ["sled"]
backend_sqlite = ["sqlite"]
backend_heed = ["heed", "crossbeam"]
sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"]
backend_rocksdb = ["rocksdb"]
conduit_bin = [] # TODO: add rocket to this when it is optional
[[bin]]
@@ -122,19 +112,6 @@ conf-files = [
maintainer-scripts = "debian/"
systemd-units = { unit-name = "matrix-conduit" }
[profile.dev]
lto = 'off'
incremental = true
[profile.release]
lto = 'thin'
incremental = true
codegen-units=32
# If you want to make flamegraphs, enable debug info:
# debug = true
# For releases also try to max optimizations for dependencies:
[profile.release.build-override]
opt-level = 3
[profile.release.package."*"]
opt-level = 3
# For flamegraphs:
#[profile.release]
#debug = true

DEPLOY.md (84 changes)

@@ -2,27 +2,16 @@
## Getting help
If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us
in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
## Installing Conduit
Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore
only offer Linux binaries.
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
| CPU Architecture | Download stable version |
| ------------------------------------------- | ------------------------------ |
| x86_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] |
| armv6 | [Download][armv6-musl-master] |
| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] |
| armv8 / aarch64 | [Download][armv8-musl-master] |
[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
You may simply download the binary that fits your machine. Run `uname -m` to see
what you need. Now copy the right url:
- x86_64: `https://conduit.rs/master/x86_64/conduit-bin`
- armv7: `https://conduit.rs/master/armv7/conduit-bin`
- armv8: `https://conduit.rs/master/armv8/conduit-bin`
- arm: `https://conduit.rs/master/arm/conduit-bin`
```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
@@ -30,30 +19,29 @@ $ sudo chmod +x /usr/local/bin/matrix-conduit
```
Alternatively, you may compile the binary yourself using
```bash
$ cargo build --release
```
Note that this currently requires Rust 1.50.
If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md).
## Adding a Conduit user
While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
you to make sure that the file permissions are correctly set up.
While Conduit can run as any user it is usually better to use dedicated users for different services.
This also allows you to make sure that the file permissions are correctly set up.
In Debian you can use this command to create a Conduit user:
```bash
```
sudo adduser --system conduit --no-create-home
```
## Setting up a systemd service
Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
server reboots. Simply paste the default systemd service you can find below into
Now we'll set up a systemd service for Conduit, so it's easy to start/stop
Conduit and set it to autostart when your server reboots. Simply paste the
default systemd service you can find below into
`/etc/systemd/system/conduit.service`.
```systemd
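# The unit itself is cut off in this diff view. What follows is a minimal
# sketch of such a unit, not the exact file from the repository; the binary
# path, user and config path are assumptions taken from the surrounding guide.
[Unit]
Description=Conduit Matrix homeserver
After=network.target

[Service]
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
User=conduit
Group=nogroup
Restart=always
ExecStart=/usr/local/bin/matrix-conduit

[Install]
WantedBy=multi-user.target
```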
@@ -78,10 +66,10 @@ Finally, run
$ sudo systemctl daemon-reload
```
## Creating the Conduit configuration file
Now we need to create Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
to read it. You need to change at least the server name.**
Now we need to create Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment to read it. You need to change at least the server name.**
```toml
[global]
@@ -108,8 +96,8 @@ port = 6167
# Max size for uploads
max_request_size = 20_000_000 # in bytes
# Enables registration. If set to false, no users can register on this server.
allow_registration = true
# Disabling registration means no new users will be able to register on this server
allow_registration = false
# Disable encryption, so no new encrypted rooms can be created
# Note: existing rooms will continue to work
@@ -118,31 +106,30 @@ allow_federation = true
trusted_servers = ["matrix.org"]
#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#workers = 4 # default: cpu core count * 2
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
# The total amount of memory that the database will use.
#db_cache_capacity_mb = 200
```
## Setting the correct file permissions
As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
Debian:
As we are using a Conduit specific user we need to allow it to read the config.
To do that you can run this command on Debian:
```bash
```
sudo chown -R conduit:nogroup /etc/matrix-conduit
```
If you use the default database path you also need to run this:
```bash
```
sudo mkdir -p /var/lib/matrix-conduit/conduit_db
sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db
```
## Setting up the Reverse Proxy
This depends on whether you use Apache, Nginx or another web server.
@@ -151,7 +138,7 @@ This depends on whether you use Apache, Nginx or another web server.
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
```apache
```
Listen 8448
<VirtualHost *:443 *:8448>
@@ -162,6 +149,9 @@ AllowEncodedSlashes NoDecode
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
Include /etc/letsencrypt/options-ssl-apache.conf
SSLCertificateFile /etc/letsencrypt/live/your.server.name/fullchain.pem # EDIT THIS
SSLCertificateKeyFile /etc/letsencrypt/live/your.server.name/privkey.pem # EDIT THIS
</VirtualHost>
```
@@ -171,11 +161,13 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
$ sudo systemctl reload apache2
```
### Nginx
If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`
If you use Nginx and not Apache, add the following server section inside the
http section of `/etc/nginx/nginx.conf`
```nginx
```
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
@@ -196,13 +188,13 @@ server {
include /etc/letsencrypt/options-ssl-nginx.conf;
}
```
**You need to make some edits again.** When you are done, run
```bash
$ sudo systemctl reload nginx
```
## SSL Certificate
The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this:
@@ -211,6 +203,7 @@ The easiest way to get an SSL certificate, if you don't have one already, is to
$ sudo certbot -d your.server.name
```
## You're done!
Now you can start Conduit with:
@@ -225,15 +218,4 @@ Set it to start automatically when your system boots with:
$ sudo systemctl enable conduit
```
## How do I know it works?
You can open <https://app.element.io>, enter your homeserver and try to register.
You can also use these commands as a quick health check.
```bash
$ curl https://your.server.name/_matrix/client/versions
$ curl https://your.server.name:8448/_matrix/client/versions
```
If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md).

Dockerfile (127 changes)

@@ -1,65 +1,75 @@
# syntax=docker/dockerfile:1
FROM docker.io/rust:1.53-alpine AS builder
WORKDIR /usr/src/conduit
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
# Install required packages to build Conduit and its dependencies
RUN apk add musl-dev
# == Build dependencies without our own code separately for caching ==
#
# Need a fake main.rs since Cargo refuses to build anything otherwise.
#
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
# request that would allow just dependencies to be compiled, presumably
# regardless of whether source files are available.
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
COPY Cargo.toml Cargo.lock ./
RUN cargo build --release && rm -r src
########################## BUILD IMAGE ##########################
# Alpine build image to build Conduit's statically compiled binary
FROM alpine:3.12 as builder
# Copy over actual Conduit sources
COPY src src
# Specifies if the local project is built or if Conduit gets built
# from the official git repository. Defaults to the git repo.
ARG LOCAL=false
# Specifies which revision/commit is built. Defaults to HEAD
ARG GIT_REF=origin/master
# main.rs and lib.rs need their timestamp updated for this to work correctly since
# otherwise the build with the fake main.rs from above is newer than the
# source files (COPY preserves timestamps).
#
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
RUN touch src/main.rs && touch src/lib.rs && cargo build --release
# Add 'edge'-repository to get Rust 1.45
RUN sed -i \
-e 's|v3\.12|edge|' \
/etc/apk/repositories
# Install packages needed for building all crates
RUN apk add --no-cache \
cargo \
openssl-dev
# Copy project files from current folder
COPY . .
# Build it from the copied local files or from the official git repository
RUN if [[ $LOCAL == "true" ]]; then \
cargo install --path . ; \
else \
cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF}; \
fi
# ---------------------------------------------------------------------------------------------------------------
# Stuff below this line actually ends up in the resulting docker image
# ---------------------------------------------------------------------------------------------------------------
FROM docker.io/alpine:3.15.0 AS runner
########################## RUNTIME IMAGE ##########################
# Create new stage with a minimal image for the actual
# runtime image/container
FROM alpine:3.12
# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167
ARG CREATED
ARG VERSION
ARG GIT_REF=origin/master
# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs.
ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
# Conduit needs:
# ca-certificates: for https
# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
RUN apk add --no-cache \
ca-certificates \
libgcc
# Created directory for the database and media files
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
# including a custom label specifying the build command
LABEL org.opencontainers.image.created=${CREATED} \
org.opencontainers.image.authors="Conduit Contributors" \
org.opencontainers.image.title="Conduit" \
org.opencontainers.image.version=${VERSION} \
org.opencontainers.image.vendor="Conduit Contributors" \
org.opencontainers.image.description="A Matrix homeserver written in Rust" \
org.opencontainers.image.url="https://conduit.rs/" \
org.opencontainers.image.revision=${GIT_REF} \
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
org.opencontainers.image.licenses="Apache-2.0" \
org.opencontainers.image.documentation="" \
org.opencontainers.image.ref.name="" \
org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
maintainer="Weasy666"
# Standard port on which Rocket launches
EXPOSE 8000
# Copy config files from context and the binary from
# the "builder" stage to the current stage into folder
# /srv/conduit and create data folder for database
RUN mkdir -p /srv/conduit/.local/share/conduit
COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/
# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
# Copy over the actual Conduit binary from the builder stage
COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit
# Improve security: Don't run stuff as root, that does not need to run as root:
# Add www-data user and group with UID 82, as used by alpine
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
RUN set -x ; \
@@ -69,13 +79,22 @@ RUN set -x ; \
# Change ownership of Conduit files to www-data user and group
RUN chown -cR www-data:www-data /srv/conduit
RUN chmod +x /srv/conduit/healthcheck.sh
# Change user to www-data
# Install packages needed to run Conduit
RUN apk add --no-cache \
ca-certificates \
curl \
libgcc
# Create a volume for the database, to persist its contents
VOLUME ["/srv/conduit/.local/share/conduit"]
# Test if Conduit is still alive, uses the same endpoint as Element
HEALTHCHECK --start-period=2s CMD curl --fail -s http://localhost:8000/_matrix/client/versions || curl -k --fail -s https://localhost:8000/_matrix/client/versions || exit 1
# Set user to www-data
USER www-data
# Set container home directory
WORKDIR /srv/conduit
# Run Conduit and print backtraces on panics
ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/srv/conduit/conduit" ]
# Run Conduit
ENTRYPOINT [ "/srv/conduit/conduit" ]

README.md (89 changes)

@@ -3,42 +3,90 @@
#### What is the goal?
An efficient Matrix homeserver that's easy to set up and just works. You can install
A fast Matrix homeserver that's easy to set up and just works. You can install
it on a mini-computer like the Raspberry Pi to host Matrix for your family,
friends or company.
#### Can I try it out?
Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
example) and registering on the `conduit.rs` homeserver.
Yes! Just open a Matrix client (<https://app.element.io> or Element Android for
example) and register on the `https://conduit.koesters.xyz` homeserver.
It is hosted on an ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which
was used in the Samsung Galaxy S5. It joined many big rooms including Matrix
HQ.
#### What is it built on?
- [Ruma](https://www.ruma.io): Useful structures for endpoint requests and
responses that can be (de)serialized
- [Sled](https://github.com/spacejam/sled): A simple (key, value) database with
good performance
- [Rocket](https://rocket.rs): A flexible web framework
#### What is the current status?
As of 2021-09-01, Conduit is Beta, meaning you can join and participate in most
Matrix rooms, but not all features are supported and you might run into bugs
from time to time.
Conduit can already be used to chat with other users on Conduit, chat with users
from other Matrix servers and even to chat with users on other platforms using
appservices. When chatting with users on the same Conduit server, everything
should work assuming you use a compatible client.
**You should not join Matrix rooms without asking the admins first.** We do not
know whether Conduit is safe for general use yet, so you should assume there is
some chance that it breaks rooms permanently for all participating users. We
are not aware of such a bug today, but we would like to do more testing.
There are still a few important features missing:
- E2EE verification over federation
- Outgoing read receipts, typing, presence over federation
- Database stability (currently you might have to do manual upgrades or even wipe the db for new versions)
- End-to-end encrypted chats over federation
- Typing, presence, read receipts etc. over federation
- Lots of testing
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
#### How can I deploy my own?
Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)\
Debian package: [debian/README.Debian](debian/README.Debian)\
Docker: [docker/README.md](docker/README.md)
##### Deploy
Download or compile a Conduit binary, set up the config and call it from somewhere like a systemd script. [Read
more](DEPLOY.md)
If you want to connect an Appservice to Conduit, take a look at the [Appservice Guide](APPSERVICES.md).
##### Deploy using a Debian package
You need to have the `deb` helper command installed that creates Debian packages from Cargo projects (see [cargo-deb](https://github.com/mmstick/cargo-deb/) for more info):
```shell
$ cargo install cargo-deb
```
Then, you can create and install a Debian package at a whim:
```shell
$ cargo deb
$ dpkg -i target/debian/matrix-conduit_0.1.0_amd64.deb
```
This will build, package, install, configure and start Conduit. [Read more](debian/README.Debian).
If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).
Note that `cargo deb` supports [cross-compilation](https://github.com/mmstick/cargo-deb/#cross-compilation) too!
Official Debian packages will follow once Conduit starts to have stable releases.
##### Deploy using Docker
Pull and run the docker image with
``` bash
docker pull matrixconduit/matrix-conduit:latest
docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest
```
> <b>Note:</b> You also need to supply a `conduit.toml` config file, you can find an example [here](./conduit-example.toml).
> Or you can pass in `-e CONDUIT_CONFIG=""` and configure Conduit purely with env vars.
Or build and run it with docker or docker-compose. [Read more](docker/README.md)
#### How can I contribute?
@@ -50,17 +98,6 @@ If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md]
3. Fork the repo and work on the issue. #conduit:nordgedanken.dev is happy to help :)
4. Submit a MR
#### Thanks to
Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project.
Thanks to the contributors to Conduit and all libraries we use, for example:
- Ruma: A clean library for the Matrix Spec in Rust
- Rocket: A flexible web framework
#### Donate
Liberapay: <https://liberapay.com/timokoesters/>\

conduit-example.toml (13 changes)

@@ -12,7 +12,7 @@
#server_name = "your.server.name"
# This is the only directory where Conduit will save its data
database_path = "/var/lib/conduit/"
database_path = "/var/lib/conduit/conduit.db"
# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
@@ -22,8 +22,8 @@ port = 6167
# Max size for uploads
max_request_size = 20_000_000 # in bytes
# Enables registration. If set to false, no users can register on this server.
allow_registration = true
# Disable registration. No new users will be able to register on this server
#allow_registration = false
# Disable encryption, so no new encrypted rooms can be created
# Note: existing rooms will continue to work
@@ -35,14 +35,9 @@ allow_registration = true
trusted_servers = ["matrix.org"]
#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
#workers = 4 # default: cpu core count * 2
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
proxy = "none" # more examples can be found at src/database/proxy.rs:6
# The total amount of memory that the database will use.
#db_cache_capacity_mb = 200

debian/postinst (vendored, 8 changes)

@@ -62,8 +62,8 @@ port = ${CONDUIT_PORT}
# Max size for uploads
max_request_size = 20_000_000 # in bytes
# Enables registration. If set to false, no users can register on this server.
allow_registration = true
# Disable registration. No new users will be able to register on this server.
#allow_registration = false
# Disable encryption, so no new encrypted rooms can be created.
# Note: Existing rooms will continue to work.
@@ -73,12 +73,10 @@ allow_registration = true
# Enable jaeger to support monitoring and troubleshooting through jaeger.
#allow_jaeger = false
#cache_capacity = 1073741824 # in bytes, 1024 * 1024 * 1024
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
#workers = 4 # default: cpu core count * 2
# The total amount of memory that the database will use.
#db_cache_capacity_mb = 200
EOF
fi
;;

docker-compose.yml (30 changes)

@@ -3,38 +3,38 @@ version: '3'
services:
homeserver:
### If you already built the Conduit image with 'docker build' or want to use a registry image,
### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image,
### then you are ready to go.
image: matrixconduit/matrix-conduit:latest
#image: matrixconduit/matrix-conduit:latest
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
# build:
# context: .
# args:
# CREATED: '2021-03-16T08:18:27Z'
# VERSION: '0.1.0'
# LOCAL: 'false'
# GIT_REF: origin/master
build:
context: .
args:
CREATED: '2021-03-16T08:18:27Z'
VERSION: '0.1.0'
LOCAL: 'false'
GIT_REF: origin/master
restart: unless-stopped
ports:
- 8448:6167
- 8448:8000
volumes:
- db:/srv/conduit/.local/share/conduit
### Uncomment if you want to use conduit.toml to configure Conduit
### Note: Set env vars will override conduit.toml values
# - ./conduit.toml:/srv/conduit/conduit.toml
environment:
CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name
CONDUIT_SERVER_NAME: localhost:8000 # replace with your own name
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
CONDUIT_ALLOW_REGISTRATION: 'true'
### Uncomment and change values as desired
# CONDUIT_ADDRESS: 0.0.0.0
# CONDUIT_PORT: 6167
# CONDUIT_ADDRESS: 127.0.0.1
# CONDUIT_PORT: 8000
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
# CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off"
# CONDUIT_ALLOW_JAEGER: 'false'
# CONDUIT_ALLOW_REGISTRATION : 'false'
# CONDUIT_ALLOW_ENCRYPTION: 'false'
# CONDUIT_ALLOW_FEDERATION: 'false'
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
@@ -56,4 +56,4 @@ services:
# - homeserver
volumes:
db:
db:

docker/README.md (113 changes)

@@ -2,120 +2,73 @@
> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.
## Docker
### Build & Dockerfile
The Dockerfile provided by Conduit has two stages, each of which creates an image.
1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository.
2. **Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions.
2. **Runtime:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions.
The Dockerfile includes a few build arguments that should be supplied when building it.
``` Dockerfile
ARG LOCAL=false
ARG CREATED
ARG VERSION
ARG GIT_REF=origin/master
```
- **CREATED:** Date and time as string (date-time as defined by RFC 3339). Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.created`. Supply it like this: `$(date -u +'%Y-%m-%dT%H:%M:%SZ')`
- **VERSION:** The SemVer version of Conduit, which is in the image. Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.version`. If you have a `Cargo.toml` in your build context, you can get it with `$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)`
- **LOCAL:** *(Optional)* A boolean value, specifies if the local build context should be used, or if the official repository will be cloned. If not supplied with the build command, it will default to `false`.
- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. If not supplied with the build command, it will default to `origin/master`.
To build the image you can use the following command
```bash
docker build --tag matrixconduit/matrix-conduit:latest .
``` bash
docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
```
which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
**Note:** it omits the two optional `build-arg`s.
### Run
After building the image you can simply run it with
```bash
docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest
``` bash
docker run -d -p 8448:8000 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest
```
or you can skip the build step and pull the image from one of the following registries:
| Registry | Image | Size |
| --------------- | --------------------------------------------------------------- | --------------------- |
| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] |
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] |
[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
[gl]: https://gitlab.com/famedly/conduit/container_registry/
[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
For detached mode, you also need to use the `-d` flag. You also need to supply a `conduit.toml` config file, you can find an example [here](../conduit-example.toml).
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.
too pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
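Putting the pieces together, a purely env-configured test run could look like this (a sketch only; the `CONDUIT_*` names follow the convention shown in the `docker-compose.yml` file, and the container-internal port is assumed to be 6167):

```bash
docker run -d --rm \
  -p 8448:6167 \
  -e CONDUIT_CONFIG="" \
  -e CONDUIT_SERVER_NAME="your.server.name" \
  -e CONDUIT_ADDRESS="0.0.0.0" \
  -e CONDUIT_ALLOW_REGISTRATION="true" \
  -v db:/srv/conduit/.local/share/conduit \
  matrixconduit/matrix-conduit:latest
```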
## Docker-compose
If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying
Conduit can be found [here](../DEPLOY.md).
If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) including [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy.
### Build
To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with:
```bash
docker-compose up
``` bash
CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up
```
This will also start the container right afterwards, so if you want it to run in detached mode, you also should use the `-d` flag.
This will also start the container right afterwards, so if you want it to run in detached mode, you also should use the `-d` flag. For possible `build-args`, please take a look at the above `Build & Dockerfile` section.
### Run
If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with:
If you already have built the image, you can just start the container and everything else in the compose file in detached mode with:
```bash
``` bash
docker-compose up -d
```
> **Note:** Don't forget to modify and adjust the compose file to your needs.
### Use Traefik as Proxy
As a container user, you probably know about Traefik. It is an easy-to-use reverse proxy for making containerized apps and services available through the web. With the
two provided files, [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml), it is
equally easy to deploy and use Conduit, with one little caveat. If you already took a look at the files, you will have seen the `well-known` service, and that is
the little caveat. Traefik is simply a proxy and load balancer and is not able to serve any kind of content. For Conduit to federate, we need to either expose ports
`443` and `8448` or serve the two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`.
With the `well-known` service we use a single `nginx` container that will serve those two files.
So, step by step:
1. Copy [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) from the repository and remove `.traefik` from the filenames.
2. Open both files and modify/adjust them to your needs, i.e. change the `CONDUIT_SERVER_NAME` and the volume host mappings.
3. Create the `conduit.toml` config file; an example can be found [here](../conduit-example.toml). Alternatively, set `CONDUIT_CONFIG=""` and configure Conduit purely via env vars.
4. Uncomment the `element-web` service if you want to host your own Element Web client, and create an `element_config.json`.
5. Create the files needed by the `well-known` service.
- `./nginx/matrix.conf` (relative to the compose file; you can change this, but then you also need to change the volume mapping)
```nginx
server {
server_name <SUBDOMAIN>.<DOMAIN>;
listen 80 default_server;
location /.well-known/matrix/ {
root /var/www;
default_type application/json;
add_header Access-Control-Allow-Origin *;
}
}
```
- `./nginx/www/.well-known/matrix/client` (relative to the compose file; you can change this, but then you also need to change the volume mapping)
```json
{
"m.homeserver": {
"base_url": "https://<SUBDOMAIN>.<DOMAIN>"
}
}
```
- `./nginx/www/.well-known/matrix/server` (relative to the compose file; you can change this, but then you also need to change the volume mapping)
```json
{
"m.server": "<SUBDOMAIN>.<DOMAIN>:443"
}
```
6. Run `docker-compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.
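Once everything is up, you can sanity-check the `well-known` service from any machine; both endpoints should return the JSON files created in step 5. Replace the placeholder with your real domain:

```bash
curl https://<SUBDOMAIN>.<DOMAIN>/.well-known/matrix/client
curl https://<SUBDOMAIN>.<DOMAIN>/.well-known/matrix/server
```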

78
docker/ci-binaries-packaging.Dockerfile

@@ -1,78 +0,0 @@
# syntax=docker/dockerfile:1
# ---------------------------------------------------------------------------------------------------------
# This Dockerfile is intended to be built as part of Conduit's CI pipeline.
# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs.
#
# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
# Credit for the original Dockerfile: Weasy666.
# ---------------------------------------------------------------------------------------------------------
FROM docker.io/alpine:3.15.0 AS runner
# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167
# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs.
ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
# Conduit needs:
# ca-certificates: for https
# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
RUN apk add --no-cache \
ca-certificates \
libgcc
ARG CREATED
ARG VERSION
ARG GIT_REF
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
# including a custom label specifying the build command
LABEL org.opencontainers.image.created=${CREATED} \
org.opencontainers.image.authors="Conduit Contributors" \
org.opencontainers.image.title="Conduit" \
org.opencontainers.image.version=${VERSION} \
org.opencontainers.image.vendor="Conduit Contributors" \
org.opencontainers.image.description="A Matrix homeserver written in Rust" \
org.opencontainers.image.url="https://conduit.rs/" \
org.opencontainers.image.revision=${GIT_REF} \
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
org.opencontainers.image.licenses="Apache-2.0" \
org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
org.opencontainers.image.ref.name=""
# Create directory for the database and media files
RUN mkdir -p /srv/conduit/.local/share/conduit
# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64")
# copy the matching binary into this docker image
ARG TARGETPLATFORM
COPY ./$TARGETPLATFORM /srv/conduit/conduit
# Improve security: don't run stuff as root that does not need to run as root:
# Add www-data user and group with UID 82, as used by alpine
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
RUN set -x ; \
addgroup -Sg 82 www-data 2>/dev/null ; \
adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \
addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1
# Change ownership of Conduit files to www-data user and group
RUN chown -cR www-data:www-data /srv/conduit
RUN chmod +x /srv/conduit/healthcheck.sh
# Change user to www-data
USER www-data
# Set container home directory
WORKDIR /srv/conduit
# Run Conduit and print backtraces on panics
ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/srv/conduit/conduit" ]
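For context, this Dockerfile is meant to be driven by the CI pipeline. A rough local equivalent might look like the sketch below; this is hypothetical (the real invocation lives in `.gitlab-ci.yml`), and it assumes the prebuilt binary sits at a path matching the platform string (e.g. `./linux/amd64`). `TARGETPLATFORM` does not need to be passed explicitly, since BuildKit sets it automatically from `--platform`.

```bash
# Hypothetical local build; the CI jobs pass the same build-args
docker buildx build \
  --platform linux/amd64 \
  --build-arg CREATED="$(date -u +'%Y-%m-%dT%H:%M:%SZ')" \
  --build-arg VERSION="$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
  --build-arg GIT_REF="$(git rev-parse HEAD)" \
  -f docker/ci-binaries-packaging.Dockerfile .
```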

23
docker/docker-compose.override.traefik.yml

@@ -10,29 +10,6 @@ services:
- "traefik.http.routers.to-conduit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Conduit is hosted
- "traefik.http.routers.to-conduit.tls=true"
- "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt"
- "traefik.http.routers.to-conduit.middlewares=cors-headers@docker"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"
# We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container
# to serve those two as static files. If you want to use a different way, delete or comment out the below service, here
# and in the docker-compose file.
well-known:
labels:
- "traefik.enable=true"
- "traefik.docker.network=proxy"
- "traefik.http.routers.to-matrix-wellknown.rule=Host(`<SUBDOMAIN>.<DOMAIN>`) && PathPrefix(`/.well-known/matrix`)"
- "traefik.http.routers.to-matrix-wellknown.tls=true"
- "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt"
- "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"
### Uncomment this if you uncommented Element-Web App in the docker-compose.yml
# element-web:

26
docker/docker-compose.traefik.yml

@@ -12,8 +12,8 @@ services:
# build:
# context: .
# args:
# CREATED: '2021-03-16T08:18:27Z'
# VERSION: '0.1.0'
# CREATED:
# VERSION:
# LOCAL: 'false'
# GIT_REF: origin/master
restart: unless-stopped
@@ -25,32 +25,22 @@ services:
networks:
- proxy
environment:
CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name
CONDUIT_SERVER_NAME: localhost:8000 # replace with your own name
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
CONDUIT_ALLOW_REGISTRATION : 'true'
### Uncomment and change values as desired
# CONDUIT_ADDRESS: 0.0.0.0
# CONDUIT_PORT: 6167
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
# CONDUIT_ADDRESS: 127.0.0.1
# CONDUIT_PORT: 8000
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if want to configure purely by env vars, set this to an empty string ''
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
# CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off"
# CONDUIT_ALLOW_JAEGER: 'false'
# CONDUIT_ALLOW_REGISTRATION : 'false'
# CONDUIT_ALLOW_ENCRYPTION: 'false'
# CONDUIT_ALLOW_FEDERATION: 'false'
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
# CONDUIT_WORKERS: 10
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
# We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container
# to serve those two as static files. If you want to use a different way, delete or comment out the below service, here
# and in the docker-compose override file.
well-known:
image: nginx:latest
restart: unless-stopped
volumes:
- ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files
- ./nginx/www:/var/www/ # location of the client and server .well-known-files
### Uncomment if you want to use your own Element-Web App.
### Note: You need to provide a config.json for Element and you also need a second
### Domain or Subdomain for the communication between Element and Conduit
@@ -69,7 +59,7 @@ volumes:
db:
networks:
# This is the network Traefik listens to, if your network has a different
# This is the network Traefik listens to, if you network has a different
# name, don't forget to change it here and in the docker-compose.override.yml
proxy:
external: true

13
docker/healthcheck.sh

@@ -1,13 +0,0 @@
#!/bin/sh
# If the port is not specified as env var, take it from the config file
if [ -z ${CONDUIT_PORT} ]; then
CONDUIT_PORT=$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')
fi
# The actual health check.
# We first try to get a response over HTTP; if that fails we try HTTPS, and if that also fails, we exit with code 1.
# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
exit 1
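The same check can be run by hand against a local instance, which is handy when debugging. A minimal sketch, assuming the default port from the example config:

```bash
CONDUIT_PORT=6167 ./docker/healthcheck.sh && echo "Conduit is healthy"
```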

2
rust-toolchain

@@ -1 +1 @@
1.53
1.50.0

12
src/appservice_server.rs

@@ -1,5 +1,6 @@
use crate::{utils, Error, Result};
use bytes::BytesMut;
use log::warn;
use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken};
use std::{
convert::{TryFrom, TryInto},
@@ -7,9 +8,8 @@ use std::{
mem,
time::Duration,
};
use tracing::warn;
pub(crate) async fn send_request<T: OutgoingRequest>(
pub async fn send_request<T: OutgoingRequest>(
globals: &crate::database::globals::Globals,
registration: serde_yaml::Value,
request: T,
@@ -21,7 +21,7 @@ where
let hs_token = registration.get("hs_token").unwrap().as_str().unwrap();
let mut http_request = request
.try_into_http_request::<BytesMut>(destination, SendAccessToken::IfRequired(""))
.try_into_http_request::<BytesMut>(&destination, SendAccessToken::IfRequired(""))
.unwrap()
.map(|body| body.freeze());
@@ -46,11 +46,7 @@ where
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
let url = reqwest_request.url().clone();
let mut response = globals
.reqwest_client()?
.build()?
.execute(reqwest_request)
.await?;
let mut response = globals.reqwest_client().execute(reqwest_request).await?;
// reqwest::Response -> http::Response conversion
let status = response.status();

310
src/client_server/account.rs

@@ -1,38 +1,28 @@
use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
use super::{State, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma};
use log::info;
use ruma::{
api::client::{
error::ErrorKind,
r0::{
account::{
change_password, deactivate, get_3pids, get_username_availability, register,
whoami, ThirdPartyIdRemovalStatus,
change_password, deactivate, get_username_availability, register, whoami,
ThirdPartyIdRemovalStatus,
},
uiaa::{AuthFlow, AuthType, UiaaInfo},
uiaa::{AuthFlow, UiaaInfo},
},
},
events::{
room::{
canonical_alias::RoomCanonicalAliasEventContent,
create::RoomCreateEventContent,
guest_access::{GuestAccess, RoomGuestAccessEventContent},
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
join_rules::{JoinRule, RoomJoinRulesEventContent},
member::{MembershipState, RoomMemberEventContent},
message::RoomMessageEventContent,
name::RoomNameEventContent,
power_levels::RoomPowerLevelsEventContent,
topic::RoomTopicEventContent,
canonical_alias, guest_access, history_visibility, join_rules, member, message, name,
topic,
},
EventType,
},
identifiers::RoomName,
push, RoomAliasId, RoomId, RoomVersionId, UserId,
};
use serde_json::value::to_raw_value;
use tracing::info;
use register::RegistrationKind;
#[cfg(feature = "conduit_bin")]
@@ -44,19 +34,15 @@ const GUEST_NAME_LENGTH: usize = 10;
///
/// Checks if a username is valid and available on this server.
///
/// Conditions for returning true:
/// - The user id is not historical
/// - The server name of the user id matches this server
/// - No user or appservice on this server already claimed this username
///
/// Note: This will not reserve the username, so the username might become invalid when trying to register
/// - Returns true if no user or appservice on this server claimed this username
/// - This will not reserve the username, so the username might become invalid when trying to register
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/register/available", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_register_available_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_username_availability::Request<'_>>,
) -> ConduitResult<get_username_availability::Response> {
// Validate user id
@@ -88,22 +74,18 @@ pub async fn get_register_available_route(
///
/// Register an account on this homeserver.
///
/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html)
/// to check if the user id is valid and available.
///
/// - Only works if registration is enabled
/// - If type is guest: ignores all parameters except initial_device_display_name
/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage)
/// - If type is not guest and no username is given: Always fails after UIAA check
/// - Creates a new account and populates it with default account data
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
/// - Returns the device id and access_token unless `inhibit_login` is true
/// - When registering a guest account, all parameters except initial_device_display_name will be
/// ignored
/// - Creates a new account and a device for it
/// - The account will be populated with default account data
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/register", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn register_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<register::Request<'_>>,
) -> ConduitResult<register::Response> {
if !db.globals.allow_registration() && !body.from_appservice {
@@ -141,7 +123,7 @@ pub async fn register_route(
))?;
// Check if username is creative enough
if db.users.exists(&user_id)? {
if !missing_username && db.users.exists(&user_id)? {
return Err(Error::BadRequest(
ErrorKind::UserInUse,
"Desired user ID is already taken.",
@@ -151,7 +133,7 @@ pub async fn register_route(
// UIAA
let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::Dummy],
stages: vec!["m.login.dummy".to_owned()],
}],
completed: Vec::new(),
params: Default::default(),
@@ -174,18 +156,16 @@ pub async fn register_route(
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
} else {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
db.uiaa.create(
&UserId::parse_with_server_name("", db.globals.server_name())
.expect("we know this is valid"),
"".into(),
&uiaainfo,
&json,
&body.json_body.expect("body is json"),
)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
}
@@ -205,12 +185,7 @@ pub async fn register_route(
// Create user
db.users.create(&user_id, password)?;
// Default to pretty displayname
let displayname = format!("{} ⚡", user_id.localpart());
db.users
.set_displayname(&user_id, Some(displayname.clone()))?;
// Initial account data
// Initial data
db.account_data.update(
None,
&user_id,
@@ -223,7 +198,6 @@ pub async fn register_route(
&db.globals,
)?;
// Inhibit login does not work for guests
if !is_guest && body.inhibit_login {
return Ok(register::Response {
access_token: None,
@@ -244,7 +218,7 @@ pub async fn register_route(
// Generate new token for the device
let token = utils::random_string(TOKEN_LENGTH);
// Create device for this account
// Add device
db.users.create_device(
&user_id,
&device_id,
@@ -252,7 +226,7 @@ pub async fn register_route(
body.initial_device_display_name.clone(),
)?;
// If this is the first user on this server, create the admin room
// If this is the first user on this server, create the admins room
if db.users.count()? == 1 {
// Create a user for the server
let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name())
@@ -262,28 +236,16 @@ pub async fn register_route(
let room_id = RoomId::new(db.globals.server_name());
db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?;
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
let mut content = RoomCreateEventContent::new(conduit_user.clone());
let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone());
content.federate = true;
content.predecessor = None;
content.room_version = RoomVersionId::V6;
content.room_version = RoomVersionId::Version6;
// 1. The room create event
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomCreate,
content: to_raw_value(&content).expect("event is valid, we just created it"),
content: serde_json::to_value(content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
@@ -291,22 +253,18 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
// 2. Make conduit bot join
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
content: serde_json::to_value(member::MemberEventContent {
membership: member::MembershipState::Join,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
@@ -316,7 +274,6 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
// 3. Power levels
@@ -327,10 +284,12 @@ pub async fn register_route(
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
content: serde_json::to_value(
ruma::events::room::power_levels::PowerLevelsEventContent {
users,
..Default::default()
},
)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
@@ -339,15 +298,16 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
// 4.1 Join Rules
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomJoinRules,
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
.expect("event is valid, we just created it"),
content: serde_json::to_value(join_rules::JoinRulesEventContent::new(
join_rules::JoinRule::Invite,
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
@@ -355,16 +315,17 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
// 4.2 History Visibility
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomHistoryVisibility,
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
HistoryVisibility::Shared,
))
content: serde_json::to_value(
history_visibility::HistoryVisibilityEventContent::new(
history_visibility::HistoryVisibility::Shared,
),
)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
@@ -373,15 +334,16 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
// 4.3 Guest Access
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomGuestAccess,
content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
.expect("event is valid, we just created it"),
content: serde_json::to_value(guest_access::GuestAccessEventContent::new(
guest_access::GuestAccess::Forbidden,
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
@@ -389,17 +351,18 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
// 6. Events implied by name and topic
let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name()))
.expect("Room name is valid");
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
.expect("event is valid, we just created it"),
content: serde_json::to_value(
name::NameEventContent::new("Admin Room".to_owned()).map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.")
})?,
)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
@@ -407,13 +370,12 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomTopic,
content: to_raw_value(&RoomTopicEventContent {
content: serde_json::to_value(topic::TopicEventContent {
topic: format!("Manage {}", db.globals.server_name()),
})
.expect("event is valid, we just created it"),
@@ -424,18 +386,17 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
// Room alias
let alias: Box<RoomAliasId> = format!("#admins:{}", db.globals.server_name())
let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent {
content: serde_json::to_value(canonical_alias::CanonicalAliasEventContent {
alias: Some(alias.clone()),
alt_aliases: Vec::new(),
})
@@ -447,7 +408,6 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?;
@@ -456,15 +416,12 @@ pub async fn register_route(
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Invite,
content: serde_json::to_value(member::MemberEventContent {
membership: member::MembershipState::Invite,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
@@ -474,20 +431,16 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: Some(displayname),
content: serde_json::to_value(member::MemberEventContent {
membership: member::MembershipState::Join,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
@@ -497,16 +450,15 @@ pub async fn register_route(
&user_id,
&room_id,
&db,
&state_lock,
)?;
// Send welcome message
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMessage,
content: to_raw_value(&RoomMessageEventContent::text_html(
"## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(),
"<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>\n".to_owned(),
content: serde_json::to_value(message::MessageEventContent::text_html(
"Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing `/join #conduit:matrix.org`. **Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.** Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(),
"Thanks for trying out Conduit! This software is still in development, so expect many bugs and missing features. If you have federation enabled, you can join the Conduit chat room by typing <code>/join #conduit:matrix.org</code>. <strong>Important: Please don't join any other Matrix rooms over federation without permission from the room's admins.</strong> Some actions might trigger bugs in other server implementations, breaking the chat for everyone else.".to_owned(),
))
.expect("event is valid, we just created it"),
unsigned: None,
@@ -516,13 +468,12 @@ pub async fn register_route(
&conduit_user,
&room_id,
&db,
&state_lock,
)?;
}
info!("{} registered on this server", user_id);
db.flush()?;
db.flush().await?;
Ok(register::Response {
access_token: Some(token),
@@ -536,23 +487,16 @@ pub async fn register_route(
///
/// Changes the password of this account.
///
/// - Requires UIAA to verify user password
/// - Changes the password of the sender user
/// - The password hash is calculated using argon2 with 32 character salt, the plain password is
/// not saved
///
/// If logout_devices is true it does the following for each device except the sender device:
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
/// - Invalidates all other access tokens if logout_devices is true
/// - Deletes all other devices and most of their data (to-device events, last seen, etc.) if
/// logout_devices is true
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/account/password", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn change_password_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<change_password::Request<'_>>,
) -> ConduitResult<change_password::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -560,7 +504,7 @@ pub async fn change_password_route(
let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::Password],
stages: vec!["m.login.password".to_owned()],
}],
completed: Vec::new(),
params: Default::default(),
@@ -570,7 +514,7 @@ pub async fn change_password_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = db.uiaa.try_auth(
sender_user,
&sender_user,
sender_device,
auth,
&uiaainfo,
@@ -581,40 +525,42 @@ pub async fn change_password_route(
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
} else {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
db.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
db.uiaa.create(
&sender_user,
&sender_device,
&uiaainfo,
&body.json_body.expect("body is json"),
)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
db.users
.set_password(sender_user, Some(&body.new_password))?;
.set_password(&sender_user, Some(&body.new_password))?;
if body.logout_devices {
// Logout all devices except the current one
for id in db
.users
.all_device_ids(sender_user)
.all_device_ids(&sender_user)
.filter_map(|id| id.ok())
.filter(|id| id != sender_device)
{
db.users.remove_device(sender_user, &id)?;
db.users.remove_device(&sender_user, &id)?;
}
}
db.flush()?;
db.flush().await?;
Ok(change_password::Response {}.into())
Ok(change_password::Response.into())
}
/// # `GET _matrix/client/r0/account/whoami`
///
/// Get user_id of the sender user.
/// Get user_id of this account.
///
/// Note: Also works for Application Services
/// - Also works for Application Services
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/account/whoami", data = "<body>")
@@ -630,13 +576,11 @@ pub async fn whoami_route(body: Ruma<whoami::Request>) -> ConduitResult<whoami::
/// # `POST /_matrix/client/r0/account/deactivate`
///
/// Deactivate sender user account.
/// Deactivate this user's account
///
/// - Leaves all rooms and rejects all invitations
/// - Invalidates all access tokens
/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets all to-device events
/// - Triggers device list updates
/// - Deletes all devices
/// - Removes ability to log in again
#[cfg_attr(
feature = "conduit_bin",
@@ -644,7 +588,7 @@ pub async fn whoami_route(body: Ruma<whoami::Request>) -> ConduitResult<whoami::
)]
#[tracing::instrument(skip(db, body))]
pub async fn deactivate_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<deactivate::Request<'_>>,
) -> ConduitResult<deactivate::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -652,7 +596,7 @@ pub async fn deactivate_route(
let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::Password],
stages: vec!["m.login.password".to_owned()],
}],
completed: Vec::new(),
params: Default::default(),
@@ -662,8 +606,8 @@ pub async fn deactivate_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = db.uiaa.try_auth(
sender_user,
sender_device,
&sender_user,
&sender_device,
auth,
&uiaainfo,
&db.users,
@@ -673,71 +617,52 @@ pub async fn deactivate_route(
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
} else {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
db.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
db.uiaa.create(
&sender_user,
&sender_device,
&uiaainfo,
&body.json_body.expect("body is json"),
)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
// Leave all joined rooms and reject all invitations
// TODO: work over federation invites
let all_rooms = db
.rooms
.rooms_joined(sender_user)
.chain(
db.rooms
.rooms_invited(sender_user)
.map(|t| t.map(|(r, _)| r)),
)
.collect::<Vec<_>>();
for room_id in all_rooms {
for room_id in db.rooms.rooms_joined(&sender_user).chain(
db.rooms
.rooms_invited(&sender_user)
.map(|t| t.map(|(r, _)| r)),
) {
let room_id = room_id?;
let event = RoomMemberEventContent {
membership: MembershipState::Leave,
let event = member::MemberEventContent {
membership: member::MembershipState::Leave,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
};
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMember,
content: to_raw_value(&event).expect("event is valid, we just created it"),
content: serde_json::to_value(event).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(sender_user.to_string()),
redacts: None,
},
sender_user,
&sender_user,
&room_id,
&db,
&state_lock,
)?;
}
// Remove devices and mark account as deactivated
db.users.deactivate_account(sender_user)?;
db.users.deactivate_account(&sender_user)?;
info!("{} deactivated their account", sender_user);
db.flush()?;
db.flush().await?;
Ok(deactivate::Response {
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
@@ -745,19 +670,16 @@ pub async fn deactivate_route(
.into())
}
/// # `GET _matrix/client/r0/account/3pid`
///
/// Get a list of third party identifiers associated with this account.
///
/// - Currently always returns empty list
/*/
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/account/3pid", data = "<body>")
)]
pub async fn third_party_route(
body: Ruma<get_3pids::Request>,
) -> ConduitResult<get_3pids::Response> {
let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
body: Ruma<account::add_3pid::Request<'_>>,
) -> ConduitResult<account::add_3pid::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
Ok(get_3pids::Response::new(Vec::new()).into())
Ok(account::add_3pid::Response::default().into())
}
*/
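As a side note on the UIAA handling above: on the first `/register` request without `auth`, the server stores a fresh session and replies with the available flows; the client then retries with that session id to complete the dummy stage. A rough curl sketch against a local instance (hostname, credentials, and the session id are placeholders):

```bash
# First request: no auth -> 401 with a UIAA session id and the m.login.dummy flow
curl -X POST http://localhost:6167/_matrix/client/r0/register \
  -H "Content-Type: application/json" \
  -d '{"username": "alice", "password": "secret"}'

# Retry with the returned session id to complete the dummy stage
curl -X POST http://localhost:6167/_matrix/client/r0/register \
  -H "Content-Type: application/json" \
  -d '{"username": "alice", "password": "secret", "auth": {"type": "m.login.dummy", "session": "<session id>"}}'
```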

54
src/client_server/alias.rs

@@ -1,4 +1,7 @@
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Ruma};
use std::sync::Arc;
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use regex::Regex;
use ruma::{
api::{
@@ -15,25 +18,15 @@ use ruma::{
#[cfg(feature = "conduit_bin")]
use rocket::{delete, get, put};
/// # `PUT /_matrix/client/r0/directory/room/{roomAlias}`
///
/// Creates a new room alias on this server.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/directory/room/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn create_alias_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<create_alias::Request<'_>>,
) -> ConduitResult<create_alias::Response> {
if body.room_alias.server_name() != db.globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Alias is from another server.",
));
}
if db.rooms.id_from_alias(&body.room_alias)?.is_some() {
return Err(Error::Conflict("Alias already exists."));
}
@@ -41,60 +34,40 @@ pub async fn create_alias_route(
db.rooms
.set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?;
db.flush()?;
db.flush().await?;
Ok(create_alias::Response::new().into())
}
/// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}`
///
/// Deletes a room alias from this server.
///
/// - TODO: additional access control checks
/// - TODO: Update canonical alias event
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/r0/directory/room/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn delete_alias_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<delete_alias::Request<'_>>,
) -> ConduitResult<delete_alias::Response> {
if body.room_alias.server_name() != db.globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Alias is from another server.",
));
}
db.rooms.set_alias(&body.room_alias, None, &db.globals)?;
// TODO: update alt_aliases?
db.flush()?;
db.flush().await?;
Ok(delete_alias::Response::new().into())
}
/// # `GET /_matrix/client/r0/directory/room/{roomAlias}`
///
/// Resolve an alias locally or over federation.
///
/// - TODO: Suggest more servers to join via
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/directory/room/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_alias_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_alias::Request<'_>>,
) -> ConduitResult<get_alias::Response> {
get_alias_helper(&db, &body.room_alias).await
}
pub(crate) async fn get_alias_helper(
pub async fn get_alias_helper(
db: &Database,
room_alias: &RoomAliasId,
) -> ConduitResult<get_alias::Response> {
@@ -112,10 +85,11 @@ pub(crate) async fn get_alias_helper(
}
let mut room_id = None;
match db.rooms.id_from_alias(room_alias)? {
match db.rooms.id_from_alias(&room_alias)? {
Some(r) => room_id = Some(r),
None => {
for (_id, registration) in db.appservice.all()? {
let iter = db.appservice.iter_all()?;
for (_id, registration) in iter.filter_map(|r| r.ok()) {
let aliases = registration
.get("namespaces")
.and_then(|ns| ns.get("aliases"))
@@ -140,7 +114,7 @@ pub(crate) async fn get_alias_helper(
.await
.is_ok()
{
room_id = Some(db.rooms.id_from_alias(room_alias)?.ok_or_else(|| {
room_id = Some(db.rooms.id_from_alias(&room_alias)?.ok_or_else(|| {
Error::bad_config("Appservice lied to us. Room does not exist.")
})?);
break;
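For illustration, the three alias routes above correspond to the usual directory calls. A hedged curl sketch (server address, alias, room id, and token are placeholders; note the alias must be percent-encoded in the URL):

```bash
# Create #myroom:localhost -> points at a room id (requires an access token)
curl -X PUT "http://localhost:6167/_matrix/client/r0/directory/room/%23myroom%3Alocalhost?access_token=<token>" \
  -H "Content-Type: application/json" \
  -d '{"room_id": "<room id>"}'

# Resolve the alias, locally or over federation
curl "http://localhost:6167/_matrix/client/r0/directory/room/%23myroom%3Alocalhost"

# Delete the alias again
curl -X DELETE "http://localhost:6167/_matrix/client/r0/directory/room/%23myroom%3Alocalhost?access_token=<token>"
```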

183
src/client_server/backup.rs

@@ -1,4 +1,7 @@
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use std::sync::Arc;
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::api::client::{
error::ErrorKind,
r0::backup::{
@@ -12,66 +15,57 @@ use ruma::api::client::{
#[cfg(feature = "conduit_bin")]
use rocket::{delete, get, post, put};
/// # `POST /_matrix/client/r0/room_keys/version`
///
/// Creates a new backup.
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/unstable/room_keys/version", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn create_backup_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<create_backup::Request>,
) -> ConduitResult<create_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let version = db
.key_backups
.create_backup(sender_user, &body.algorithm, &db.globals)?;
.create_backup(&sender_user, &body.algorithm, &db.globals)?;
db.flush()?;
db.flush().await?;
Ok(create_backup::Response { version }.into())
}
/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
///
/// Update information about an existing backup. Only `auth_data` can be modified.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn update_backup_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<update_backup::Request<'_>>,
) -> ConduitResult<update_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
db.key_backups
.update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?;
.update_backup(&sender_user, &body.version, &body.algorithm, &db.globals)?;
db.flush()?;
db.flush().await?;
Ok(update_backup::Response {}.into())
Ok(update_backup::Response.into())
}
/// # `GET /_matrix/client/r0/room_keys/version`
///
/// Get information about the latest backup version.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/unstable/room_keys/version", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_latest_backup_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_latest_backup::Request>,
) -> ConduitResult<get_latest_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let (version, algorithm) =
db.key_backups
.get_latest_backup(sender_user)?
.get_latest_backup(&sender_user)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"Key backup does not exist.",
@@ -86,22 +80,19 @@ pub async fn get_latest_backup_route(
.into())
}
/// # `GET /_matrix/client/r0/room_keys/version`
///
/// Get information about an existing backup.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_backup_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_backup::Request<'_>>,
) -> ConduitResult<get_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let algorithm = db
.key_backups
.get_backup(sender_user, &body.version)?
.get_backup(&sender_user, &body.version)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"Key backup does not exist.",
@@ -116,73 +107,50 @@ pub async fn get_backup_route(
.into())
}
/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
///
/// Delete an existing key backup.
///
/// - Deletes both information about the backup, as well as all key data related to the backup
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn delete_backup_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<delete_backup::Request<'_>>,
) -> ConduitResult<delete_backup::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
db.key_backups.delete_backup(sender_user, &body.version)?;
db.key_backups.delete_backup(&sender_user, &body.version)?;
db.flush()?;
db.flush().await?;
Ok(delete_backup::Response {}.into())
Ok(delete_backup::Response.into())
}
/// # `PUT /_matrix/client/r0/room_keys/keys`
///
/// Add the received backup keys to the database.
///
/// - Only manipulating the most recently created version of the backup is allowed
/// - Adds the keys to the backup
/// - Returns the new number of keys in this backup and the etag
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/unstable/room_keys/keys", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn add_backup_keys_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<add_backup_keys::Request<'_>>,
) -> ConduitResult<add_backup_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if Some(&body.version)
!= db
.key_backups
.get_latest_backup_version(sender_user)?
.as_ref()
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"You may only manipulate the most recently created version of the backup.",
));
}
for (room_id, room) in &body.rooms {
for (session_id, key_data) in &room.sessions {
db.key_backups.add_key(
sender_user,
&sender_user,
&body.version,
room_id,
session_id,
key_data,
&room_id,
&session_id,
&key_data,
&db.globals,
)?
}
}
db.flush()?;
db.flush().await?;
Ok(add_backup_keys::Response {
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
@@ -191,48 +159,30 @@ pub async fn add_backup_keys_route(
.into())
}
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
///
/// Add the received backup keys to the database.
///
/// - Only manipulating the most recently created version of the backup is allowed
/// - Adds the keys to the backup
/// - Returns the new number of keys in this backup and the etag
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn add_backup_key_sessions_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<add_backup_key_sessions::Request<'_>>,
) -> ConduitResult<add_backup_key_sessions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if Some(&body.version)
!= db
.key_backups
.get_latest_backup_version(sender_user)?
.as_ref()
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"You may only manipulate the most recently created version of the backup.",
));
}
for (session_id, key_data) in &body.sessions {
db.key_backups.add_key(
sender_user,
&sender_user,
&body.version,
&body.room_id,
session_id,
key_data,
&session_id,
&key_data,
&db.globals,
)?
}
db.flush()?;
db.flush().await?;
Ok(add_backup_key_sessions::Response {
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
@@ -241,38 +191,20 @@ pub async fn add_backup_key_sessions_route(
.into())
}
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
///
/// Add the received backup key to the database.
///
/// - Only manipulating the most recently created version of the backup is allowed
/// - Adds the keys to the backup
/// - Returns the new number of keys in this backup and the etag
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn add_backup_key_session_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<add_backup_key_session::Request<'_>>,
) -> ConduitResult<add_backup_key_session::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if Some(&body.version)
!= db
.key_backups
.get_latest_backup_version(sender_user)?
.as_ref()
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"You may only manipulate the most recently created version of the backup.",
));
}
db.key_backups.add_key(
sender_user,
&sender_user,
&body.version,
&body.room_id,
&body.session_id,
@@ -280,7 +212,7 @@ pub async fn add_backup_key_session_route(
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(add_backup_key_session::Response {
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
@@ -289,63 +221,54 @@ pub async fn add_backup_key_session_route(
.into())
}
/// # `GET /_matrix/client/r0/room_keys/keys`
///
/// Retrieves all keys from the backup.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/unstable/room_keys/keys", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_backup_keys_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_backup_keys::Request<'_>>,
) -> ConduitResult<get_backup_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let rooms = db.key_backups.get_all(sender_user, &body.version)?;
let rooms = db.key_backups.get_all(&sender_user, &body.version)?;
Ok(get_backup_keys::Response { rooms }.into())
}
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
///
/// Retrieves all keys from the backup for a given room.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_backup_key_sessions_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_backup_key_sessions::Request<'_>>,
) -> ConduitResult<get_backup_key_sessions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sessions = db
.key_backups
.get_room(sender_user, &body.version, &body.room_id)?;
.get_room(&sender_user, &body.version, &body.room_id)?;
Ok(get_backup_key_sessions::Response { sessions }.into())
}
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
///
/// Retrieves a key from the backup.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_backup_key_session_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_backup_key_session::Request<'_>>,
) -> ConduitResult<get_backup_key_session::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let key_data = db
.key_backups
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
.get_session(&sender_user, &body.version, &body.room_id, &body.session_id)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"Backup key not found for this user's session.",
@@ -354,23 +277,21 @@ pub async fn get_backup_key_session_route(
Ok(get_backup_key_session::Response { key_data }.into())
}
/// # `DELETE /_matrix/client/r0/room_keys/keys`
///
/// Delete the keys from the backup.
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/unstable/room_keys/keys", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn delete_backup_keys_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<delete_backup_keys::Request<'_>>,
) -> ConduitResult<delete_backup_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
db.key_backups.delete_all_keys(sender_user, &body.version)?;
db.key_backups
.delete_all_keys(&sender_user, &body.version)?;
db.flush()?;
db.flush().await?;
Ok(delete_backup_keys::Response {
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
@@ -379,24 +300,21 @@ pub async fn delete_backup_keys_route(
.into())
}
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
///
/// Delete the keys from the backup for a given room.
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn delete_backup_key_sessions_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<delete_backup_key_sessions::Request<'_>>,
) -> ConduitResult<delete_backup_key_sessions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
db.key_backups
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
.delete_room_keys(&sender_user, &body.version, &body.room_id)?;
db.flush()?;
db.flush().await?;
Ok(delete_backup_key_sessions::Response {
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
@@ -405,24 +323,21 @@ pub async fn delete_backup_key_sessions_route(
.into())
}
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
///
/// Delete a key from the backup.
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn delete_backup_key_session_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<delete_backup_key_session::Request<'_>>,
) -> ConduitResult<delete_backup_key_session::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
db.key_backups
.delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?;
.delete_room_key(&sender_user, &body.version, &body.room_id, &body.session_id)?;
db.flush()?;
db.flush().await?;
Ok(delete_backup_key_session::Response {
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
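To put these routes in context: a client first creates a backup version and then uploads keys into it. A rough curl sketch (the routes are mounted under `/unstable` here; token and key material are placeholders, and the `auth_data` shape depends on the chosen algorithm):

```bash
# Create a new backup version
curl -X POST "http://localhost:6167/_matrix/client/unstable/room_keys/version?access_token=<token>" \
  -H "Content-Type: application/json" \
  -d '{"algorithm": "m.megolm_backup.v1.curve25519-aes-sha2", "auth_data": {"public_key": "<key>"}}'
```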

20
src/client_server/capabilities.rs

@@ -1,4 +1,4 @@
use crate::{ConduitResult, Ruma};
use crate::ConduitResult;
use ruma::{
api::client::r0::capabilities::{
get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability,
@@ -12,22 +12,16 @@ use rocket::get;
/// # `GET /_matrix/client/r0/capabilities`
///
/// Get information on the supported feature set and other relevant capabilities of this server.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/capabilities", data = "<_body>")
)]
#[tracing::instrument(skip(_body))]
pub async fn get_capabilities_route(
_body: Ruma<get_capabilities::Request>,
) -> ConduitResult<get_capabilities::Response> {
/// Get information on this server's supported feature set and other relevant capabilities.
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/capabilities"))]
#[tracing::instrument]
pub async fn get_capabilities_route() -> ConduitResult<get_capabilities::Response> {
let mut available = BTreeMap::new();
available.insert(RoomVersionId::V5, RoomVersionStability::Stable);
available.insert(RoomVersionId::V6, RoomVersionStability::Stable);
available.insert(RoomVersionId::Version6, RoomVersionStability::Stable);
let mut capabilities = Capabilities::new();
capabilities.room_versions = RoomVersionsCapability {
default: RoomVersionId::V6,
default: RoomVersionId::Version6,
available,
};
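A quick way to see the advertised room versions on a running instance; a sketch against a hypothetical local server (the endpoint requires authentication, and the response lists the default and available room versions):

```bash
curl "http://localhost:6167/_matrix/client/r0/capabilities?access_token=<token>"
```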

47
src/client_server/config.rs

@@ -1,4 +1,7 @@
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use std::sync::Arc;
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
@@ -16,21 +19,18 @@ use serde_json::{json, value::RawValue as RawJsonValue};
#[cfg(feature = "conduit_bin")]
use rocket::{get, put};
/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
///
/// Sets some account data for the sender user.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_global_account_data_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_global_account_data::Request<'_>>,
) -> ConduitResult<set_global_account_data::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let data: serde_json::Value = serde_json::from_str(body.data.get())
let data = serde_json::from_str::<serde_json::Value>(body.data.get())
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
let event_type = body.event_type.to_string();
@@ -46,14 +46,11 @@ pub async fn set_global_account_data_route(
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(set_global_account_data::Response {}.into())
Ok(set_global_account_data::Response.into())
}
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
///
/// Sets some room account data for the sender user.
#[cfg_attr(
feature = "conduit_bin",
put(
@@ -63,12 +60,12 @@ pub async fn set_global_account_data_route(
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_room_account_data_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_room_account_data::Request<'_>>,
) -> ConduitResult<set_room_account_data::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let data: serde_json::Value = serde_json::from_str(body.data.get())
let data = serde_json::from_str::<serde_json::Value>(body.data.get())
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
let event_type = body.event_type.to_string();
@@ -84,29 +81,27 @@ pub async fn set_room_account_data_route(
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(set_room_account_data::Response {}.into())
Ok(set_room_account_data::Response.into())
}
/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}`
///
/// Gets some account data for the sender user.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_global_account_data_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_global_account_data::Request<'_>>,
) -> ConduitResult<get_global_account_data::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event: Box<RawJsonValue> = db
let event = db
.account_data
.get(None, sender_user, body.event_type.clone().into())?
.get::<Box<RawJsonValue>>(None, sender_user, body.event_type.clone().into())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
db.flush().await?;
let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
@@ -115,9 +110,6 @@ pub async fn get_global_account_data_route(
Ok(get_global_account_data::Response { account_data }.into())
}
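All four account-data routes in this file validate the raw body as JSON before writing anything, and scope entries by an optional room id (global account data uses `None`). A standalone sketch of that validate-then-store contract, assuming the `serde_json` crate and a hypothetical in-memory map in place of Conduit's `account_data` tree:

```rust
use std::collections::HashMap;

/// Hypothetical store: (room scope, event type) -> raw JSON.
/// `None` is the global scope; `Some(room_id)` is per-room account data.
struct AccountData {
    map: HashMap<(Option<String>, String), String>,
}

impl AccountData {
    /// Reject non-JSON payloads up front, mirroring the
    /// `serde_json::from_str::<serde_json::Value>` check in the routes above.
    fn update(&mut self, room: Option<&str>, kind: &str, data: &str) -> Result<(), String> {
        serde_json::from_str::<serde_json::Value>(data)
            .map_err(|_| "Data is invalid.".to_owned())?;
        self.map
            .insert((room.map(|r| r.to_owned()), kind.to_owned()), data.to_owned());
        Ok(())
    }

    fn get(&self, room: Option<&str>, kind: &str) -> Option<&str> {
        self.map
            .get(&(room.map(|r| r.to_owned()), kind.to_owned()))
            .map(String::as_str)
    }
}

fn main() {
    let mut store = AccountData { map: HashMap::new() };
    // Invalid JSON is rejected with the same error the routes return.
    assert!(store.update(None, "m.direct", "{not json").is_err());
    store.update(Some("!r:x"), "m.tag", r#"{"order":1}"#).unwrap();
    assert!(store.get(Some("!r:x"), "m.tag").is_some());
}
```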
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
///
/// Gets some room account data for the sender user.
#[cfg_attr(
feature = "conduit_bin",
get(
@@ -127,19 +119,20 @@ pub async fn get_global_account_data_route(
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_room_account_data_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_room_account_data::Request<'_>>,
) -> ConduitResult<get_room_account_data::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event: Box<RawJsonValue> = db
let event = db
.account_data
.get(
.get::<Box<RawJsonValue>>(
Some(&body.room_id),
sender_user,
body.event_type.clone().into(),
)?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
db.flush().await?;
let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?

33
src/client_server/context.rs

@@ -1,23 +1,18 @@
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::api::client::{error::ErrorKind, r0::context::get_context};
use std::convert::TryFrom;
use std::{convert::TryFrom, sync::Arc};
#[cfg(feature = "conduit_bin")]
use rocket::get;
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
///
/// Allows loading room history around an event.
///
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
/// joined, depending on history_visibility)
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_context_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_context::Request<'_>>,
) -> ConduitResult<get_context::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -48,9 +43,9 @@ pub async fn get_context_route(
))?
.to_room_event();
let events_before: Vec<_> = db
let events_before = db
.rooms
.pdus_until(sender_user, &body.room_id, base_token)?
.pdus_until(&sender_user, &body.room_id, base_token)
.take(
u32::try_from(body.limit).map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
@@ -58,21 +53,21 @@ pub async fn get_context_route(
/ 2,
)
.filter_map(|r| r.ok()) // Remove buggy events
.collect();
.collect::<Vec<_>>();
let start_token = events_before
.last()
.and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
.map(|count| count.to_string());
let events_before: Vec<_> = events_before
let events_before = events_before
.into_iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect();
.collect::<Vec<_>>();
let events_after: Vec<_> = db
let events_after = db
.rooms
.pdus_after(sender_user, &body.room_id, base_token)?
.pdus_after(&sender_user, &body.room_id, base_token)
.take(
u32::try_from(body.limit).map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
@@ -80,17 +75,17 @@ pub async fn get_context_route(
/ 2,
)
.filter_map(|r| r.ok()) // Remove buggy events
.collect();
.collect::<Vec<_>>();
let end_token = events_after
.last()
.and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
.map(|count| count.to_string());
let events_after: Vec<_> = events_after
let events_after = events_after
.into_iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect();
.collect::<Vec<_>>();
let mut resp = get_context::Response::new();
resp.start = start_token;
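Both directions here budget half of the requested `limit` for events before the base token and half for events after it, silently skipping events that fail to load. A small standalone sketch of that windowing over a plain slice of event counters:

```rust
/// Return (before, after) windows around `base`, each at most `limit / 2`
/// items, mirroring how the context route pages around the base token.
fn context_window(timeline: &[u64], base: u64, limit: usize) -> (Vec<u64>, Vec<u64>) {
    let half = limit / 2;
    match timeline.iter().position(|&t| t == base) {
        Some(i) => {
            // Events before the base event, closest first (like `pdus_until`).
            let before = timeline[..i].iter().rev().take(half).copied().collect();
            // Events after the base event (like `pdus_after`).
            let after = timeline[i + 1..].iter().take(half).copied().collect();
            (before, after)
        }
        None => (Vec::new(), Vec::new()),
    }
}

fn main() {
    let timeline: Vec<u64> = (1..=10).collect();
    let (before, after) = context_window(&timeline, 5, 4);
    assert_eq!(before, vec![4, 3]); // limit / 2 events, walking backwards
    assert_eq!(after, vec![6, 7]); // limit / 2 events, walking forwards
}
```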

108
src/client_server/device.rs

@@ -1,9 +1,12 @@
use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma};
use std::sync::Arc;
use super::State;
use crate::{utils, ConduitResult, Database, Error, Ruma};
use ruma::api::client::{
error::ErrorKind,
r0::{
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
uiaa::{AuthFlow, AuthType, UiaaInfo},
uiaa::{AuthFlow, UiaaInfo},
},
};
@@ -11,96 +14,78 @@ use super::SESSION_ID_LENGTH;
#[cfg(feature = "conduit_bin")]
use rocket::{delete, get, post, put};
/// # `GET /_matrix/client/r0/devices`
///
/// Get metadata on all devices of the sender user.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/devices", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_devices_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_devices::Request>,
) -> ConduitResult<get_devices::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let devices: Vec<device::Device> = db
let devices = db
.users
.all_devices_metadata(sender_user)
.filter_map(|r| r.ok()) // Filter out buggy devices
.collect();
.collect::<Vec<device::Device>>();
Ok(get_devices::Response { devices }.into())
}
/// # `GET /_matrix/client/r0/devices/{deviceId}`
///
/// Get metadata on a single device of the sender user.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/devices/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_device_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_device::Request<'_>>,
) -> ConduitResult<get_device::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let device = db
.users
.get_device_metadata(sender_user, &body.body.device_id)?
.get_device_metadata(&sender_user, &body.body.device_id)?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
Ok(get_device::Response { device }.into())
}
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
///
/// Updates the metadata on a given device of the sender user.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/devices/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn update_device_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<update_device::Request<'_>>,
) -> ConduitResult<update_device::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut device = db
.users
.get_device_metadata(sender_user, &body.device_id)?
.get_device_metadata(&sender_user, &body.device_id)?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
device.display_name = body.display_name.clone();
db.users
.update_device_metadata(sender_user, &body.device_id, &device)?;
.update_device_metadata(&sender_user, &body.device_id, &device)?;
db.flush()?;
db.flush().await?;
Ok(update_device::Response {}.into())
Ok(update_device::Response.into())
}
/// # `DELETE /_matrix/client/r0/devices/{deviceId}`
///
/// Deletes the given device.
///
/// - Requires UIAA to verify user password
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/r0/devices/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn delete_device_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<delete_device::Request<'_>>,
) -> ConduitResult<delete_device::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -109,7 +94,7 @@ pub async fn delete_device_route(
// UIAA
let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::Password],
stages: vec!["m.login.password".to_owned()],
}],
completed: Vec::new(),
params: Default::default(),
@@ -119,8 +104,8 @@ pub async fn delete_device_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = db.uiaa.try_auth(
sender_user,
sender_device,
&sender_user,
&sender_device,
auth,
&uiaainfo,
&db.users,
@@ -130,40 +115,31 @@ pub async fn delete_device_route(
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
} else {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
db.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
db.uiaa.create(
&sender_user,
&sender_device,
&uiaainfo,
&body.json_body.expect("body is json"),
)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
db.users.remove_device(sender_user, &body.device_id)?;
db.users.remove_device(&sender_user, &body.device_id)?;
db.flush()?;
db.flush().await?;
Ok(delete_device::Response {}.into())
Ok(delete_device::Response.into())
}
/// # `POST /_matrix/client/r0/delete_devices`
///
/// Deletes the given devices.
///
/// - Requires UIAA to verify user password
///
/// For each device:
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/delete_devices", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn delete_devices_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<delete_devices::Request<'_>>,
) -> ConduitResult<delete_devices::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -172,7 +148,7 @@ pub async fn delete_devices_route(
// UIAA
let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::Password],
stages: vec!["m.login.password".to_owned()],
}],
completed: Vec::new(),
params: Default::default(),
@@ -182,8 +158,8 @@ pub async fn delete_devices_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = db.uiaa.try_auth(
sender_user,
sender_device,
&sender_user,
&sender_device,
auth,
&uiaainfo,
&db.users,
@@ -193,20 +169,22 @@ pub async fn delete_devices_route(
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
} else {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
db.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
db.uiaa.create(
&sender_user,
&sender_device,
&uiaainfo,
&body.json_body.expect("body is json"),
)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
for device_id in &body.devices {
db.users.remove_device(sender_user, device_id)?
db.users.remove_device(&sender_user, &device_id)?
}
db.flush()?;
db.flush().await?;
Ok(delete_devices::Response {}.into())
Ok(delete_devices::Response.into())
}
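The UIAA dance above appears in the same form in three routes in this diff (single device deletion, bulk deletion, and signing-key upload): a request without an `auth` object gets a fresh session id and a 401 challenge listing the `m.login.password` stage, while a request with `auth` is verified before the destructive action runs. A standalone sketch of that decision, with a plain string comparison standing in for Conduit's real credential check:

```rust
/// Minimal UIAA outcome, mirroring the branch structure of the routes above.
#[allow(dead_code)]
enum UiaaOutcome {
    /// Reply 401 with this session id and the stages still to complete.
    Challenge { session: String, stages: Vec<String> },
    /// Auth passed; the caller may now remove the device(s).
    Authorized,
    /// Auth was supplied but did not verify.
    Rejected,
}

fn check_uiaa(auth: Option<&str>, password: &str, new_session: String) -> UiaaOutcome {
    match auth {
        // First request: open a session and challenge the client.
        None => UiaaOutcome::Challenge {
            session: new_session,
            stages: vec!["m.login.password".to_owned()],
        },
        Some(supplied) if supplied == password => UiaaOutcome::Authorized,
        Some(_) => UiaaOutcome::Rejected,
    }
}

fn main() {
    // No auth yet: the client receives the flow and retries.
    assert!(matches!(
        check_uiaa(None, "hunter2", "session-1".into()),
        UiaaOutcome::Challenge { .. }
    ));
    // Retry with the password stage completed.
    assert!(matches!(
        check_uiaa(Some("hunter2"), "hunter2", "session-2".into()),
        UiaaOutcome::Authorized
    ));
}
```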

300
src/client_server/directory.rs

@@ -1,6 +1,8 @@
use std::convert::TryInto;
use std::sync::Arc;
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Result, Ruma};
use log::info;
use ruma::{
api::{
client::{
@@ -17,35 +19,23 @@ use ruma::{
},
directory::{Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk, RoomNetwork},
events::{
room::{
avatar::RoomAvatarEventContent,
canonical_alias::RoomCanonicalAliasEventContent,
guest_access::{GuestAccess, RoomGuestAccessEventContent},
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
name::RoomNameEventContent,
topic::RoomTopicEventContent,
},
room::{avatar, canonical_alias, guest_access, history_visibility, name, topic},
EventType,
},
serde::Raw,
ServerName, UInt,
};
use tracing::{info, warn};
#[cfg(feature = "conduit_bin")]
use rocket::{get, post, put};
/// # `POST /_matrix/client/r0/publicRooms`
///
/// Lists the public rooms on this server.
///
/// - Rooms are ordered by the number of joined members
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/publicRooms", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_public_rooms_filtered_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_public_rooms_filtered::Request<'_>>,
) -> ConduitResult<get_public_rooms_filtered::Response> {
get_public_rooms_filtered_helper(
@@ -59,18 +49,13 @@ pub async fn get_public_rooms_filtered_route(
.await
}
/// # `GET /_matrix/client/r0/publicRooms`
///
/// Lists the public rooms on this server.
///
/// - Rooms are ordered by the number of joined members
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/publicRooms", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_public_rooms_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_public_rooms::Request<'_>>,
) -> ConduitResult<get_public_rooms::Response> {
let response = get_public_rooms_filtered_helper(
@@ -93,51 +78,43 @@ pub async fn get_public_rooms_route(
.into())
}
/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
///
/// Sets the visibility of a given room in the room directory.
///
/// - TODO: Access control checks
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/directory/list/room/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_room_visibility_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_room_visibility::Request<'_>>,
) -> ConduitResult<set_room_visibility::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
match &body.visibility {
room::Visibility::Public => {
db.rooms.set_public(&body.room_id, true)?;
info!("{} made {} public", sender_user, body.room_id);
}
room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?,
_ => {
room::Visibility::_Custom(_s) => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Room visibility type is not supported.",
));
}
room::Visibility::Public => {
db.rooms.set_public(&body.room_id, true)?;
info!("{} made {} public", sender_user, body.room_id);
}
room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?,
}
db.flush()?;
db.flush().await?;
Ok(set_room_visibility::Response {}.into())
Ok(set_room_visibility::Response.into())
}
/// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
///
/// Gets the visibility of a given room in the room directory.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/directory/list/room/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_room_visibility_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_room_visibility::Request<'_>>,
) -> ConduitResult<get_room_visibility::Response> {
Ok(get_room_visibility::Response {
@@ -150,7 +127,7 @@ pub async fn get_room_visibility_route(
.into())
}
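The helper that follows builds one chunk per public room, filters by a case-insensitive search term over name, topic, and canonical alias, sorts by joined-member count descending, and finally pages with skip/take — which is why it must collect every chunk before sorting. A trimmed standalone sketch of that tail, with the chunk reduced to the two fields the logic needs:

```rust
/// Stripped-down public-rooms chunk (the real one also carries the alias,
/// topic, avatar URL, and guest-access flags built below).
struct Chunk {
    name: Option<String>,
    num_joined_members: u64,
}

/// Filter, sort by member count descending, then page, mirroring the helper.
fn page_rooms(mut rooms: Vec<Chunk>, term: Option<&str>, since: usize, limit: usize) -> Vec<Chunk> {
    if let Some(q) = term.map(str::to_lowercase) {
        rooms.retain(|c| {
            c.name
                .as_deref()
                .map_or(false, |n| n.to_lowercase().contains(&q))
        });
    }
    rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
    rooms.into_iter().skip(since).take(limit).collect()
}

fn main() {
    let rooms = vec![
        Chunk { name: Some("Rust Hangout".into()), num_joined_members: 7 },
        Chunk { name: Some("Rust Dev".into()), num_joined_members: 42 },
        Chunk { name: None, num_joined_members: 99 }, // no name: filtered out here
    ];
    let page = page_rooms(rooms, Some("rust"), 0, 10);
    assert_eq!(page[0].num_joined_members, 42); // largest matching room first
}
```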
pub(crate) async fn get_public_rooms_filtered_helper(
pub async fn get_public_rooms_filtered_helper(
db: &Database,
server: Option<&ServerName>,
limit: Option<UInt>,
@@ -167,7 +144,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
other_server,
federation::directory::get_public_rooms_filtered::v1::Request {
limit,
since,
since: since.as_deref(),
filter: Filter {
generic_search_term: filter.generic_search_term.as_deref(),
},
@@ -223,139 +200,160 @@ pub(crate) async fn get_public_rooms_filtered_helper(
}
}
let mut all_rooms: Vec<_> = db
.rooms
.public_rooms()
.map(|room_id| {
let room_id = room_id?;
let mut all_rooms =
db.rooms
.public_rooms()
.map(|room_id| {
let room_id = room_id?;
let chunk = PublicRoomsChunk {
aliases: Vec::new(),
canonical_alias: db
.rooms
.room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomCanonicalAliasEventContent| c.alias)
let chunk = PublicRoomsChunk {
aliases: Vec::new(),
canonical_alias: db
.rooms
.room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")?
.map_or(Ok::<_, Error>(None), |s| {
Ok(serde_json::from_value::<
Raw<canonical_alias::CanonicalAliasEventContent>,
>(s.content.clone())
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| {
Error::bad_database("Invalid canonical alias event in database.")
})
})?,
name: db
.rooms
.room_state_get(&room_id, &EventType::RoomName, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomNameEventContent| c.name)
})?
.alias)
})?,
name: db
.rooms
.room_state_get(&room_id, &EventType::RoomName, "")?
.map_or(Ok::<_, Error>(None), |s| {
Ok(serde_json::from_value::<Raw<name::NameEventContent>>(
s.content.clone(),
)
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| {
Error::bad_database("Invalid room name event in database.")
})
})?,
num_joined_members: db
.rooms
.room_joined_count(&room_id)?
.unwrap_or_else(|| {
warn!("Room {} has no member count", room_id);
0
})
.try_into()
.expect("user count should not be that big"),
topic: db
.rooms
.room_state_get(&room_id, &EventType::RoomTopic, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomTopicEventContent| Some(c.topic))
.map_err(|_| {
Error::bad_database("Invalid room topic event in database.")
})
})?,
world_readable: db
.rooms
.room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| {
c.history_visibility == HistoryVisibility::WorldReadable
})
})?
.name()
.map(|n| n.to_owned()))
})?,
num_joined_members: (db.rooms.room_members(&room_id).count() as u32).into(),
topic: db
.rooms
.room_state_get(&room_id, &EventType::RoomTopic, "")?
.map_or(Ok::<_, Error>(None), |s| {
Ok(Some(
serde_json::from_value::<Raw<topic::TopicEventContent>>(
s.content.clone(),
)
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| {
Error::bad_database("Invalid room topic event in database.")
})?
.topic,
))
})?,
world_readable: db
.rooms
.room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")?
.map_or(Ok::<_, Error>(false), |s| {
Ok(serde_json::from_value::<
Raw<history_visibility::HistoryVisibilityEventContent>,
>(s.content.clone())
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| {
Error::bad_database(
"Invalid room history visibility event in database.",
)
})
})?,
guest_can_join: db
.rooms
.room_state_get(&room_id, &EventType::RoomGuestAccess, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomGuestAccessEventContent| {
c.guest_access == GuestAccess::CanJoin
})
})?
.history_visibility
== history_visibility::HistoryVisibility::WorldReadable)
})?,
guest_can_join: db
.rooms
.room_state_get(&room_id, &EventType::RoomGuestAccess, "")?
.map_or(Ok::<_, Error>(false), |s| {
Ok(
serde_json::from_value::<Raw<guest_access::GuestAccessEventContent>>(
s.content.clone(),
)
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| {
Error::bad_database("Invalid room guest access event in database.")
})
})?,
avatar_url: db
.rooms
.room_state_get(&room_id, &EventType::RoomAvatar, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomAvatarEventContent| c.url)
.map_err(|_| {
Error::bad_database("Invalid room avatar event in database.")
})
})
.transpose()?
// url is now an Option<String> so we must flatten
.flatten(),
room_id,
};
Ok(chunk)
})
.filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms
.filter(|chunk| {
if let Some(query) = filter
.generic_search_term
.as_ref()
.map(|q| q.to_lowercase())
{
if let Some(name) = &chunk.name {
if name.as_str().to_lowercase().contains(&query) {
return true;
})?
.guest_access
== guest_access::GuestAccess::CanJoin,
)
})?,
avatar_url: db
.rooms
.room_state_get(&room_id, &EventType::RoomAvatar, "")?
.map(|s| {
Ok::<_, Error>(
serde_json::from_value::<Raw<avatar::AvatarEventContent>>(
s.content.clone(),
)
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| {
Error::bad_database("Invalid room avatar event in database.")
})?
.url,
)
})
.transpose()?
// url is now an Option<String> so we must flatten
.flatten(),
room_id,
};
Ok(chunk)
})
.filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms
.filter(|chunk| {
if let Some(query) = filter
.generic_search_term
.as_ref()
.map(|q| q.to_lowercase())
{
if let Some(name) = &chunk.name {
if name.to_lowercase().contains(&query) {
return true;
}
}
}
if let Some(topic) = &chunk.topic {
if topic.to_lowercase().contains(&query) {
return true;
if let Some(topic) = &chunk.topic {
if topic.to_lowercase().contains(&query) {
return true;
}
}
}
if let Some(canonical_alias) = &chunk.canonical_alias {
if canonical_alias.as_str().to_lowercase().contains(&query) {
return true;
if let Some(canonical_alias) = &chunk.canonical_alias {
if canonical_alias.as_str().to_lowercase().contains(&query) {
return true;
}
}
}
false
} else {
// No search term
true
}
})
// We need to collect all, so we can sort by member count
.collect();
false
} else {
// No search term
true
}
})
// We need to collect all, so we can sort by member count
.collect::<Vec<_>>();
all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
let total_room_count_estimate = (all_rooms.len() as u32).into();
let chunk: Vec<_> = all_rooms
let chunk = all_rooms
.into_iter()
.skip(num_since as usize)
.take(limit as usize)
.collect();
.collect::<Vec<_>>();
let prev_batch = if num_since == 0 {
None

6
src/client_server/filter.rs

@@ -4,9 +4,6 @@ use ruma::api::client::r0::filter::{self, create_filter, get_filter};
#[cfg(feature = "conduit_bin")]
use rocket::{get, post};
/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
///
/// TODO: Loads a filter that was previously created.
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))]
#[tracing::instrument]
pub async fn get_filter_route() -> ConduitResult<get_filter::Response> {
@@ -21,9 +18,6 @@ pub async fn get_filter_route() -> ConduitResult<get_filter::Response> {
.into())
}
/// # `POST /_matrix/client/r0/user/{userId}/filter`
///
/// TODO: Creates a new filter to be used by other endpoints.
#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))]
#[tracing::instrument]
pub async fn create_filter_route() -> ConduitResult<create_filter::Response> {

225
src/client_server/keys.rs

@@ -1,42 +1,34 @@
use super::SESSION_ID_LENGTH;
use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma};
use rocket::futures::{prelude::*, stream::FuturesUnordered};
use super::{State, SESSION_ID_LENGTH};
use crate::{utils, ConduitResult, Database, Error, Result, Ruma};
use ruma::{
api::{
client::{
error::ErrorKind,
r0::{
keys::{
claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
upload_signing_keys,
},
uiaa::{AuthFlow, AuthType, UiaaInfo},
api::client::{
error::ErrorKind,
r0::{
keys::{
claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
upload_signing_keys,
},
uiaa::{AuthFlow, UiaaInfo},
},
federation,
},
encryption::UnsignedDeviceInfo,
DeviceId, DeviceKeyAlgorithm, UserId,
};
use serde_json::json;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::{
collections::{BTreeMap, HashSet},
sync::Arc,
};
#[cfg(feature = "conduit_bin")]
use rocket::{get, post};
/// # `POST /_matrix/client/r0/keys/upload`
///
/// Publish end-to-end encryption keys for the sender device.
///
/// - Adds one time keys
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/keys/upload", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn upload_keys_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<upload_keys::Request>,
) -> ConduitResult<upload_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -55,7 +47,6 @@ pub async fn upload_keys_route(
}
if let Some(device_keys) = &body.device_keys {
// TODO: merge this and the existing event?
// This check is needed to ensure that signatures are kept
if db
.users
@@ -72,7 +63,7 @@ pub async fn upload_keys_route(
}
}
db.flush()?;
db.flush().await?;
Ok(upload_keys::Response {
one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?,
@@ -80,20 +71,13 @@ pub async fn upload_keys_route(
.into())
}
/// # `POST /_matrix/client/r0/keys/query`
///
/// Get end-to-end encryption keys for the given users.
///
/// - Always fetches users from other servers over federation
/// - Gets master keys, self-signing keys, user signing keys and device keys.
/// - The master and self-signing keys contain signatures that the user is allowed to see
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/keys/query", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_keys_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_keys::Request<'_>>,
) -> ConduitResult<get_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -103,43 +87,34 @@ pub async fn get_keys_route(
&body.device_keys,
|u| u == sender_user,
&db,
)
.await?;
)?;
Ok(response.into())
}
/// # `POST /_matrix/client/r0/keys/claim`
///
/// Claims one-time keys
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/keys/claim", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn claim_keys_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<claim_keys::Request>,
) -> ConduitResult<claim_keys::Response> {
let response = claim_keys_helper(&body.one_time_keys, &db).await?;
let response = claim_keys_helper(&body.one_time_keys, &db)?;
db.flush()?;
db.flush().await?;
Ok(response.into())
}
/// # `POST /_matrix/client/r0/keys/device_signing/upload`
///
/// Uploads end-to-end key information for the sender user.
///
/// - Requires UIAA to verify password
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/unstable/keys/device_signing/upload", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn upload_signing_keys_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<upload_signing_keys::Request<'_>>,
) -> ConduitResult<upload_signing_keys::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -148,7 +123,7 @@ pub async fn upload_signing_keys_route(
// UIAA
let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::Password],
stages: vec!["m.login.password".to_owned()],
}],
completed: Vec::new(),
params: Default::default(),
@@ -158,8 +133,8 @@ pub async fn upload_signing_keys_route(
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = db.uiaa.try_auth(
sender_user,
sender_device,
&sender_user,
&sender_device,
auth,
&uiaainfo,
&db.users,
@@ -169,19 +144,21 @@ pub async fn upload_signing_keys_route(
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
} else {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
db.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
db.uiaa.create(
&sender_user,
&sender_device,
&uiaainfo,
&body.json_body.expect("body is json"),
)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
if let Some(master_key) = &body.master_key {
db.users.add_cross_signing_keys(
sender_user,
master_key,
&master_key,
&body.self_signing_key,
&body.user_signing_key,
&db.rooms,
@@ -189,21 +166,18 @@ pub async fn upload_signing_keys_route(
)?;
}
db.flush()?;
db.flush().await?;
Ok(upload_signing_keys::Response {}.into())
Ok(upload_signing_keys::Response.into())
}
/// # `POST /_matrix/client/r0/keys/signatures/upload`
///
/// Uploads end-to-end key signatures from the sender user.
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/unstable/keys/signatures/upload", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn upload_signatures_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<upload_signatures::Request>,
) -> ConduitResult<upload_signatures::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -242,10 +216,10 @@ pub async fn upload_signatures_route(
.to_owned(),
);
db.users.sign_key(
user_id,
key_id,
&user_id,
&key_id,
signature,
sender_user,
&sender_user,
&db.rooms,
&db.globals,
)?;
@@ -253,23 +227,18 @@ pub async fn upload_signatures_route(
}
}
db.flush()?;
db.flush().await?;
Ok(upload_signatures::Response {}.into())
Ok(upload_signatures::Response.into())
}
/// # `GET /_matrix/client/r0/keys/changes`
///
/// Gets a list of users who have updated their device identity keys since the previous sync token.
///
/// - TODO: left users
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/keys/changes", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_key_changes_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_key_changes::Request<'_>>,
) -> ConduitResult<get_key_changes::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -314,9 +283,9 @@ pub async fn get_key_changes_route(
.into())
}
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
pub fn get_keys_helper<F: Fn(&UserId) -> bool>(
sender_user: Option<&UserId>,
device_keys_input: &BTreeMap<Box<UserId>, Vec<Box<DeviceId>>>,
device_keys_input: &BTreeMap<UserId, Vec<Box<DeviceId>>>,
allowed_signatures: F,
db: &Database,
) -> Result<get_keys::Response> {
@@ -325,19 +294,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
let mut user_signing_keys = BTreeMap::new();
let mut device_keys = BTreeMap::new();
let mut get_over_federation = HashMap::new();
for (user_id, device_ids) in device_keys_input {
let user_id: &UserId = &**user_id;
if user_id.server_name() != db.globals.server_name() {
get_over_federation
.entry(user_id.server_name())
.or_insert_with(Vec::new)
.push((user_id, device_ids));
continue;
}
if device_ids.is_empty() {
let mut container = BTreeMap::new();
for device_id in db.users.all_device_ids(user_id) {
@@ -357,12 +314,12 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
container.insert(device_id, keys);
}
}
device_keys.insert(user_id.to_owned(), container);
device_keys.insert(user_id.clone(), container);
} else {
for device_id in device_ids {
let mut container = BTreeMap::new();
if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? {
let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or(
if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), &device_id)? {
let metadata = db.users.get_device_metadata(user_id, &device_id)?.ok_or(
Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to get keys for nonexistent device.",
@@ -373,61 +330,24 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
device_display_name: metadata.display_name,
};
container.insert(device_id.to_owned(), keys);
container.insert(device_id.clone(), keys);
}
device_keys.insert(user_id.to_owned(), container);
device_keys.insert(user_id.clone(), container);
}
}
if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
master_keys.insert(user_id.to_owned(), master_key);
master_keys.insert(user_id.clone(), master_key);
}
if let Some(self_signing_key) = db
.users
.get_self_signing_key(user_id, &allowed_signatures)?
{
self_signing_keys.insert(user_id.to_owned(), self_signing_key);
self_signing_keys.insert(user_id.clone(), self_signing_key);
}
if Some(user_id) == sender_user {
if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
user_signing_keys.insert(user_id.to_owned(), user_signing_key);
}
}
}
let mut failures = BTreeMap::new();
let mut futures: FuturesUnordered<_> = get_over_federation
.into_iter()
.map(|(server, vec)| async move {
let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
}
(
server,
db.sending
.send_federation_request(
&db.globals,
server,
federation::keys::get_keys::v1::Request {
device_keys: device_keys_input_fed,
},
)
.await,
)
})
.collect();
while let Some((server, response)) = futures.next().await {
match response {
Ok(response) => {
master_keys.extend(response.master_keys);
self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys);
}
Err(_e) => {
failures.insert(server.to_string(), json!({}));
user_signing_keys.insert(user_id.clone(), user_signing_key);
}
}
}
@@ -437,26 +357,16 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
self_signing_keys,
user_signing_keys,
device_keys,
failures,
failures: BTreeMap::new(),
})
}
pub(crate) async fn claim_keys_helper(
one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
pub fn claim_keys_helper(
one_time_keys_input: &BTreeMap<UserId, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
db: &Database,
) -> Result<claim_keys::Response> {
let mut one_time_keys = BTreeMap::new();
let mut get_over_federation = BTreeMap::new();
for (user_id, map) in one_time_keys_input {
if user_id.server_name() != db.globals.server_name() {
get_over_federation
.entry(user_id.server_name())
.or_insert_with(Vec::new)
.push((user_id, map));
}
let mut container = BTreeMap::new();
for (device_id, key_algorithm) in map {
if let Some(one_time_keys) =
@@ -471,33 +381,8 @@ pub(crate) async fn claim_keys_helper(
one_time_keys.insert(user_id.clone(), container);
}
let mut failures = BTreeMap::new();
for (server, vec) in get_over_federation {
let mut one_time_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
}
// Ignore failures
if let Ok(keys) = db
.sending
.send_federation_request(
&db.globals,
server,
federation::keys::claim_keys::v1::Request {
one_time_keys: one_time_keys_input_fed,
},
)
.await
{
one_time_keys.extend(keys.one_time_keys);
} else {
failures.insert(server.to_string(), json!({}));
}
}
Ok(claim_keys::Response {
failures,
failures: BTreeMap::new(),
one_time_keys,
})
}
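The blocks removed above are the `next` side's federation support: key queries and claims for remote users are fanned out concurrently with `FuturesUnordered`, and an unreachable server becomes an entry in `failures` rather than an error for the whole request; the `caches` side keeps everything local and always returns an empty `failures` map. A runnable sketch of that fan-out pattern, assuming the `tokio` (with the `macros` and `rt` features), `futures-util`, and `serde_json` crates, with `fetch_keys` as a stand-in for the federation call:

```rust
use futures_util::stream::{FuturesUnordered, StreamExt};
use std::collections::BTreeMap;

/// Stand-in for a federation `/keys/query` call to one server.
async fn fetch_keys(server: &str) -> Result<Vec<String>, ()> {
    if server == "down.example.org" {
        Err(())
    } else {
        Ok(vec![format!("key-from-{server}")])
    }
}

#[tokio::main]
async fn main() {
    let servers = ["matrix.org", "down.example.org"];

    // Issue all requests at once and poll them as they complete.
    let mut futures: FuturesUnordered<_> = servers
        .iter()
        .map(|s| async move { (*s, fetch_keys(s).await) })
        .collect();

    let mut keys = Vec::new();
    let mut failures = BTreeMap::new();
    while let Some((server, result)) = futures.next().await {
        match result {
            Ok(mut k) => keys.append(&mut k),
            // A dead server is recorded, not propagated as a request error.
            Err(()) => {
                failures.insert(server.to_owned(), serde_json::json!({}));
            }
        }
    }
    assert_eq!(keys.len(), 1);
    assert_eq!(failures.len(), 1);
}
```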

37
src/client_server/media.rs

@@ -1,25 +1,20 @@
use crate::{
database::{media::FileMeta, DatabaseGuard},
utils, ConduitResult, Error, Ruma,
};
use super::State;
use crate::{database::media::FileMeta, utils, ConduitResult, Database, Error, Ruma};
use ruma::api::client::{
error::ErrorKind,
r0::media::{create_content, get_content, get_content_thumbnail, get_media_config},
};
use std::convert::TryInto;
#[cfg(feature = "conduit_bin")]
use rocket::{get, post};
use std::{convert::TryInto, sync::Arc};
const MXC_LENGTH: usize = 32;
/// # `GET /_matrix/media/r0/config`
///
/// Returns max upload size.
#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))]
#[tracing::instrument(skip(db))]
pub async fn get_media_config_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
) -> ConduitResult<get_media_config::Response> {
Ok(get_media_config::Response {
upload_size: db.globals.max_request_size().into(),
@@ -27,19 +22,13 @@ pub async fn get_media_config_route(
.into())
}
/// # `POST /_matrix/media/r0/upload`
///
/// Permanently save media in the server.
///
/// - Some metadata will be saved in the database
/// - Media will be saved in the media/ directory
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/media/r0/upload", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn create_content_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<create_content::Request<'_>>,
) -> ConduitResult<create_content::Response> {
let mxc = format!(
@@ -62,7 +51,7 @@ pub async fn create_content_route(
)
.await?;
db.flush()?;
db.flush().await?;
Ok(create_content::Response {
content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
@@ -71,18 +60,13 @@ pub async fn create_content_route(
.into())
}
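The upload route above mints the content URI before anything is stored: `mxc://`, the local server name, and a random id of `MXC_LENGTH` (32) characters. A standalone sketch of that minting, with a trivial stand-in for Conduit's `utils::random_string`:

```rust
const MXC_LENGTH: usize = 32;

/// Stand-in generator; the real code draws MXC_LENGTH random characters.
fn random_string(len: usize) -> String {
    std::iter::repeat('a').take(len).collect()
}

/// Mirror of the route's `format!("mxc://{}/{}", ...)` construction.
fn mint_mxc(server_name: &str) -> String {
    format!("mxc://{}/{}", server_name, random_string(MXC_LENGTH))
}

fn main() {
    let uri = mint_mxc("example.org");
    assert!(uri.starts_with("mxc://example.org/"));
    assert_eq!(uri.len(), "mxc://example.org/".len() + MXC_LENGTH);
}
```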
/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}`
///
/// Load media from our server or over federation.
///
/// - Only allows federation if `allow_remote` is true
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/media/r0/download/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_content_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_content::Request<'_>>,
) -> ConduitResult<get_content::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
@@ -129,18 +113,13 @@ pub async fn get_content_route(
}
}
/// # `GET /_matrix/media/r0/thumbnail/{serverName}/{mediaId}`
///
/// Load media thumbnail from our server or over federation.
///
/// - Only allows federation if `allow_remote` is true
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_content_thumbnail_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_content_thumbnail::Request<'_>>,
) -> ConduitResult<get_content_thumbnail::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);

767
src/client_server/membership.rs

File diff suppressed because it is too large

88
src/client_server/message.rs

@@ -1,53 +1,34 @@
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
use super::State;
use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
r0::message::{get_message_events, send_message_event},
},
events::EventType,
EventId,
};
use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
};
use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
#[cfg(feature = "conduit_bin")]
use rocket::{get, put};
/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`
///
/// Send a message event into the room.
///
/// - Is a NOOP if the txn id was already used before and returns the same event id again
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn send_message_event_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<send_message_event::Request<'_>>,
) -> ConduitResult<send_message_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_deref();
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
// Forbid m.room.encrypted if encryption is disabled
if &body.event_type == "m.room.encrypted" && !db.globals.allow_encryption() {
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Encryption has been disabled",
));
}
// Check if this is a new transaction id
if let Some(response) =
db.transaction_ids
@@ -62,10 +43,11 @@ pub async fn send_message_event_route(
));
}
let event_id = utils::string_from_bytes(&response)
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
.try_into()
.map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
let event_id = EventId::try_from(
utils::string_from_bytes(&response)
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?,
)
.map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
return Ok(send_message_event::Response { event_id }.into());
}
@@ -81,10 +63,9 @@ pub async fn send_message_event_route(
state_key: None,
redacts: None,
},
sender_user,
&sender_user,
&body.room_id,
&db,
&state_lock,
)?;
db.transaction_ids.add_txnid(
@@ -94,26 +75,18 @@ pub async fn send_message_event_route(
event_id.as_bytes(),
)?;
drop(state_lock);
db.flush()?;
db.flush().await?;
Ok(send_message_event::Response::new((*event_id).to_owned()).into())
Ok(send_message_event::Response::new(event_id).into())
}
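The transaction-id check above makes sends idempotent: a replayed `(user, device, room, txnId)` tuple short-circuits to the event id stored on the first attempt instead of appending a new PDU. A standalone sketch of that table, with string keys standing in for Conduit's byte-encoded ones:

```rust
use std::collections::HashMap;

/// Hypothetical transaction-id table: (user, device, room, txn) -> event id.
struct TransactionIds {
    seen: HashMap<(String, String, String, String), String>,
}

impl TransactionIds {
    /// Return the stored event id for a replayed transaction, or record the
    /// freshly created one, mirroring the dedupe in the send route above.
    fn send_or_replay(
        &mut self,
        key: (String, String, String, String),
        new_event_id: String,
    ) -> String {
        self.seen.entry(key).or_insert(new_event_id).clone()
    }
}

fn main() {
    let mut txns = TransactionIds { seen: HashMap::new() };
    let key = ("@a:x".into(), "DEV".into(), "!r:x".into(), "txn1".into());
    let first = txns.send_or_replay(key.clone(), "$event1".into());
    // Replaying the same transaction id is a NOOP yielding the same event id.
    let second = txns.send_or_replay(key, "$event2".into());
    assert_eq!(first, second);
}
```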
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
///
/// Allows paginating through room history.
///
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
/// joined, depending on history_visibility)
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/rooms/<_>/messages", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_message_events_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_message_events::Request<'_>>,
) -> ConduitResult<get_message_events::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -134,13 +107,16 @@ pub async fn get_message_events_route(
let to = body.to.as_ref().map(|t| t.parse());
// Use limit or else 10
let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);
let limit = body
.limit
.try_into()
.map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?;
match body.dir {
get_message_events::Direction::Forward => {
let events_after: Vec<_> = db
let events_after = db
.rooms
.pdus_after(sender_user, &body.room_id, from)?
.pdus_after(&sender_user, &body.room_id, from)
.take(limit)
.filter_map(|r| r.ok()) // Filter out buggy events
.filter_map(|(pdu_id, pdu)| {
@@ -150,14 +126,14 @@ pub async fn get_message_events_route(
.ok()
})
.take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
.collect();
.collect::<Vec<_>>();
let end_token = events_after.last().map(|(count, _)| count.to_string());
let events_after: Vec<_> = events_after
let events_after = events_after
.into_iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect();
.collect::<Vec<_>>();
let mut resp = get_message_events::Response::new();
resp.start = Some(body.from.to_owned());
@@ -168,9 +144,9 @@ pub async fn get_message_events_route(
Ok(resp.into())
}
get_message_events::Direction::Backward => {
let events_before: Vec<_> = db
let events_before = db
.rooms
.pdus_until(sender_user, &body.room_id, from)?
.pdus_until(&sender_user, &body.room_id, from)
.take(limit)
.filter_map(|r| r.ok()) // Filter out buggy events
.filter_map(|(pdu_id, pdu)| {
@@ -180,14 +156,14 @@ pub async fn get_message_events_route(
.ok()
})
.take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
.collect();
.collect::<Vec<_>>();
let start_token = events_before.last().map(|(count, _)| count.to_string());
let events_before: Vec<_> = events_before
let events_before = events_before
.into_iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect();
.collect::<Vec<_>>();
let mut resp = get_message_events::Response::new();
resp.start = Some(body.from.to_owned());

11
src/client_server/mod.rs

@@ -16,7 +16,6 @@ mod profile;
mod push;
mod read_marker;
mod redact;
mod report;
mod room;
mod search;
mod session;
@@ -48,7 +47,6 @@ pub use profile::*;
pub use push::*;
pub use read_marker::*;
pub use redact::*;
pub use report::*;
pub use room::*;
pub use search::*;
pub use session::*;
@@ -66,19 +64,18 @@ pub use voip::*;
use super::State;
#[cfg(feature = "conduit_bin")]
use {
crate::ConduitResult, rocket::options, ruma::api::client::r0::to_device::send_event_to_device,
crate::ConduitResult,
rocket::{options, State},
ruma::api::client::r0::to_device::send_event_to_device,
};
pub const DEVICE_ID_LENGTH: usize = 10;
pub const TOKEN_LENGTH: usize = 256;
pub const SESSION_ID_LENGTH: usize = 256;
/// # `OPTIONS`
///
/// Web clients use this to get CORS headers.
#[cfg(feature = "conduit_bin")]
#[options("/<_..>")]
#[tracing::instrument]
pub async fn options_route() -> ConduitResult<send_event_to_device::Response> {
Ok(send_event_to_device::Response {}.into())
Ok(send_event_to_device::Response.into())
}

32
src/client_server/presence.rs

@@ -1,35 +1,33 @@
use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma};
use super::State;
use crate::{utils, ConduitResult, Database, Ruma};
use ruma::api::client::r0::presence::{get_presence, set_presence};
use std::{convert::TryInto, time::Duration};
use std::{convert::TryInto, sync::Arc, time::Duration};
#[cfg(feature = "conduit_bin")]
use rocket::{get, put};
/// # `PUT /_matrix/client/r0/presence/{userId}/status`
///
/// Sets the presence state of the sender user.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/presence/<_>/status", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_presence_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_presence::Request<'_>>,
) -> ConduitResult<set_presence::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
for room_id in db.rooms.rooms_joined(sender_user) {
for room_id in db.rooms.rooms_joined(&sender_user) {
let room_id = room_id?;
db.rooms.edus.update_presence(
sender_user,
&sender_user,
&room_id,
ruma::events::presence::PresenceEvent {
content: ruma::events::presence::PresenceEventContent {
avatar_url: db.users.avatar_url(sender_user)?,
avatar_url: db.users.avatar_url(&sender_user)?,
currently_active: None,
displayname: db.users.displayname(sender_user)?,
displayname: db.users.displayname(&sender_user)?,
last_active_ago: Some(
utils::millis_since_unix_epoch()
.try_into()
@@ -44,23 +42,18 @@ pub async fn set_presence_route(
)?;
}
db.flush()?;
db.flush().await?;
Ok(set_presence::Response {}.into())
Ok(set_presence::Response.into())
}
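`set_presence` writes one presence EDU per joined room, stamped with the caller's profile data and a timestamp taken from `utils::millis_since_unix_epoch()`. A trimmed standalone sketch of building that content with only the standard library:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// Trimmed presence content; the real event also carries avatar_url,
/// displayname, and status_msg pulled from the user tables.
struct PresenceContent {
    presence: String,
    currently_active: Option<bool>,
    last_active_ago: Option<u64>,
}

fn presence_update(presence: &str) -> PresenceContent {
    let millis = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is after the unix epoch")
        .as_millis() as u64;
    PresenceContent {
        presence: presence.to_owned(),
        currently_active: None,
        // Stored as "now" in milliseconds, like the route above.
        last_active_ago: Some(millis),
    }
}

fn main() {
    let p = presence_update("online");
    assert_eq!(p.presence, "online");
    assert!(p.last_active_ago.is_some() && p.currently_active.is_none());
}
```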
/// # `GET /_matrix/client/r0/presence/{userId}/status`
///
/// Gets the presence state of the given user.
///
/// - Only works if you share a room with the user
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/presence/<_>/status", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_presence_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_presence::Request<'_>>,
) -> ConduitResult<get_presence::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -76,10 +69,9 @@ pub async fn get_presence_route(
if let Some(presence) = db
.rooms
.edus
.get_last_presence_event(sender_user, &room_id)?
.get_last_presence_event(&sender_user, &room_id)?
{
presence_event = Some(presence);
break;
}
}

203
src/client_server/profile.rs

@@ -1,53 +1,46 @@
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
use super::State;
use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma};
use ruma::{
api::{
client::{
error::ErrorKind,
r0::profile::{
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
},
api::client::{
error::ErrorKind,
r0::profile::{
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
},
federation::{self, query::get_profile_information::v1::ProfileField},
},
events::{room::member::RoomMemberEventContent, EventType},
events::EventType,
serde::Raw,
};
use serde_json::value::to_raw_value;
use std::{convert::TryInto, sync::Arc};
#[cfg(feature = "conduit_bin")]
use rocket::{get, put};
use std::{convert::TryInto, sync::Arc};
/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
///
/// Updates the displayname.
///
/// - Also makes sure other users receive the update using presence EDUs
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/profile/<_>/displayname", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_displayname_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_display_name::Request<'_>>,
) -> ConduitResult<set_display_name::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
db.users
.set_displayname(sender_user, body.displayname.clone())?;
.set_displayname(&sender_user, body.displayname.clone())?;
// Send a new membership event and presence update into all joined rooms
let all_rooms_joined: Vec<_> = db
for (pdu_builder, room_id) in db
.rooms
.rooms_joined(sender_user)
.rooms_joined(&sender_user)
.filter_map(|r| r.ok())
.map(|room_id| {
Ok::<_, Error>((
PduBuilder {
event_type: EventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
content: serde_json::to_value(ruma::events::room::member::MemberEventContent {
displayname: body.displayname.clone(),
..serde_json::from_str(
..serde_json::from_value::<Raw<_>>(
db.rooms
.room_state_get(
&room_id,
@@ -57,12 +50,14 @@ pub async fn set_displayname_route(
.ok_or_else(|| {
Error::bad_database(
"Tried to send displayname update for user not in the \
room.",
room.",
)
})?
.content
.get(),
.clone(),
)
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| Error::bad_database("Database contains invalid PDU."))?
})
.expect("event is valid, we just created it"),
@@ -74,32 +69,20 @@ pub async fn set_displayname_route(
))
})
.filter_map(|r| r.ok())
.collect();
for (pdu_builder, room_id) in all_rooms_joined {
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
{
let _ = db
.rooms
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
.build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db);
// Presence update
db.rooms.edus.update_presence(
sender_user,
&sender_user,
&room_id,
ruma::events::presence::PresenceEvent {
content: ruma::events::presence::PresenceEventContent {
avatar_url: db.users.avatar_url(sender_user)?,
avatar_url: db.users.avatar_url(&sender_user)?,
currently_active: None,
displayname: db.users.displayname(sender_user)?,
displayname: db.users.displayname(&sender_user)?,
last_active_ago: Some(
utils::millis_since_unix_epoch()
.try_into()
@@ -114,83 +97,52 @@ pub async fn set_displayname_route(
)?;
}
db.flush()?;
db.flush().await?;
Ok(set_display_name::Response {}.into())
Ok(set_display_name::Response.into())
}
/// # `GET /_matrix/client/r0/profile/{userId}/displayname`
///
/// Returns the displayname of the user.
///
/// - If user is on another server: Fetches displayname over federation
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/profile/<_>/displayname", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_displayname_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_display_name::Request<'_>>,
) -> ConduitResult<get_display_name::Response> {
if body.user_id.server_name() != db.globals.server_name() {
let response = db
.sending
.send_federation_request(
&db.globals,
body.user_id.server_name(),
federation::query::get_profile_information::v1::Request {
user_id: &body.user_id,
field: Some(&ProfileField::DisplayName),
},
)
.await?;
return Ok(get_display_name::Response {
displayname: response.displayname,
}
.into());
}
Ok(get_display_name::Response {
displayname: db.users.displayname(&body.user_id)?,
}
.into())
}
/// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url`
///
/// Updates the avatar_url and blurhash.
///
/// - Also makes sure other users receive the update using presence EDUs
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/profile/<_>/avatar_url", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_avatar_url_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_avatar_url::Request<'_>>,
) -> ConduitResult<set_avatar_url::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
db.users
.set_avatar_url(sender_user, body.avatar_url.clone())?;
db.users.set_blurhash(sender_user, body.blurhash.clone())?;
.set_avatar_url(&sender_user, body.avatar_url.clone())?;
// Send a new membership event and presence update into all joined rooms
let all_joined_rooms: Vec<_> = db
for (pdu_builder, room_id) in db
.rooms
.rooms_joined(sender_user)
.rooms_joined(&sender_user)
.filter_map(|r| r.ok())
.map(|room_id| {
Ok::<_, Error>((
PduBuilder {
event_type: EventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
content: serde_json::to_value(ruma::events::room::member::MemberEventContent {
avatar_url: body.avatar_url.clone(),
..serde_json::from_str(
..serde_json::from_value::<Raw<_>>(
db.rooms
.room_state_get(
&room_id,
@@ -200,12 +152,14 @@ pub async fn set_avatar_url_route(
.ok_or_else(|| {
Error::bad_database(
"Tried to send displayname update for user not in the \
room.",
room.",
)
})?
.content
.get(),
.clone(),
)
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| Error::bad_database("Database contains invalid PDU."))?
})
.expect("event is valid, we just created it"),
@@ -217,32 +171,20 @@ pub async fn set_avatar_url_route(
))
})
.filter_map(|r| r.ok())
.collect();
for (pdu_builder, room_id) in all_joined_rooms {
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
{
let _ = db
.rooms
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
.build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db);
// Presence update
db.rooms.edus.update_presence(
sender_user,
&sender_user,
&room_id,
ruma::events::presence::PresenceEvent {
content: ruma::events::presence::PresenceEventContent {
avatar_url: db.users.avatar_url(sender_user)?,
avatar_url: db.users.avatar_url(&sender_user)?,
currently_active: None,
displayname: db.users.displayname(sender_user)?,
displayname: db.users.displayname(&sender_user)?,
last_active_ago: Some(
utils::millis_since_unix_epoch()
.try_into()
@@ -257,87 +199,35 @@ pub async fn set_avatar_url_route(
)?;
}
db.flush()?;
db.flush().await?;
Ok(set_avatar_url::Response {}.into())
Ok(set_avatar_url::Response.into())
}
/// # `GET /_matrix/client/r0/profile/{userId}/avatar_url`
///
/// Returns the avatar_url and blurhash of the user.
///
/// - If user is on another server: Fetches avatar_url and blurhash over federation
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/profile/<_>/avatar_url", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_avatar_url_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_avatar_url::Request<'_>>,
) -> ConduitResult<get_avatar_url::Response> {
if body.user_id.server_name() != db.globals.server_name() {
let response = db
.sending
.send_federation_request(
&db.globals,
body.user_id.server_name(),
federation::query::get_profile_information::v1::Request {
user_id: &body.user_id,
field: Some(&ProfileField::AvatarUrl),
},
)
.await?;
return Ok(get_avatar_url::Response {
avatar_url: response.avatar_url,
blurhash: response.blurhash,
}
.into());
}
Ok(get_avatar_url::Response {
avatar_url: db.users.avatar_url(&body.user_id)?,
blurhash: db.users.blurhash(&body.user_id)?,
}
.into())
}
/// # `GET /_matrix/client/r0/profile/{userId}`
///
/// Returns the displayname, avatar_url and blurhash of the user.
///
/// - If user is on another server: Fetches profile over federation
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/profile/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_profile_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_profile::Request<'_>>,
) -> ConduitResult<get_profile::Response> {
if body.user_id.server_name() != db.globals.server_name() {
let response = db
.sending
.send_federation_request(
&db.globals,
body.user_id.server_name(),
federation::query::get_profile_information::v1::Request {
user_id: &body.user_id,
field: None,
},
)
.await?;
return Ok(get_profile::Response {
displayname: response.displayname,
avatar_url: response.avatar_url,
blurhash: response.blurhash,
}
.into());
}
if !db.users.exists(&body.user_id)? {
// Return 404 if this user doesn't exist
return Err(Error::BadRequest(
@@ -348,7 +238,6 @@ pub async fn get_profile_route(
Ok(get_profile::Response {
avatar_url: db.users.avatar_url(&body.user_id)?,
blurhash: db.users.blurhash(&body.user_id)?,
displayname: db.users.displayname(&body.user_id)?,
}
.into())

173
src/client_server/push.rs

@@ -1,4 +1,7 @@
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use std::sync::Arc;
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
@@ -8,30 +11,27 @@ use ruma::{
set_pushrule_enabled, RuleKind,
},
},
events::{push_rules::PushRulesEvent, EventType},
events::{push_rules, EventType},
push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit},
};
#[cfg(feature = "conduit_bin")]
use rocket::{delete, get, post, put};
/// # `GET /_matrix/client/r0/pushrules`
///
/// Retrieves the push rules event for this user.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/pushrules", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_pushrules_all_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_pushrules_all::Request>,
) -> ConduitResult<get_pushrules_all::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event: PushRulesEvent = db
let event = db
.account_data
.get(None, sender_user, EventType::PushRules)?
.get::<push_rules::PushRulesEvent>(None, &sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
@@ -43,23 +43,20 @@ pub async fn get_pushrules_all_route(
.into())
}
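
The `event` fetch in this hunk moves the target type from a turbofish (`get::<push_rules::PushRulesEvent>`, caches side) to a `let` annotation (`let event: PushRulesEvent`, next side); both pin the same generic parameter. A tiny illustration with `str::parse` standing in for the account-data getter:

fn main() {
    // Turbofish, as on the caches side of the diff.
    let a = "42".parse::<i32>().unwrap();
    // Let annotation, as on the next side; inference fills the generic.
    let b: i32 = "42".parse().unwrap();
    assert_eq!(a, b);
}
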
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Retrieves a single specified push rule for this user.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_pushrule_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_pushrule::Request<'_>>,
) -> ConduitResult<get_pushrule::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event: PushRulesEvent = db
let event = db
.account_data
.get(None, sender_user, EventType::PushRules)?
.get::<push_rules::PushRulesEvent>(None, &sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
@@ -87,7 +84,7 @@ pub async fn get_pushrule_route(
.content
.get(body.rule_id.as_str())
.map(|rule| rule.clone().into()),
_ => None,
RuleKind::_Custom(_) => None,
};
if let Some(rule) = rule {
@@ -100,20 +97,17 @@ pub async fn get_pushrule_route(
}
}
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Creates a single specified push rule for this user.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<body>")
put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<req>")
)]
#[tracing::instrument(skip(db, body))]
#[tracing::instrument(skip(db, req))]
pub async fn set_pushrule_route(
db: DatabaseGuard,
body: Ruma<set_pushrule::Request<'_>>,
db: State<'_, Arc<Database>>,
req: Ruma<set_pushrule::Request<'_>>,
) -> ConduitResult<set_pushrule::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = body.body;
let sender_user = req.sender_user.as_ref().expect("user is authenticated");
let body = req.body;
if body.scope != "global" {
return Err(Error::BadRequest(
@@ -122,9 +116,9 @@ pub async fn set_pushrule_route(
));
}
let mut event: PushRulesEvent = db
let mut event = db
.account_data
.get(None, sender_user, EventType::PushRules)?
.get::<push_rules::PushRulesEvent>(None, &sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
@@ -190,27 +184,29 @@ pub async fn set_pushrule_route(
.into(),
);
}
_ => {}
RuleKind::_Custom(_) => {}
}
db.account_data
.update(None, sender_user, EventType::PushRules, &event, &db.globals)?;
db.account_data.update(
None,
&sender_user,
EventType::PushRules,
&event,
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(set_pushrule::Response {}.into())
Ok(set_pushrule::Response.into())
}
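
The recurring `RuleKind::_Custom(_)` → `_` edits in this file replace a match on ruma's private catch-all variant with a plain wildcard arm. A small sketch of why the wildcard is the safer spelling for enums that may grow (`Kind` is an illustrative stand-in, not ruma's type):

enum Kind {
    Override,
    Underride,
    Custom(String),
}

fn describe(k: &Kind) -> &'static str {
    match k {
        Kind::Override => "override rule",
        Kind::Underride => "underride rule",
        // A wildcard instead of naming Custom explicitly: variants added
        // later fall through here rather than breaking the build, and the
        // code never spells out a variant the library considers private.
        _ => "unhandled kind",
    }
}

fn main() {
    println!("{}", describe(&Kind::Custom("x".into())));
}
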
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
///
/// Gets the actions of a single specified push rule for this user.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_pushrule_actions_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_pushrule_actions::Request<'_>>,
) -> ConduitResult<get_pushrule_actions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -222,9 +218,9 @@ pub async fn get_pushrule_actions_route(
));
}
let mut event: PushRulesEvent = db
let mut event = db
.account_data
.get(None, sender_user, EventType::PushRules)?
.get::<push_rules::PushRulesEvent>(None, &sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
@@ -252,10 +248,10 @@ pub async fn get_pushrule_actions_route(
.content
.get(body.rule_id.as_str())
.map(|rule| rule.actions.clone()),
_ => None,
RuleKind::_Custom(_) => None,
};
db.flush()?;
db.flush().await?;
Ok(get_pushrule_actions::Response {
actions: actions.unwrap_or_default(),
@@ -263,16 +259,13 @@ pub async fn get_pushrule_actions_route(
.into())
}
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
///
/// Sets the actions of a single specified push rule for this user.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_pushrule_actions_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_pushrule_actions::Request<'_>>,
) -> ConduitResult<set_pushrule_actions::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -284,9 +277,9 @@ pub async fn set_pushrule_actions_route(
));
}
let mut event: PushRulesEvent = db
let mut event = db
.account_data
.get(None, sender_user, EventType::PushRules)?
.get::<push_rules::PushRulesEvent>(None, &sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
@@ -324,27 +317,29 @@ pub async fn set_pushrule_actions_route(
global.content.replace(rule);
}
}
_ => {}
RuleKind::_Custom(_) => {}
};
db.account_data
.update(None, sender_user, EventType::PushRules, &event, &db.globals)?;
db.account_data.update(
None,
&sender_user,
EventType::PushRules,
&event,
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(set_pushrule_actions::Response {}.into())
Ok(set_pushrule_actions::Response.into())
}
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
///
/// Gets the enabled status of a single specified push rule for this user.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_pushrule_enabled_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_pushrule_enabled::Request<'_>>,
) -> ConduitResult<get_pushrule_enabled::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -356,9 +351,9 @@ pub async fn get_pushrule_enabled_route(
));
}
let mut event: PushRulesEvent = db
let mut event = db
.account_data
.get(None, sender_user, EventType::PushRules)?
.get::<push_rules::PushRulesEvent>(None, &sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
@@ -391,24 +386,21 @@ pub async fn get_pushrule_enabled_route(
.iter()
.find(|rule| rule.rule_id == body.rule_id)
.map_or(false, |rule| rule.enabled),
_ => false,
RuleKind::_Custom(_) => false,
};
db.flush()?;
db.flush().await?;
Ok(get_pushrule_enabled::Response { enabled }.into())
}
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
///
/// Sets the enabled status of a single specified push rule for this user.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_pushrule_enabled_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_pushrule_enabled::Request<'_>>,
) -> ConduitResult<set_pushrule_enabled::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -420,9 +412,9 @@ pub async fn set_pushrule_enabled_route(
));
}
let mut event: PushRulesEvent = db
let mut event = db
.account_data
.get(None, sender_user, EventType::PushRules)?
.get::<ruma::events::push_rules::PushRulesEvent>(None, &sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
@@ -465,27 +457,29 @@ pub async fn set_pushrule_enabled_route(
global.content.insert(rule);
}
}
_ => {}
RuleKind::_Custom(_) => {}
}
db.account_data
.update(None, sender_user, EventType::PushRules, &event, &db.globals)?;
db.account_data.update(
None,
&sender_user,
EventType::PushRules,
&event,
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(set_pushrule_enabled::Response {}.into())
Ok(set_pushrule_enabled::Response.into())
}
/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Deletes a single specified push rule for this user.
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn delete_pushrule_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<delete_pushrule::Request<'_>>,
) -> ConduitResult<delete_pushrule::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -497,9 +491,9 @@ pub async fn delete_pushrule_route(
));
}
let mut event: PushRulesEvent = db
let mut event = db
.account_data
.get(None, sender_user, EventType::PushRules)?
.get::<push_rules::PushRulesEvent>(None, &sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
@@ -532,27 +526,29 @@ pub async fn delete_pushrule_route(
global.content.remove(&rule);
}
}
_ => {}
RuleKind::_Custom(_) => {}
}
db.account_data
.update(None, sender_user, EventType::PushRules, &event, &db.globals)?;
db.account_data.update(
None,
&sender_user,
EventType::PushRules,
&event,
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(delete_pushrule::Response {}.into())
Ok(delete_pushrule::Response.into())
}
/// # `GET /_matrix/client/r0/pushers`
///
/// Gets all currently active pushers for the sender user.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/pushers", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_pushers_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_pushers::Request>,
) -> ConduitResult<get_pushers::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -563,18 +559,13 @@ pub async fn get_pushers_route(
.into())
}
/// # `POST /_matrix/client/r0/pushers/set`
///
/// Adds a pusher for the sender user.
///
/// - TODO: Handle `append`
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/pushers/set", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_pushers_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_pusher::Request>,
) -> ConduitResult<set_pusher::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -582,7 +573,7 @@ pub async fn set_pushers_route(
db.pusher.set_pusher(sender_user, pusher)?;
db.flush()?;
db.flush().await?;
Ok(set_pusher::Response::default().into())
}

40
src/client_server/read_marker.rs

@@ -1,4 +1,5 @@
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
@@ -8,24 +9,18 @@ use ruma::{
receipt::ReceiptType,
MilliSecondsSinceUnixEpoch,
};
use std::collections::BTreeMap;
#[cfg(feature = "conduit_bin")]
use rocket::post;
use std::{collections::BTreeMap, sync::Arc};
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
///
/// Sets different types of read markers.
///
/// - Updates fully-read account data event to `fully_read`
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/rooms/<_>/read_markers", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_read_marker_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<set_read_marker::Request<'_>>,
) -> ConduitResult<set_read_marker::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -37,7 +32,7 @@ pub async fn set_read_marker_route(
};
db.account_data.update(
Some(&body.room_id),
sender_user,
&sender_user,
EventType::FullyRead,
&fully_read_event,
&db.globals,
@@ -46,7 +41,7 @@ pub async fn set_read_marker_route(
if let Some(event) = &body.read_receipt {
db.rooms.edus.private_read_set(
&body.room_id,
sender_user,
&sender_user,
db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Event does not exist.",
@@ -54,7 +49,7 @@ pub async fn set_read_marker_route(
&db.globals,
)?;
db.rooms
.reset_notification_counts(sender_user, &body.room_id)?;
.reset_notification_counts(&sender_user, &body.room_id)?;
let mut user_receipts = BTreeMap::new();
user_receipts.insert(
@@ -71,7 +66,7 @@ pub async fn set_read_marker_route(
receipt_content.insert(event.to_owned(), receipts);
db.rooms.edus.readreceipt_update(
sender_user,
&sender_user,
&body.room_id,
AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
@@ -81,28 +76,25 @@ pub async fn set_read_marker_route(
)?;
}
db.flush()?;
db.flush().await?;
Ok(set_read_marker::Response {}.into())
Ok(set_read_marker::Response.into())
}
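
For reference, a body this route accepts looks roughly like the following (event ids made up; `m.fully_read` is required, `m.read` is the optional public receipt; uses the serde_json crate):

use serde_json::json;

fn main() {
    let body = json!({
        "m.fully_read": "$somewhere:example.org",
        "m.read": "$elsewhere:example.org"
    });
    println!("{body}");
}
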
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
///
/// Sets private read marker and public read receipt EDU.
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn create_receipt_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<create_receipt::Request<'_>>,
) -> ConduitResult<create_receipt::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
db.rooms.edus.private_read_set(
&body.room_id,
sender_user,
&sender_user,
db.rooms
.get_pdu_count(&body.event_id)?
.ok_or(Error::BadRequest(
@@ -112,7 +104,7 @@ pub async fn create_receipt_route(
&db.globals,
)?;
db.rooms
.reset_notification_counts(sender_user, &body.room_id)?;
.reset_notification_counts(&sender_user, &body.room_id)?;
let mut user_receipts = BTreeMap::new();
user_receipts.insert(
@@ -128,7 +120,7 @@ pub async fn create_receipt_route(
receipt_content.insert(body.event_id.to_owned(), receipts);
db.rooms.edus.readreceipt_update(
sender_user,
&sender_user,
&body.room_id,
AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
@@ -137,7 +129,7 @@ pub async fn create_receipt_route(
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(create_receipt::Response {}.into())
Ok(create_receipt::Response.into())
}

39
src/client_server/redact.rs

@@ -1,63 +1,42 @@
use std::sync::Arc;
use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma};
use super::State;
use crate::{pdu::PduBuilder, ConduitResult, Database, Ruma};
use ruma::{
api::client::r0::redact::redact_event,
events::{room::redaction::RoomRedactionEventContent, EventType},
events::{room::redaction, EventType},
};
use std::sync::Arc;
#[cfg(feature = "conduit_bin")]
use rocket::put;
use serde_json::value::to_raw_value;
/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}`
///
/// Tries to send a redaction event into the room.
///
/// - TODO: Handle txn id
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn redact_event_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<redact_event::Request<'_>>,
) -> ConduitResult<redact_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = body.body;
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
let event_id = db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomRedaction,
content: to_raw_value(&RoomRedactionEventContent {
content: serde_json::to_value(redaction::RedactionEventContent {
reason: body.reason.clone(),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: None,
redacts: Some(body.event_id.into()),
redacts: Some(body.event_id.clone()),
},
sender_user,
&sender_user,
&body.room_id,
&db,
&state_lock,
)?;
drop(state_lock);
db.flush()?;
db.flush().await?;
let event_id = (*event_id).to_owned();
Ok(redact_event::Response { event_id }.into())
}
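
The PDU assembled above serializes to a redaction event of roughly this shape (ids and reason illustrative):

use serde_json::json;

fn main() {
    let event = json!({
        "type": "m.room.redaction",
        "redacts": "$badevent:example.org",
        "content": { "reason": "spam" }
    });
    println!("{event}");
}
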

84
src/client_server/report.rs

@@ -1,84 +0,0 @@
use crate::{
database::{admin::AdminCommand, DatabaseGuard},
ConduitResult, Error, Ruma,
};
use ruma::{
api::client::{error::ErrorKind, r0::room::report_content},
events::room::message,
int,
};
#[cfg(feature = "conduit_bin")]
use rocket::{http::RawStr, post};
/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}`
///
/// Reports an inappropriate event to homeserver admins
///
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/rooms/<_>/report/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn report_event_route(
db: DatabaseGuard,
body: Ruma<report_content::Request<'_>>,
) -> ConduitResult<report_content::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let pdu = match db.rooms.get_pdu(&body.event_id)? {
Some(pdu) => pdu,
_ => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid Event ID",
))
}
};
if body.score > int!(0) || body.score < int!(-100) {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid score, must be within 0 to -100",
));
};
if body.reason.chars().count() > 250 {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Reason too long, should be 250 characters or fewer",
));
};
db.admin.send(AdminCommand::SendMessage(
message::RoomMessageEventContent::text_html(
format!(
"Report received from: {}\n\n\
Event ID: {}\n\
Room ID: {}\n\
Sent By: {}\n\n\
Report Score: {}\n\
Report Reason: {}",
sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
),
format!(
"<details><summary>Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\
</a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\
<a href=\"https://matrix.to/#/{2}/{1}\">🔗</a></li><li>Room ID: <code>{2}</code>\
</li><li>Sent By: <a href=\"https://matrix.to/#/{3}\">{3}</a></li></ul></li><li>\
Report Info<ul><li>Report Score: {4}</li><li>Report Reason: {5}</li></ul></li>\
</ul></details>",
sender_user,
pdu.event_id,
pdu.room_id,
pdu.sender,
body.score,
RawStr::new(&body.reason).html_escape()
),
),
));
db.flush()?;
Ok(report_content::Response {}.into())
}
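
The score check above accepts the inclusive range from -100 (most offensive) to 0 (inoffensive). The same bounds restated as a standalone predicate:

// Same bounds as report_event_route enforces on body.score.
fn score_is_valid(score: i64) -> bool {
    (-100..=0).contains(&score)
}

fn main() {
    assert!(score_is_valid(0));
    assert!(score_is_valid(-100));
    assert!(!score_is_valid(1));
    assert!(!score_is_valid(-101));
}
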

514
src/client_server/room.rs

@@ -1,212 +1,93 @@
use crate::{
client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Error,
Ruma,
};
use super::State;
use crate::{client_server::invite_helper, pdu::PduBuilder, ConduitResult, Database, Error, Ruma};
use log::info;
use ruma::{
api::client::{
error::ErrorKind,
r0::room::{self, aliases, create_room, get_room_event, upgrade_room},
r0::room::{self, create_room, get_room_event, upgrade_room},
},
events::{
room::{
canonical_alias::RoomCanonicalAliasEventContent,
create::RoomCreateEventContent,
guest_access::{GuestAccess, RoomGuestAccessEventContent},
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
join_rules::{JoinRule, RoomJoinRulesEventContent},
member::{MembershipState, RoomMemberEventContent},
name::RoomNameEventContent,
power_levels::RoomPowerLevelsEventContent,
tombstone::RoomTombstoneEventContent,
topic::RoomTopicEventContent,
},
room::{guest_access, history_visibility, join_rules, member, name, topic},
EventType,
},
int,
serde::{CanonicalJsonObject, JsonObject},
serde::Raw,
RoomAliasId, RoomId, RoomVersionId,
};
use serde_json::{json, value::to_raw_value};
use std::{cmp::max, collections::BTreeMap, convert::TryInto, sync::Arc};
use tracing::{info, warn};
use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc};
#[cfg(feature = "conduit_bin")]
use rocket::{get, post};
/// # `POST /_matrix/client/r0/createRoom`
///
/// Creates a new room.
///
/// - Room ID is randomly generated
/// - Create alias if room_alias_name is set
/// - Send create event
/// - Join sender user
/// - Send power levels event
/// - Send canonical room alias
/// - Send join rules
/// - Send history visibility
/// - Send guest access
/// - Send events listed in initial state
/// - Send events implied by `name` and `topic`
/// - Send invite events
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/createRoom", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn create_room_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<create_room::Request<'_>>,
) -> ConduitResult<create_room::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let room_id = RoomId::new(db.globals.server_name());
db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?;
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
if !db.globals.allow_room_creation()
&& !body.from_appservice
&& !db.users.is_admin(sender_user, &db.rooms, &db.globals)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Room creation has been disabled.",
));
}
let alias: Option<Box<RoomAliasId>> =
body.room_alias_name
.as_ref()
.map_or(Ok(None), |localpart| {
// TODO: Check for invalid characters and maximum length
let alias =
RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name()))
.map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.")
})?;
if db.rooms.id_from_alias(&alias)?.is_some() {
Err(Error::BadRequest(
ErrorKind::RoomInUse,
"Room alias already exists.",
))
} else {
Ok(Some(alias))
}
})?;
let room_version = match body.room_version.clone() {
Some(room_version) => {
if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 {
room_version
let alias = body
.room_alias_name
.as_ref()
.map_or(Ok(None), |localpart| {
// TODO: Check for invalid characters and maximum length
let alias =
RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name()))
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
if db.rooms.id_from_alias(&alias)?.is_some() {
Err(Error::BadRequest(
ErrorKind::RoomInUse,
"Room alias already exists.",
))
} else {
return Err(Error::BadRequest(
ErrorKind::UnsupportedRoomVersion,
"This server does not support that room version.",
));
Ok(Some(alias))
}
}
None => RoomVersionId::V6,
};
let content = match &body.creation_content {
Some(content) => {
let mut content = content
.deserialize_as::<CanonicalJsonObject>()
.expect("Invalid creation content");
content.insert(
"creator".into(),
json!(&sender_user).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
})?,
);
content.insert(
"room_version".into(),
json!(room_version.as_str()).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
})?,
);
content
}
None => {
let mut content = serde_json::from_str::<CanonicalJsonObject>(
to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
.get(),
)
.unwrap();
content.insert(
"room_version".into(),
json!(room_version.as_str()).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
})?,
);
content
}
};
})?;
// Validate creation content
let de_result = serde_json::from_str::<CanonicalJsonObject>(
to_raw_value(&content)
.expect("Invalid creation content")
.get(),
);
if de_result.is_err() {
return Err(Error::BadRequest(
ErrorKind::BadJson,
"Invalid creation content",
));
}
let mut content = ruma::events::room::create::CreateEventContent::new(sender_user.clone());
content.federate = body.creation_content.federate;
content.predecessor = body.creation_content.predecessor.clone();
content.room_version = RoomVersionId::Version6;
// 1. The room create event
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomCreate,
content: to_raw_value(&content).expect("event is valid, we just created it"),
content: serde_json::to_value(content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_user,
&sender_user,
&room_id,
&db,
&state_lock,
)?;
// 2. Let the room creator join
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: db.users.displayname(sender_user)?,
avatar_url: db.users.avatar_url(sender_user)?,
content: serde_json::to_value(member::MemberEventContent {
membership: member::MembershipState::Join,
displayname: db.users.displayname(&sender_user)?,
avatar_url: db.users.avatar_url(&sender_user)?,
is_direct: Some(body.is_direct),
third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(sender_user.to_string()),
redacts: None,
},
sender_user,
&sender_user,
&room_id,
&db,
&state_lock,
)?;
// 3. Power levels
@@ -218,29 +99,32 @@ pub async fn create_room_route(
.unwrap_or_else(|| match &body.visibility {
room::Visibility::Private => create_room::RoomPreset::PrivateChat,
room::Visibility::Public => create_room::RoomPreset::PublicChat,
_ => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom
room::Visibility::_Custom(_) => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom
});
let mut users = BTreeMap::new();
users.insert(sender_user.clone(), int!(100));
users.insert(sender_user.clone(), 100.into());
if preset == create_room::RoomPreset::TrustedPrivateChat {
for invite_ in &body.invite {
users.insert(invite_.clone(), int!(100));
users.insert(invite_.clone(), 100.into());
}
}
let mut power_levels_content = serde_json::to_value(RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it");
let mut power_levels_content =
serde_json::to_value(ruma::events::room::power_levels::PowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it");
if let Some(power_level_content_override) = &body.power_level_content_override {
let json: JsonObject = serde_json::from_str(power_level_content_override.json().get())
.map_err(|_| {
Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.")
})?;
let json = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(
power_level_content_override.json().get(),
)
.map_err(|_| {
Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.")
})?;
for (key, value) in json {
power_levels_content[key] = value;
@@ -250,104 +134,90 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomPowerLevels,
content: to_raw_value(&power_levels_content)
.expect("to_raw_value always works on serde_json::Value"),
content: power_levels_content,
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_user,
&sender_user,
&room_id,
&db,
&state_lock,
)?;
// 4. Canonical room alias
if let Some(room_alias_id) = &alias {
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent {
alias: Some(room_alias_id.to_owned()),
alt_aliases: vec![],
})
.expect("We checked that alias earlier, it must be fine"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_user,
&room_id,
&db,
&state_lock,
)?;
}
// 5. Events set by preset
// 4. Events set by preset
// 5.1 Join Rules
// 4.1 Join Rules
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomJoinRules,
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
create_room::RoomPreset::PublicChat => JoinRule::Public,
content: match preset {
create_room::RoomPreset::PublicChat => serde_json::to_value(
join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public),
)
.expect("event is valid, we just created it"),
// according to spec "invite" is the default
_ => JoinRule::Invite,
}))
.expect("event is valid, we just created it"),
_ => serde_json::to_value(join_rules::JoinRulesEventContent::new(
join_rules::JoinRule::Invite,
))
.expect("event is valid, we just created it"),
},
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_user,
&sender_user,
&room_id,
&db,
&state_lock,
)?;
// 5.2 History Visibility
// 4.2 History Visibility
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomHistoryVisibility,
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
HistoryVisibility::Shared,
content: serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new(
history_visibility::HistoryVisibility::Shared,
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_user,
&sender_user,
&room_id,
&db,
&state_lock,
)?;
// 5.3 Guest Access
// 4.3 Guest Access
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomGuestAccess,
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
create_room::RoomPreset::PublicChat => GuestAccess::Forbidden,
_ => GuestAccess::CanJoin,
}))
.expect("event is valid, we just created it"),
content: match preset {
create_room::RoomPreset::PublicChat => {
serde_json::to_value(guest_access::GuestAccessEventContent::new(
guest_access::GuestAccess::Forbidden,
))
.expect("event is valid, we just created it")
}
_ => serde_json::to_value(guest_access::GuestAccessEventContent::new(
guest_access::GuestAccess::CanJoin,
))
.expect("event is valid, we just created it"),
},
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_user,
&sender_user,
&room_id,
&db,
&state_lock,
)?;
// 6. Events listed in initial_state
// 5. Events listed in initial_state
for event in &body.initial_state {
let pdu_builder = PduBuilder::from(event.deserialize().map_err(|e| {
warn!("Invalid initial state event: {:?}", e);
Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.")
})?);
let pdu_builder = serde_json::from_str::<PduBuilder>(
&serde_json::to_string(&event).expect("AnyInitialStateEvent::to_string always works"),
)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event."))?;
// Silently skip encryption events if they are not allowed
if pdu_builder.event_type == EventType::RoomEncryption && !db.globals.allow_encryption() {
@@ -355,24 +225,27 @@ pub async fn create_room_route(
}
db.rooms
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock)?;
.build_and_append_pdu(pdu_builder, &sender_user, &room_id, &db)?;
}
// 7. Events implied by name and topic
// 6. Events implied by name and topic
if let Some(name) = &body.name {
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
.expect("event is valid, we just created it"),
content: serde_json::to_value(
name::NameEventContent::new(name.clone()).map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Name is invalid.")
})?,
)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_user,
&sender_user,
&room_id,
&db,
&state_lock,
)?;
}
@@ -380,7 +253,7 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomTopic,
content: to_raw_value(&RoomTopicEventContent {
content: serde_json::to_value(topic::TopicEventContent {
topic: topic.clone(),
})
.expect("event is valid, we just created it"),
@@ -388,15 +261,13 @@ pub async fn create_room_route(
state_key: Some("".to_owned()),
redacts: None,
},
sender_user,
&sender_user,
&room_id,
&db,
&state_lock,
)?;
}
// 8. Events implied by invite (and TODO: invite_3pid)
drop(state_lock);
// 7. Events implied by invite (and TODO: invite_3pid)
for user_id in &body.invite {
let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await;
}
@@ -412,23 +283,18 @@ pub async fn create_room_route(
info!("{} created a room", sender_user);
db.flush()?;
db.flush().await?;
Ok(create_room::Response::new(room_id).into())
}
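
A minimal createRoom body exercising the steps listed in the doc comment above: an alias, a preset that drives the join-rules and guest-access events, name/topic events, and an invite (all values illustrative):

use serde_json::json;

fn main() {
    let body = json!({
        "room_alias_name": "myroom",
        "preset": "private_chat",
        "name": "My Room",
        "topic": "Example topic",
        "invite": ["@friend:example.org"]
    });
    println!("{body}");
}
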
/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}`
///
/// Gets a single event.
///
/// - You have to currently be joined to the room (TODO: Respect history visibility)
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_room_event_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_room_event::Request<'_>>,
) -> ConduitResult<get_room_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -450,61 +316,19 @@ pub async fn get_room_event_route(
.into())
}
/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases`
///
/// Lists all aliases of the room.
///
/// - Only users joined to the room are allowed to call this (TODO: allow any user to call it if history_visibility is world-readable)
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/rooms/<_>/aliases", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_room_aliases_route(
db: DatabaseGuard,
body: Ruma<aliases::Request<'_>>,
) -> ConduitResult<aliases::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if !db.rooms.is_joined(sender_user, &body.room_id)? {
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"You don't have permission to view this room.",
));
}
Ok(aliases::Response {
aliases: db
.rooms
.room_aliases(&body.room_id)
.filter_map(|a| a.ok())
.collect(),
}
.into())
}
/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade`
///
/// Upgrades the room.
///
/// - Creates a replacement room
/// - Sends a tombstone event into the current room
/// - Sender user joins the room
/// - Transfers some state events
/// - Moves local aliases
/// - Modifies old room power levels to prevent users from speaking
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/rooms/<_>/upgrade", data = "<body>")
post("/_matrix/client/r0/rooms/<_room_id>/upgrade", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn upgrade_room_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<upgrade_room::Request<'_>>,
_room_id: String,
) -> ConduitResult<upgrade_room::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) {
if !matches!(body.new_version, RoomVersionId::Version6) {
return Err(Error::BadRequest(
ErrorKind::UnsupportedRoomVersion,
"This server does not support that room version.",
@@ -513,26 +337,14 @@ pub async fn upgrade_room_route(
// Create a replacement room
let replacement_room = RoomId::new(db.globals.server_name());
db.rooms
.get_or_create_shortroomid(&replacement_room, &db.globals)?;
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(body.room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
// Fail if the sender does not have the required permissions
let tombstone_event_id = db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomTombstone,
content: to_raw_value(&RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
content: serde_json::to_value(ruma::events::room::tombstone::TombstoneEventContent {
body: "This room has been replaced".to_string(),
replacement_room: replacement_room.clone(),
})
.expect("event is valid, we just created it"),
@@ -543,75 +355,38 @@ pub async fn upgrade_room_route(
sender_user,
&body.room_id,
&db,
&state_lock,
)?;
// Change lock to replacement room
drop(state_lock);
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(replacement_room.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
// Get the old room creation event
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
// Get the old room federation status
let federate = serde_json::from_value::<Raw<ruma::events::room::create::CreateEventContent>>(
db.rooms
.room_state_get(&body.room_id, &EventType::RoomCreate, "")?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.content
.get(),
.clone(),
)
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid room event in database."))?
.federate;
// Use the m.room.tombstone event as the predecessor
let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
body.room_id.clone(),
(*tombstone_event_id).to_owned(),
tombstone_event_id,
));
// Send a m.room.create event containing a predecessor field and the applicable room_version
create_event_content.insert(
"creator".into(),
json!(&sender_user)
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
);
create_event_content.insert(
"room_version".into(),
json!(&body.new_version)
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
);
create_event_content.insert(
"predecessor".into(),
json!(predecessor)
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
);
// Validate creation event content
let de_result = serde_json::from_str::<CanonicalJsonObject>(
to_raw_value(&create_event_content)
.expect("Error forming creation event")
.get(),
);
if de_result.is_err() {
return Err(Error::BadRequest(
ErrorKind::BadJson,
"Error forming creation event",
));
}
let mut create_event_content =
ruma::events::room::create::CreateEventContent::new(sender_user.clone());
create_event_content.federate = federate;
create_event_content.room_version = body.new_version.clone();
create_event_content.predecessor = predecessor;
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomCreate,
content: to_raw_value(&create_event_content)
content: serde_json::to_value(create_event_content)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
@@ -620,22 +395,18 @@ pub async fn upgrade_room_route(
sender_user,
&replacement_room,
&db,
&state_lock,
)?;
// Join the new room
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: db.users.displayname(sender_user)?,
avatar_url: db.users.avatar_url(sender_user)?,
content: serde_json::to_value(member::MemberEventContent {
membership: member::MembershipState::Join,
displayname: db.users.displayname(&sender_user)?,
avatar_url: db.users.avatar_url(&sender_user)?,
is_direct: None,
third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
@ -645,7 +416,6 @@ pub async fn upgrade_room_route( @@ -645,7 +416,6 @@ pub async fn upgrade_room_route(
sender_user,
&replacement_room,
&db,
&state_lock,
)?;
// Recommended transferable state events list from the specs
@@ -679,7 +449,6 @@ pub async fn upgrade_room_route(
sender_user,
&replacement_room,
&db,
&state_lock,
)?;
}
@@ -690,17 +459,23 @@ pub async fn upgrade_room_route(
}
// Get the old room power levels
let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
db.rooms
.room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.content
.get(),
)
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
let mut power_levels_event_content =
serde_json::from_value::<Raw<ruma::events::room::power_levels::PowerLevelsEventContent>>(
db.rooms
.room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.content
.clone(),
)
.expect("database contains invalid PDU")
.deserialize()
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
// Setting events_default and invite to the greater of 50 and users_default + 1
let new_level = max(int!(50), power_levels_event_content.users_default + int!(1));
let new_level = max(
50.into(),
power_levels_event_content.users_default + 1.into(),
);
power_levels_event_content.events_default = new_level;
power_levels_event_content.invite = new_level;
@@ -708,7 +483,7 @@ pub async fn upgrade_room_route(
let _ = db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomPowerLevels,
content: to_raw_value(&power_levels_event_content)
content: serde_json::to_value(power_levels_event_content)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
@@ -717,12 +492,9 @@ pub async fn upgrade_room_route(
sender_user,
&body.room_id,
&db,
&state_lock,
)?;
drop(state_lock);
db.flush()?;
db.flush().await?;
// Return the replacement room id
Ok(upgrade_room::Response { replacement_room }.into())
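
The tombstone written into the old room carries only a human-readable body and the replacement room id, roughly (room id illustrative):

use serde_json::json;

fn main() {
    // Content of the m.room.tombstone event the upgrade sends first.
    let content = json!({
        "body": "This room has been replaced",
        "replacement_room": "!newroom:example.org"
    });
    println!("{content}");
}
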

21
src/client_server/search.rs

@@ -1,23 +1,20 @@
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::api::client::{error::ErrorKind, r0::search::search_events};
use std::sync::Arc;
#[cfg(feature = "conduit_bin")]
use rocket::post;
use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult};
use std::collections::BTreeMap;
/// # `POST /_matrix/client/r0/search`
///
/// Searches rooms for messages.
///
/// - Only works if the user is currently joined to the room (TODO: Respect history visibility)
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/search", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn search_events_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<search_events::Request<'_>>,
) -> ConduitResult<search_events::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -27,7 +24,7 @@ pub async fn search_events_route(
let room_ids = filter.rooms.clone().unwrap_or_else(|| {
db.rooms
.rooms_joined(sender_user)
.rooms_joined(&sender_user)
.filter_map(|r| r.ok())
.collect()
});
@@ -74,7 +71,7 @@ pub async fn search_events_route(
}
}
let results: Vec<_> = results
let results = results
.iter()
.map(|result| {
Ok::<_, Error>(SearchResult {
@@ -88,14 +85,14 @@ pub async fn search_events_route(
rank: None,
result: db
.rooms
.get_pdu_from_id(result)?
.get_pdu_from_id(&result)?
.map(|pdu| pdu.to_room_event()),
})
})
.filter_map(|r| r.ok())
.skip(skip)
.take(limit)
.collect();
.collect::<Vec<_>>();
let next_batch = if results.len() < limit as usize {
None
@@ -114,7 +111,7 @@ pub async fn search_events_route(
.search_term
.split_terminator(|c: char| !c.is_alphanumeric())
.map(str::to_lowercase)
.collect(),
.collect::<Vec<_>>(),
},
})
.into())
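
The `highlights` field above is built by splitting the search term on non-alphanumeric characters and lowercasing each piece. The same transform in isolation (note that consecutive separators yield empty tokens):

fn main() {
    let search_term = "Hello, Matrix-World";
    let highlights: Vec<String> = search_term
        .split_terminator(|c: char| !c.is_alphanumeric())
        .map(str::to_lowercase)
        .collect();
    // Prints ["hello", "", "matrix", "world"]; the ", " run produces
    // an empty token between "hello" and "matrix".
    println!("{highlights:?}");
}
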

66
src/client_server/session.rs

@@ -1,17 +1,16 @@
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma};
use std::sync::Arc;
use super::{State, DEVICE_ID_LENGTH, TOKEN_LENGTH};
use crate::{utils, ConduitResult, Database, Error, Ruma};
use log::info;
use ruma::{
api::client::{
error::ErrorKind,
r0::{
session::{get_login_types, login, logout, logout_all},
uiaa::IncomingUserIdentifier,
},
r0::session::{get_login_types, login, logout, logout_all},
},
UserId,
};
use serde::Deserialize;
use tracing::info;
#[derive(Debug, Deserialize)]
struct Claims {
@@ -24,7 +23,7 @@ use rocket::{get, post};
/// # `GET /_matrix/client/r0/login`
///
/// Get the supported login types of this server. One of these should be used as the `type` field
/// Get the homeserver's supported login types. One of these should be used as the `type` field
/// when logging in.
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))]
#[tracing::instrument]
@@ -41,10 +40,9 @@ pub async fn get_login_types_route() -> ConduitResult<get_login_types::Response>
///
/// Authenticates the user and returns an access token it can use in subsequent requests.
///
/// - The user needs to authenticate using their password (or if enabled using a json web token)
/// - If `device_id` is known: invalidates old access token of that device
/// - If `device_id` is unknown: creates a new device
/// - Returns access token that is associated with the user and device
/// - The returned access token is associated with the user and device
/// - Old access tokens of that device should be invalidated
/// - If `device_id` is unknown, a new device will be created
///
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_login_types_route.html) to see
/// supported login types.
@@ -54,17 +52,17 @@ pub async fn get_login_types_route() -> ConduitResult<get_login_types::Response>
)]
#[tracing::instrument(skip(db, body))]
pub async fn login_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<login::Request<'_>>,
) -> ConduitResult<login::Response> {
// Validate login method
// TODO: Other login methods
let user_id = match &body.login_info {
login::IncomingLoginInfo::Password(login::IncomingPassword {
login::IncomingLoginInfo::Password {
identifier,
password,
}) => {
let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier {
} => {
let username = if let login::IncomingUserIdentifier::MatrixId(matrix_id) = identifier {
matrix_id
} else {
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
@@ -97,11 +95,11 @@ pub async fn login_route(
user_id
}
login::IncomingLoginInfo::Token(login::IncomingToken { token }) => {
login::IncomingLoginInfo::Token { token } => {
if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() {
let token = jsonwebtoken::decode::<Claims>(
token,
jwt_decoding_key,
&token,
&jwt_decoding_key,
&jsonwebtoken::Validation::default(),
)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
@@ -116,12 +114,6 @@ pub async fn login_route(
));
}
}
_ => {
return Err(Error::BadRequest(
ErrorKind::Unknown,
"Unsupported login type.",
));
}
};
// Generate new device id if the user didn't specify one
@@ -153,7 +145,7 @@ pub async fn login_route(
info!("{} logged in", user_id);
db.flush()?;
db.flush().await?;
Ok(login::Response {
user_id,
@@ -169,25 +161,23 @@ pub async fn login_route(
///
/// Log out the current device.
///
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
/// - Invalidates the access token
/// - Deletes the device and most of its data (to-device events, last seen, etc.)
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/logout", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn logout_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<logout::Request>,
) -> ConduitResult<logout::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
db.users.remove_device(sender_user, sender_device)?;
db.users.remove_device(&sender_user, sender_device)?;
db.flush()?;
db.flush().await?;
Ok(logout::Response::new().into())
}
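
Returning to the token branch of login_route above: a standalone sketch of the JWT verification step using the jsonwebtoken crate (the HS256 shared secret and the `sub` claim name are assumptions based on the `Claims` struct shown earlier in this diff):

use jsonwebtoken::{decode, DecodingKey, Validation};
use serde::Deserialize;

// Assumed claim layout: `sub` carries the user's localpart.
#[derive(Debug, Deserialize)]
struct Claims {
    sub: String,
}

// Verify the token against a shared secret and extract the localpart.
fn localpart_from_token(token: &str, secret: &[u8]) -> Option<String> {
    let data = decode::<Claims>(
        token,
        &DecodingKey::from_secret(secret),
        &Validation::default(),
    )
    .ok()?;
    Some(data.claims.sub)
}

fn main() {
    // A garbage token fails validation and yields None.
    println!("{:?}", localpart_from_token("not.a.real.token", b"secret"));
}
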
@@ -197,9 +187,7 @@ pub async fn logout_route(
/// Log out all devices of this user.
///
/// - Invalidates all access tokens
/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets all to-device events
/// - Triggers device list updates
/// - Deletes devices and most of their data (to-device events, last seen, etc.)
///
/// Note: This is equivalent to calling [`POST /_matrix/client/r0/logout`](fn.logout_route.html)
/// from each device of this user.
@@ -209,16 +197,16 @@ pub async fn logout_route(
)]
#[tracing::instrument(skip(db, body))]
pub async fn logout_all_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<logout_all::Request>,
) -> ConduitResult<logout_all::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
for device_id in db.users.all_device_ids(sender_user).flatten() {
db.users.remove_device(sender_user, &device_id)?;
db.users.remove_device(&sender_user, &device_id)?;
}
db.flush()?;
db.flush().await?;
Ok(logout_all::Response::new().into())
}
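The token branch above decodes a JWT through a `Claims` type that this diff does not show. A minimal sketch of what such a claims type and the decode step could look like, assuming a `sub` claim carrying the user's localpart (the names here are hypothetical, not the crate's actual definition):

```rust
use jsonwebtoken::{decode, DecodingKey, Validation};
use serde::Deserialize;

// Hypothetical claims layout; the real `Claims` type is defined elsewhere in the crate.
#[derive(Deserialize)]
struct Claims {
    // Assumed to hold the localpart of the user the token was issued for.
    sub: String,
}

// Verifies the signature (and the claims checked by the default validation),
// then returns the localpart embedded in the token.
fn localpart_from_token(
    token: &str,
    key: &DecodingKey,
) -> Result<String, jsonwebtoken::errors::Error> {
    let data = decode::<Claims>(token, key, &Validation::default())?;
    Ok(data.claims.sub)
}
```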

103
src/client_server/state.rs

@@ -1,8 +1,7 @@
use std::sync::Arc;
use crate::{
database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma,
};
use super::State;
use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
@@ -10,8 +9,8 @@ use ruma::{
},
events::{
room::{
canonical_alias::RoomCanonicalAliasEventContent,
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
canonical_alias::CanonicalAliasEventContent,
history_visibility::{HistoryVisibility, HistoryVisibilityEventContent},
},
AnyStateEventContent, EventType,
},
@@ -22,20 +21,13 @@ use ruma::{
#[cfg(feature = "conduit_bin")]
use rocket::{get, put};
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`
///
/// Sends a state event into the room.
///
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
/// - If event is new canonical_alias: Rejects if alias is incorrect
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn send_state_event_for_key_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<send_state_event::Request<'_>>,
) -> ConduitResult<send_state_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -50,38 +42,22 @@ pub async fn send_state_event_for_key_route(
)
.await?;
db.flush()?;
db.flush().await?;
let event_id = (*event_id).to_owned();
Ok(send_state_event::Response { event_id }.into())
}
/// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}`
///
/// Sends a state event into the room.
///
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
/// - If event is new canonical_alias: Rejects if alias is incorrect
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn send_state_event_for_empty_key_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<send_state_event::Request<'_>>,
) -> ConduitResult<send_state_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
// Forbid m.room.encryption if encryption is disabled
if &body.event_type == "m.room.encryption" && !db.globals.allow_encryption() {
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Encryption has been disabled",
));
}
let event_id = send_state_event_for_key_helper(
&db,
sender_user,
@@ -92,24 +68,18 @@ pub async fn send_state_event_for_empty_key_route(
)
.await?;
db.flush()?;
db.flush().await?;
let event_id = (*event_id).to_owned();
Ok(send_state_event::Response { event_id }.into())
}
/// # `GET /_matrix/client/r0/rooms/{roomid}/state`
///
/// Get all state events for a room.
///
/// - If not joined: Only works if current room history visibility is world readable
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/rooms/<_>/state", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_state_events_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_state_events::Request<'_>>,
) -> ConduitResult<get_state_events::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -122,13 +92,13 @@ pub async fn get_state_events_route(
db.rooms
.room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
.map(|event| {
serde_json::from_str(event.content.get())
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
serde_json::from_value::<HistoryVisibilityEventContent>(event.content.clone())
.map_err(|_| {
Error::bad_database(
"Invalid room history visibility event in database.",
)
})
.map(|e| e.history_visibility)
}),
Some(Ok(HistoryVisibility::WorldReadable))
)
@@ -150,18 +120,13 @@ pub async fn get_state_events_route(
.into())
}
/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}`
///
/// Get single state event of a room.
///
/// - If not joined: Only works if current room history visibility is world readable
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_state_events_for_key_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_state_events_for_key::Request<'_>>,
) -> ConduitResult<get_state_events_for_key::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -174,13 +139,13 @@ pub async fn get_state_events_for_key_route(
db.rooms
.room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
.map(|event| {
serde_json::from_str(event.content.get())
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
serde_json::from_value::<HistoryVisibilityEventContent>(event.content.clone())
.map_err(|_| {
Error::bad_database(
"Invalid room history visibility event in database.",
)
})
.map(|e| e.history_visibility)
}),
Some(Ok(HistoryVisibility::WorldReadable))
)
@@ -200,24 +165,19 @@ pub async fn get_state_events_for_key_route(
))?;
Ok(get_state_events_for_key::Response {
content: serde_json::from_str(event.content.get())
content: serde_json::from_value(event.content.clone())
.map_err(|_| Error::bad_database("Invalid event content in database"))?,
}
.into())
}
/// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}`
///
/// Get single state event of a room.
///
/// - If not joined: Only works if current room history visibility is world readable
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_state_events_for_empty_key_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_state_events_for_key::Request<'_>>,
) -> ConduitResult<get_state_events_for_key::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -230,13 +190,13 @@ pub async fn get_state_events_for_empty_key_route(
db.rooms
.room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
.map(|event| {
serde_json::from_str(event.content.get())
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
serde_json::from_value::<HistoryVisibilityEventContent>(event.content.clone())
.map_err(|_| {
Error::bad_database(
"Invalid room history visibility event in database.",
)
})
.map(|e| e.history_visibility)
}),
Some(Ok(HistoryVisibility::WorldReadable))
)
@@ -256,26 +216,24 @@ pub async fn get_state_events_for_empty_key_route(
))?;
Ok(get_state_events_for_key::Response {
content: serde_json::from_str(event.content.get())
content: serde_json::from_value(event.content.clone())
.map_err(|_| Error::bad_database("Invalid event content in database"))?,
}
.into())
}
async fn send_state_event_for_key_helper(
pub async fn send_state_event_for_key_helper(
db: &Database,
sender: &UserId,
room_id: &RoomId,
event_type: EventType,
json: &Raw<AnyStateEventContent>,
state_key: String,
) -> Result<Arc<EventId>> {
) -> Result<EventId> {
let sender_user = sender;
// TODO: Review this check, error if event is unparsable, use event type, allow alias if it
// previously existed
if let Ok(canonical_alias) =
serde_json::from_str::<RoomCanonicalAliasEventContent>(json.json().get())
serde_json::from_str::<CanonicalAliasEventContent>(json.json().get())
{
let mut aliases = canonical_alias.alt_aliases.clone();
@@ -300,16 +258,6 @@ async fn send_state_event_for_key_helper(
}
}
let mutex_state = Arc::clone(
db.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(room_id.to_owned())
.or_default(),
);
let state_lock = mutex_state.lock().await;
let event_id = db.rooms.build_and_append_pdu(
PduBuilder {
event_type,
@@ -318,10 +266,9 @@ async fn send_state_event_for_key_helper(
state_key: Some(state_key),
redacts: None,
},
sender_user,
room_id,
db,
&state_lock,
&sender_user,
&room_id,
&db,
)?;
Ok(event_id)
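All three `get_state_*` routes above gate unauthenticated reads on the same world-readable check. A sketch of that check in isolation, using the event-content type names from the `caches` side of this diff; missing or undeserializable content is treated as not world readable, exactly as the `matches!` pattern above does:

```rust
use ruma::events::room::history_visibility::{
    HistoryVisibility, HistoryVisibilityEventContent,
};

// True only when the room's current m.room.history_visibility content
// deserializes to `world_readable`; anything missing or invalid stays private.
fn is_world_readable(content: Option<&serde_json::Value>) -> bool {
    content
        .and_then(|c| serde_json::from_value::<HistoryVisibilityEventContent>(c.clone()).ok())
        .map_or(false, |c| {
            matches!(c.history_visibility, HistoryVisibility::WorldReadable)
        })
}
```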

550
src/client_server/sync.rs

@@ -1,21 +1,19 @@
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse};
use super::State;
use crate::{ConduitResult, Database, Error, Result, Ruma, RumaResponse};
use log::error;
use ruma::{
api::client::r0::{sync::sync_events, uiaa::UiaaResponse},
events::{
room::member::{MembershipState, RoomMemberEventContent},
AnySyncEphemeralRoomEvent, EventType,
},
events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType},
serde::Raw,
DeviceId, RoomId, UserId,
};
use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
convert::TryInto,
collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto},
sync::Arc,
time::Duration,
};
use tokio::sync::watch::Sender;
use tracing::error;
#[cfg(feature = "conduit_bin")]
use rocket::{get, tokio};
@@ -25,49 +23,25 @@ use rocket::{get, tokio};
/// Synchronize the client's state with the latest state on the server.
///
/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a
/// previous request for incremental syncs.
///
/// Calling this endpoint without a `since` parameter returns:
/// - Some of the most recent events of each timeline
/// - Notification counts for each room
/// - Joined and invited member counts, heroes
/// - All state events
///
/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns:
/// For joined rooms:
/// - Some of the most recent events of each timeline that happened after since
/// - If the user joined the room after `since`: All state events and device list updates in that room
/// - If the user was already in the room: A list of all events that are in the state now, but were
/// not in the state at `since`
/// - If the state we send contains a member event: Joined and invited member counts, heroes
/// - Device list updates that happened after `since`
/// - If there are events in the timeline we send or the user sent an updated read marker: Notification counts
/// - EDUs that are active now (read receipts, typing updates, presence)
///
/// For invited rooms:
/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite
///
/// For left rooms:
/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave)
///
/// - Sync is handled in an async task, multiple requests from the same device with the same
/// `since` will be cached
/// previous request.
/// - Calling this endpoint without a `since` parameter will return all recent events, the state
/// of all rooms and more data. This should only be called on the initial login of the device.
/// - To get incremental updates, you can call this endpoint with a `since` parameter. This will
/// return all recent events, state updates and more data that happened since the last /sync
/// request.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/sync", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn sync_events_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<sync_events::Request<'_>>,
) -> Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
let sender_user = body.sender_user.expect("user is authenticated");
let sender_device = body.sender_device.expect("user is authenticated");
let body = body.body;
let arc_db = Arc::new(db);
) -> std::result::Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut rx = match arc_db
let mut rx = match db
.globals
.sync_receivers
.write()
@@ -78,7 +52,7 @@ pub async fn sync_events_route(
let (tx, rx) = tokio::sync::watch::channel(None);
tokio::spawn(sync_helper_wrapper(
Arc::clone(&arc_db),
Arc::clone(&db),
sender_user.clone(),
sender_device.clone(),
body.since.clone(),
@@ -94,7 +68,7 @@ pub async fn sync_events_route(
let (tx, rx) = tokio::sync::watch::channel(None);
tokio::spawn(sync_helper_wrapper(
Arc::clone(&arc_db),
Arc::clone(&db),
sender_user.clone(),
sender_device.clone(),
body.since.clone(),
@@ -114,9 +88,7 @@ pub async fn sync_events_route(
let we_have_to_wait = rx.borrow().is_none();
if we_have_to_wait {
if let Err(e) = rx.changed().await {
error!("Error waiting for sync: {}", e);
}
let _ = rx.changed().await;
}
let result = match rx
@@ -131,9 +103,9 @@ pub async fn sync_events_route(
result
}
async fn sync_helper_wrapper(
db: Arc<DatabaseGuard>,
sender_user: Box<UserId>,
pub async fn sync_helper_wrapper(
db: Arc<Database>,
sender_user: UserId,
sender_device: Box<DeviceId>,
since: Option<String>,
full_state: bool,
@@ -170,20 +142,18 @@ async fn sync_helper_wrapper(
}
}
drop(db);
let _ = tx.send(Some(r.map(|(r, _)| r.into())));
}
async fn sync_helper(
db: Arc<DatabaseGuard>,
sender_user: Box<UserId>,
db: Arc<Database>,
sender_user: UserId,
sender_device: Box<DeviceId>,
since: Option<String>,
full_state: bool,
timeout: Option<Duration>,
// bool = caching allowed
) -> Result<(sync_events::Response, bool), Error> {
) -> std::result::Result<(sync_events::Response, bool), Error> {
// TODO: match body.set_presence {
db.rooms.edus.ping_presence(&sender_user)?;
@@ -211,26 +181,12 @@ async fn sync_helper(
.filter_map(|r| r.ok()),
);
let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::<Vec<_>>();
for room_id in all_joined_rooms {
for room_id in db.rooms.rooms_joined(&sender_user) {
let room_id = room_id?;
// Get and drop the lock to wait for remaining operations to finish
// This will make sure that we have all events until next_batch
let mutex_insert = Arc::clone(
db.globals
.roomid_mutex_insert
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let insert_lock = mutex_insert.lock().unwrap();
drop(insert_lock);
let mut non_timeline_pdus = db
.rooms
.pdus_until(&sender_user, &room_id, u64::MAX)?
.pdus_until(&sender_user, &room_id, u64::MAX)
.filter_map(|r| {
// Filter out buggy events
if r.is_err() {
@@ -245,13 +201,13 @@ async fn sync_helper(
});
// Take the last 10 events for the timeline
let timeline_pdus: Vec<_> = non_timeline_pdus
let timeline_pdus = non_timeline_pdus
.by_ref()
.take(10)
.collect::<Vec<_>>()
.into_iter()
.rev()
.collect();
.collect::<Vec<_>>();
let send_notification_counts = !timeline_pdus.is_empty()
|| db
@@ -266,75 +222,25 @@ async fn sync_helper(
// Database queries:
let current_shortstatehash = db
.rooms
.current_shortstatehash(&room_id)?
.expect("All rooms have state");
let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?;
// Calculates joined_member_count, invited_member_count and heroes
let calculate_counts = || {
let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0);
let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0);
// Recalculate heroes (first 5 members)
let mut heroes = Vec::new();
if joined_member_count + invited_member_count <= 5 {
// Go through all PDUs and for each member event, check if the user is still joined or
// invited until we have 5 or we reach the end
for hero in db
.rooms
.all_pdus(&sender_user, &room_id)?
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == EventType::RoomMember)
.map(|(_, pdu)| {
let content: RoomMemberEventContent =
serde_json::from_str(pdu.content.get()).map_err(|_| {
Error::bad_database("Invalid member event in database.")
})?;
if let Some(state_key) = &pdu.state_key {
let user_id = UserId::parse(state_key.clone()).map_err(|_| {
Error::bad_database("Invalid UserId in member PDU.")
})?;
let current_shortstatehash = db.rooms.current_shortstatehash(&room_id)?;
// The membership was and still is invite or join
if matches!(
content.membership,
MembershipState::Join | MembershipState::Invite
) && (db.rooms.is_joined(&user_id, &room_id)?
|| db.rooms.is_invited(&user_id, &room_id)?)
{
Ok::<_, Error>(Some(state_key.clone()))
} else {
Ok(None)
}
} else {
Ok(None)
}
})
// Filter out buggy users
.filter_map(|u| u.ok())
// Filter for possible heroes
.flatten()
{
if heroes.contains(&hero) || hero == sender_user.as_str() {
continue;
}
// This type is Option<Option<_>>. The outer Option is None when there is no event between
// since and the current room state, meaning there should be no updates.
// The inner Option is None when there is an event, but there is no state hash associated
// with it. This can happen for the RoomCreate event, so all updates should arrive.
let first_pdu_before_since = db.rooms.pdus_until(&sender_user, &room_id, since).next();
heroes.push(hero);
}
}
let pdus_after_since = db
.rooms
.pdus_after(&sender_user, &room_id, since)
.next()
.is_some();
Ok::<_, Error>((
Some(joined_member_count),
Some(invited_member_count),
heroes,
))
};
let since_shortstatehash = first_pdu_before_since.as_ref().map(|pdu| {
db.rooms
.pdu_shortstatehash(&pdu.as_ref().ok()?.1.event_id)
.ok()?
});
let (
heroes,
@@ -342,119 +248,121 @@ async fn sync_helper(
invited_member_count,
joined_since_last_sync,
state_events,
) = if since_shortstatehash.is_none() {
// Probably since = 0, we will do an initial sync
let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
let state_events: Vec<_> = current_state_ids
) = if pdus_after_since && Some(current_shortstatehash) != since_shortstatehash {
let current_state = db.rooms.room_state_full(&room_id)?;
let current_members = current_state
.iter()
.map(|(_, id)| db.rooms.get_pdu(id))
.filter_map(|r| r.ok().flatten())
.collect();
(
heroes,
joined_member_count,
invited_member_count,
true,
state_events,
)
} else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
// No state changes
(Vec::new(), None, None, false, Vec::new())
} else {
// Incremental /sync
let since_shortstatehash = since_shortstatehash.unwrap();
let since_sender_member: Option<RoomMemberEventContent> = db
.rooms
.state_get(
since_shortstatehash,
&EventType::RoomMember,
sender_user.as_str(),
)?
.and_then(|pdu| {
serde_json::from_str(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid PDU in database."))
.ok()
});
let joined_since_last_sync = since_sender_member
.map_or(true, |member| member.membership != MembershipState::Join);
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?;
let state_events = if joined_since_last_sync {
current_state_ids
.iter()
.map(|(_, id)| db.rooms.get_pdu(id))
.filter_map(|r| r.ok().flatten())
.collect::<Vec<_>>()
} else {
current_state_ids
.iter()
.filter(|(key, id)| since_state_ids.get(key) != Some(id))
.map(|(_, id)| db.rooms.get_pdu(id))
.filter_map(|r| r.ok().flatten())
.collect()
};
let encrypted_room = db
.rooms
.state_get(current_shortstatehash, &EventType::RoomEncryption, "")?
.filter(|(key, _)| key.0 == EventType::RoomMember)
.map(|(key, value)| (&key.1, value)) // Only keep state key
.collect::<Vec<_>>();
let encrypted_room = current_state
.get(&(EventType::RoomEncryption, "".to_owned()))
.is_some();
let since_state = since_shortstatehash
.as_ref()
.map(|since_shortstatehash| {
since_shortstatehash
.map(|since_shortstatehash| db.rooms.state_full(since_shortstatehash))
.transpose()
})
.transpose()?;
let since_encryption =
db.rooms
.state_get(since_shortstatehash, &EventType::RoomEncryption, "")?;
let since_encryption = since_state.as_ref().map(|state| {
state
.as_ref()
.map(|state| state.get(&(EventType::RoomEncryption, "".to_owned())))
});
// Calculations:
let new_encrypted_room = encrypted_room && since_encryption.is_none();
let new_encrypted_room =
encrypted_room && since_encryption.map_or(true, |encryption| encryption.is_none());
let send_member_count = since_state.as_ref().map_or(true, |since_state| {
since_state.as_ref().map_or(true, |since_state| {
current_members.len()
!= since_state
.iter()
.filter(|(key, _)| key.0 == EventType::RoomMember)
.count()
})
});
let send_member_count = state_events
.iter()
.any(|event| event.kind == EventType::RoomMember);
let since_sender_member = since_state.as_ref().map(|since_state| {
since_state.as_ref().and_then(|state| {
state
.get(&(EventType::RoomMember, sender_user.as_str().to_owned()))
.and_then(|pdu| {
serde_json::from_value::<
Raw<ruma::events::room::member::MemberEventContent>,
>(pdu.content.clone())
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid PDU in database."))
.ok()
})
})
});
if encrypted_room {
for state_event in &state_events {
if state_event.kind != EventType::RoomMember {
continue;
}
if let Some(state_key) = &state_event.state_key {
let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == sender_user {
continue;
}
let new_membership = serde_json::from_str::<RoomMemberEventContent>(
state_event.content.get(),
)
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
.membership;
match new_membership {
MembershipState::Join => {
// A new user joined an encrypted room
if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? {
device_list_updates.insert(user_id);
}
for (user_id, current_member) in current_members {
let current_membership = serde_json::from_value::<
Raw<ruma::events::room::member::MemberEventContent>,
>(current_member.content.clone())
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
.membership;
let since_membership =
since_state
.as_ref()
.map_or(MembershipState::Leave, |since_state| {
since_state
.as_ref()
.and_then(|since_state| {
since_state
.get(&(EventType::RoomMember, user_id.clone()))
.and_then(|since_member| {
serde_json::from_value::<
Raw<ruma::events::room::member::MemberEventContent>,
>(
since_member.content.clone()
)
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| {
Error::bad_database("Invalid PDU in database.")
})
.ok()
})
})
.map_or(MembershipState::Leave, |member| member.membership)
});
let user_id = UserId::try_from(user_id.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
match (since_membership, current_membership) {
(MembershipState::Leave, MembershipState::Join) => {
// A new user joined an encrypted room
if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? {
device_list_updates.insert(user_id);
}
MembershipState::Leave => {
// Write down users that have left encrypted rooms we are in
left_encrypted_users.insert(user_id);
}
_ => {}
}
// TODO: Remove, this should never happen here, right?
(MembershipState::Join, MembershipState::Leave) => {
// Write down users that have left encrypted rooms we are in
left_encrypted_users.insert(user_id);
}
_ => {}
}
}
}
let joined_since_last_sync = since_sender_member.map_or(true, |member| {
member.map_or(true, |member| member.membership != MembershipState::Join)
});
if joined_since_last_sync && encrypted_room || new_encrypted_room {
// If the user is in a new encrypted room, give them all joined users
device_list_updates.extend(
@@ -474,11 +382,100 @@ async fn sync_helper(
}
let (joined_member_count, invited_member_count, heroes) = if send_member_count {
calculate_counts()?
let joined_member_count = db.rooms.room_members(&room_id).count();
let invited_member_count = db.rooms.room_members_invited(&room_id).count();
// Recalculate heroes (first 5 members)
let mut heroes = Vec::new();
if joined_member_count + invited_member_count <= 5 {
// Go through all PDUs and for each member event, check if the user is still joined or
// invited until we have 5 or we reach the end
for hero in db
.rooms
.all_pdus(&sender_user, &room_id)
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == EventType::RoomMember)
.map(|(_, pdu)| {
let content = serde_json::from_value::<
ruma::events::room::member::MemberEventContent,
>(pdu.content.clone())
.map_err(|_| {
Error::bad_database("Invalid member event in database.")
})?;
if let Some(state_key) = &pdu.state_key {
let user_id =
UserId::try_from(state_key.clone()).map_err(|_| {
Error::bad_database("Invalid UserId in member PDU.")
})?;
// The membership was and still is invite or join
if matches!(
content.membership,
MembershipState::Join | MembershipState::Invite
) && (db.rooms.is_joined(&user_id, &room_id)?
|| db.rooms.is_invited(&user_id, &room_id)?)
{
Ok::<_, Error>(Some(state_key.clone()))
} else {
Ok(None)
}
} else {
Ok(None)
}
})
// Filter out buggy users
.filter_map(|u| u.ok())
// Filter for possible heroes
.flatten()
{
if heroes.contains(&hero) || hero == sender_user.as_str() {
continue;
}
heroes.push(hero);
}
}
(
Some(joined_member_count),
Some(invited_member_count),
heroes,
)
} else {
(None, None, Vec::new())
};
let state_events = if joined_since_last_sync {
current_state
.iter()
.map(|(_, pdu)| pdu.to_sync_state_event())
.collect()
} else {
match since_state {
None => Vec::new(),
Some(Some(since_state)) => current_state
.iter()
.filter(|(key, value)| {
since_state.get(key).map(|e| &e.event_id) != Some(&value.event_id)
})
.filter(|(_, value)| {
!timeline_pdus.iter().any(|(_, timeline_pdu)| {
timeline_pdu.kind == value.kind
&& timeline_pdu.state_key == value.state_key
})
})
.map(|(_, pdu)| pdu.to_sync_state_event())
.collect(),
Some(None) => current_state
.iter()
.map(|(_, pdu)| pdu.to_sync_state_event())
.collect(),
}
};
(
heroes,
joined_member_count,
@@ -486,6 +483,8 @@ async fn sync_helper(
joined_since_last_sync,
state_events,
)
} else {
(Vec::new(), None, None, false, Vec::new())
};
// Look for device list updates in this room
@@ -523,18 +522,18 @@ async fn sync_helper(
Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string()))
})?;
let room_events: Vec<_> = timeline_pdus
let room_events = timeline_pdus
.iter()
.map(|(_, pdu)| pdu.to_sync_room_event())
.collect();
.collect::<Vec<_>>();
let mut edus: Vec<_> = db
let mut edus = db
.rooms
.edus
.readreceipts_since(&room_id, since)
.filter_map(|r| r.ok()) // Filter out buggy events
.map(|(_, _, v)| v)
.collect();
.collect::<Vec<_>>();
if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since {
edus.push(
@@ -548,10 +547,6 @@ async fn sync_helper(
);
}
// Save the state after this sync so we can send the correct state diff next sync
db.rooms
.associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?;
let joined_room = sync_events::JoinedRoom {
account_data: sync_events::RoomAccountData {
events: db
@@ -563,7 +558,7 @@ async fn sync_helper(
.map_err(|_| Error::bad_database("Invalid account event in database."))
.ok()
})
.collect(),
.collect::<Vec<_>>(),
},
summary: sync_events::RoomSummary {
heroes,
@@ -580,10 +575,7 @@ async fn sync_helper(
events: room_events,
},
state: sync_events::State {
events: state_events
.iter()
.map(|pdu| pdu.to_sync_state_event())
.collect(),
events: state_events,
},
ephemeral: sync_events::Ephemeral { events: edus },
};
@@ -599,10 +591,10 @@ async fn sync_helper(
.presence_since(&room_id, since, &db.rooms, &db.globals)?
{
match presence_updates.entry(user_id) {
Entry::Vacant(v) => {
hash_map::Entry::Vacant(v) => {
v.insert(presence);
}
Entry::Occupied(mut o) => {
hash_map::Entry::Occupied(mut o) => {
let p = o.get_mut();
// Update existing presence event with more info
@@ -628,22 +620,8 @@ async fn sync_helper(
}
let mut left_rooms = BTreeMap::new();
let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect();
for result in all_left_rooms {
for result in db.rooms.rooms_left(&sender_user) {
let (room_id, left_state_events) = result?;
// Get and drop the lock to wait for remaining operations to finish
let mutex_insert = Arc::clone(
db.globals
.roomid_mutex_insert
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let insert_lock = mutex_insert.lock().unwrap();
drop(insert_lock);
let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;
// Left before last sync
@@ -668,22 +646,8 @@ async fn sync_helper(
}
let mut invited_rooms = BTreeMap::new();
let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect();
for result in all_invited_rooms {
for result in db.rooms.rooms_invited(&sender_user) {
let (room_id, invite_state_events) = result?;
// Get and drop the lock to wait for remaining operations to finish
let mutex_insert = Arc::clone(
db.globals
.roomid_mutex_insert
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let insert_lock = mutex_insert.lock().unwrap();
drop(insert_lock);
let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;
// Invited before last sync
@@ -732,12 +696,11 @@ async fn sync_helper(
leave: left_rooms,
join: joined_rooms,
invite: invited_rooms,
knock: BTreeMap::new(), // TODO
},
presence: sync_events::Presence {
events: presence_updates
.into_iter()
.map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
.map(|(_, v)| Raw::from(v))
.collect(),
},
account_data: sync_events::GlobalAccountData {
@@ -750,13 +713,19 @@ async fn sync_helper(
.map_err(|_| Error::bad_database("Invalid account event in database."))
.ok()
})
.collect(),
.collect::<Vec<_>>(),
},
device_lists: sync_events::DeviceLists {
changed: device_list_updates.into_iter().collect(),
left: device_list_left.into_iter().collect(),
},
device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?,
device_one_time_keys_count: if db.users.last_one_time_keys_update(&sender_user)? > since
|| since == 0
{
db.users.count_one_time_keys(&sender_user, &sender_device)?
} else {
BTreeMap::new()
},
to_device: sync_events::ToDevice {
events: db
.users
@@ -770,6 +739,7 @@ async fn sync_helper(
&& response.presence.is_empty()
&& response.account_data.is_empty()
&& response.device_lists.is_empty()
&& response.device_one_time_keys_count.is_empty()
&& response.to_device.is_empty()
{
// Hang a few seconds so requests are not spammed
@@ -794,7 +764,7 @@ fn share_encrypted_room(
) -> Result<bool> {
Ok(db
.rooms
.get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
.filter_map(|r| r.ok())
.filter(|room_id| room_id != ignore_room)
.filter_map(|other_room_id| {
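The `sync_receivers` plumbing in this file shares one computation among concurrent `/sync` calls that carry the same `since` token: the first caller spawns the work and publishes the result on a watch channel, latecomers just subscribe. A simplified, self-contained sketch of that caching scheme (the key and payload types here are stand-ins, not the real `sync_events::Response`):

```rust
use std::collections::{hash_map::Entry, HashMap};
use std::sync::{Arc, RwLock};
use tokio::sync::watch;

type SyncPayload = String; // stand-in for sync_events::Response

#[derive(Default)]
struct SyncCache {
    // One watch channel per (user, since) pair.
    receivers: RwLock<HashMap<(String, Option<String>), watch::Receiver<Option<SyncPayload>>>>,
}

impl SyncCache {
    async fn sync(self: Arc<Self>, user: String, since: Option<String>) -> Option<SyncPayload> {
        let mut rx = match self
            .receivers
            .write()
            .unwrap()
            .entry((user.clone(), since.clone()))
        {
            // Someone is already computing this sync; share their channel.
            Entry::Occupied(o) => o.get().clone(),
            Entry::Vacant(v) => {
                let (tx, rx) = watch::channel(None);
                v.insert(rx.clone());
                // The real code spawns sync_helper_wrapper here; we fake the work.
                tokio::spawn(async move {
                    let _ = tx.send(Some(format!("sync for {} since {:?}", user, since)));
                });
                rx
            }
        };
        // First caller and latecomers alike wait until the shared result is published.
        if rx.borrow().is_none() {
            let _ = rx.changed().await;
        }
        rx.borrow().clone()
    }
}
```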

57
src/client_server/tag.rs

@@ -1,37 +1,30 @@
use crate::{database::DatabaseGuard, ConduitResult, Ruma};
use super::State;
use crate::{ConduitResult, Database, Ruma};
use ruma::{
api::client::r0::tag::{create_tag, delete_tag, get_tags},
events::{
tag::{TagEvent, TagEventContent},
EventType,
},
events::EventType,
};
use std::collections::BTreeMap;
use std::{collections::BTreeMap, sync::Arc};
#[cfg(feature = "conduit_bin")]
use rocket::{delete, get, put};
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
///
/// Adds a tag to the room.
///
/// - Inserts the tag into the tag event of the room account data.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn update_tag_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<create_tag::Request<'_>>,
) -> ConduitResult<create_tag::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut tags_event = db
.account_data
.get(Some(&body.room_id), sender_user, EventType::Tag)?
.unwrap_or_else(|| TagEvent {
content: TagEventContent {
.get::<ruma::events::tag::TagEvent>(Some(&body.room_id), sender_user, EventType::Tag)?
.unwrap_or_else(|| ruma::events::tag::TagEvent {
content: ruma::events::tag::TagEventContent {
tags: BTreeMap::new(),
},
});
@@ -48,32 +41,27 @@ pub async fn update_tag_route(
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(create_tag::Response {}.into())
Ok(create_tag::Response.into())
}
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
///
/// Deletes a tag from the room.
///
/// - Removes the tag from the tag event of the room account data.
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn delete_tag_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<delete_tag::Request<'_>>,
) -> ConduitResult<delete_tag::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut tags_event = db
.account_data
.get(Some(&body.room_id), sender_user, EventType::Tag)?
.unwrap_or_else(|| TagEvent {
content: TagEventContent {
.get::<ruma::events::tag::TagEvent>(Some(&body.room_id), sender_user, EventType::Tag)?
.unwrap_or_else(|| ruma::events::tag::TagEvent {
content: ruma::events::tag::TagEventContent {
tags: BTreeMap::new(),
},
});
@@ -87,23 +75,18 @@ pub async fn delete_tag_route(
&db.globals,
)?;
db.flush()?;
db.flush().await?;
Ok(delete_tag::Response {}.into())
Ok(delete_tag::Response.into())
}
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
///
/// Returns tags on the room.
///
/// - Gets the tag event of the room account data.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_tags_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<get_tags::Request<'_>>,
) -> ConduitResult<get_tags::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@@ -111,9 +94,9 @@ pub async fn get_tags_route(
Ok(get_tags::Response {
tags: db
.account_data
.get(Some(&body.room_id), sender_user, EventType::Tag)?
.unwrap_or_else(|| TagEvent {
content: TagEventContent {
.get::<ruma::events::tag::TagEvent>(Some(&body.room_id), sender_user, EventType::Tag)?
.unwrap_or_else(|| ruma::events::tag::TagEvent {
content: ruma::events::tag::TagEventContent {
tags: BTreeMap::new(),
},
})
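Each tag route above starts from the stored `m.tag` account-data event, or an empty one when the user has none yet. A sketch of that get-or-default step, matching the construction used in this diff:

```rust
use ruma::events::tag::{TagEvent, TagEventContent};
use std::collections::BTreeMap;

// Start from the stored event if present, otherwise an empty tag map.
fn tags_or_default(existing: Option<TagEvent>) -> TagEvent {
    existing.unwrap_or_else(|| TagEvent {
        content: TagEventContent {
            tags: BTreeMap::new(),
        },
    })
}
```

After mutating `content.tags`, the routes write the whole event back through `db.account_data.update(...)`, so a delete is just a `remove` on the map followed by that update.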

3
src/client_server/thirdparty.rs

@@ -5,9 +5,6 @@ use ruma::api::client::r0::thirdparty::get_protocols;
use rocket::get;
use std::collections::BTreeMap;
/// # `GET /_matrix/client/r0/thirdparty/protocols`
///
/// TODO: Fetches all metadata about protocols supported by the homeserver.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/thirdparty/protocols")

54
src/client_server/to_device.rs

@@ -1,36 +1,28 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
use super::State;
use crate::{ConduitResult, Database, Error, Ruma};
use ruma::{
api::{
client::{error::ErrorKind, r0::to_device::send_event_to_device},
federation::{self, transactions::edu::DirectDeviceContent},
},
events::EventType,
api::client::{error::ErrorKind, r0::to_device::send_event_to_device},
to_device::DeviceIdOrAllDevices,
};
#[cfg(feature = "conduit_bin")]
use rocket::put;
/// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}`
///
/// Send a to-device event to a set of client devices.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn send_event_to_device_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<send_event_to_device::Request<'_>>,
) -> ConduitResult<send_event_to_device::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_deref();
// TODO: uncomment when https://github.com/vector-im/element-android/issues/3589 is solved
// Check if this is a new transaction id
/*
if db
.transaction_ids
.existing_txnid(sender_user, sender_device, &body.txn_id)?
@@ -38,38 +30,14 @@ pub async fn send_event_to_device_route(
{
return Ok(send_event_to_device::Response.into());
}
*/
for (target_user_id, map) in &body.messages {
for (target_device_id_maybe, event) in map {
if target_user_id.server_name() != db.globals.server_name() {
let mut map = BTreeMap::new();
map.insert(target_device_id_maybe.clone(), event.clone());
let mut messages = BTreeMap::new();
messages.insert(target_user_id.clone(), map);
db.sending.send_reliable_edu(
target_user_id.server_name(),
serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(
DirectDeviceContent {
sender: sender_user.clone(),
ev_type: EventType::from(&body.event_type),
message_id: body.txn_id.clone(),
messages,
},
))
.expect("DirectToDevice EDU can be serialized"),
db.globals.next_count()?,
)?;
continue;
}
match target_device_id_maybe {
DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event(
sender_user,
target_user_id,
target_device_id,
&target_user_id,
&target_device_id,
&body.event_type,
event.deserialize_as().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
@@ -78,10 +46,10 @@ pub async fn send_event_to_device_route(
)?,
DeviceIdOrAllDevices::AllDevices => {
for target_device_id in db.users.all_device_ids(target_user_id) {
for target_device_id in db.users.all_device_ids(&target_user_id) {
db.users.add_to_device_event(
sender_user,
target_user_id,
&target_user_id,
&target_device_id?,
&body.event_type,
event.deserialize_as().map_err(|_| {
@@ -99,7 +67,7 @@ pub async fn send_event_to_device_route(
db.transaction_ids
.add_txnid(sender_user, sender_device, &body.txn_id, &[])?;
db.flush()?;
db.flush().await?;
Ok(send_event_to_device::Response {}.into())
Ok(send_event_to_device::Response.into())
}
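The route above fans a to-device event out either to one device or to every device of the target user, depending on the `DeviceIdOrAllDevices` wildcard. A sketch of that resolution step on its own (`all_devices` stands in for `db.users.all_device_ids(...)`):

```rust
use ruma::to_device::DeviceIdOrAllDevices;

// Resolve the wildcard target into a concrete list of device ids.
fn target_devices(target: &DeviceIdOrAllDevices, all_devices: Vec<String>) -> Vec<String> {
    match target {
        DeviceIdOrAllDevices::DeviceId(device_id) => vec![device_id.to_string()],
        DeviceIdOrAllDevices::AllDevices => all_devices,
    }
}
```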

16
src/client_server/typing.rs

@@ -1,27 +1,27 @@
use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma};
use std::sync::Arc;
use super::State;
use crate::{utils, ConduitResult, Database, Ruma};
use create_typing_event::Typing;
use ruma::api::client::r0::typing::create_typing_event;
#[cfg(feature = "conduit_bin")]
use rocket::put;
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
///
/// Sets the typing state of the sender user.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub fn create_typing_event_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<create_typing_event::Request<'_>>,
) -> ConduitResult<create_typing_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if let Typing::Yes(duration) = body.state {
db.rooms.edus.typing_add(
sender_user,
&sender_user,
&body.room_id,
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
&db.globals,
@@ -29,8 +29,8 @@ pub fn create_typing_event_route(
} else {
db.rooms
.edus
.typing_remove(sender_user, &body.room_id, &db.globals)?;
.typing_remove(&sender_user, &body.room_id, &db.globals)?;
}
Ok(create_typing_event::Response {}.into())
Ok(create_typing_event::Response.into())
}
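The route converts the relative `Typing::Yes(duration)` sent by the client into the absolute millisecond timestamp that `typing_add` stores. A trivial sketch of that conversion, mirroring the arithmetic above:

```rust
use std::time::Duration;

// Absolute expiry in ms since the unix epoch, mirroring
// `duration.as_millis() as u64 + utils::millis_since_unix_epoch()`.
fn typing_expiry(duration: Duration, now_ms: u64) -> u64 {
    now_ms + duration.as_millis() as u64
}
```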

2
src/client_server/unversioned.rs

@@ -10,7 +10,7 @@ use rocket::get;
///
/// - Versions take the form MAJOR.MINOR.PATCH
/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value
/// - Unstable features are namespaced and may include version information in their name
/// - Unstable features should be namespaced and may include version information in their name
///
/// Note: Unstable features are used while developing new features. Clients should avoid using
/// unstable features in their stable releases

36
src/client_server/user_directory.rs

@@ -1,21 +1,19 @@
use crate::{database::DatabaseGuard, ConduitResult, Ruma};
use std::sync::Arc;
use super::State;
use crate::{ConduitResult, Database, Ruma};
use ruma::api::client::r0::user_directory::search_users;
#[cfg(feature = "conduit_bin")]
use rocket::post;
/// # `POST /_matrix/client/r0/user_directory/search`
///
/// Searches all known users for a match.
///
/// - TODO: Hide users that are not in any public rooms?
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/user_directory/search", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn search_users_route(
db: DatabaseGuard,
db: State<'_, Arc<Database>>,
body: Ruma<search_users::Request<'_>>,
) -> ConduitResult<search_users::Response> {
let limit = u64::from(body.limit) as usize;
@@ -30,22 +28,20 @@ pub async fn search_users_route(
avatar_url: db.users.avatar_url(&user_id).ok()?,
};
let user_id_matches = user
if !user
.user_id
.to_string()
.to_lowercase()
.contains(&body.search_term.to_lowercase());
let user_displayname_matches = user
.display_name
.as_ref()
.filter(|name| {
name.to_lowercase()
.contains(&body.search_term.to_lowercase())
})
.is_some();
if !user_id_matches && !user_displayname_matches {
.contains(&body.search_term.to_lowercase())
&& user
.display_name
.as_ref()
.filter(|name| {
name.to_lowercase()
.contains(&body.search_term.to_lowercase())
})
.is_none()
{
return None;
}
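The rewritten condition above is just a case-insensitive substring match over the MXID and, when set, the display name. Pulled out on its own, the rule reads:

```rust
// A user matches when the term occurs case-insensitively in the user id,
// or in the display name when one is set.
fn user_matches(user_id: &str, display_name: Option<&str>, term: &str) -> bool {
    let term = term.to_lowercase();
    user_id.to_lowercase().contains(&term)
        || display_name.map_or(false, |name| name.to_lowercase().contains(&term))
}
```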

58
src/client_server/voip.rs

@@ -1,58 +1,18 @@
use crate::{database::DatabaseGuard, ConduitResult, Ruma};
use hmac::{Hmac, Mac, NewMac};
use crate::ConduitResult;
use ruma::api::client::r0::voip::get_turn_server_info;
use ruma::SecondsSinceUnixEpoch;
use sha1::Sha1;
use std::time::{Duration, SystemTime};
type HmacSha1 = Hmac<Sha1>;
use std::time::Duration;
#[cfg(feature = "conduit_bin")]
use rocket::get;
/// # `GET /_matrix/client/r0/voip/turnServer`
///
/// TODO: Returns information about the recommended turn server.
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/r0/voip/turnServer", data = "<body>")
)]
#[tracing::instrument(skip(body, db))]
pub async fn turn_server_route(
body: Ruma<get_turn_server_info::Request>,
db: DatabaseGuard,
) -> ConduitResult<get_turn_server_info::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let turn_secret = db.globals.turn_secret();
let (username, password) = if !turn_secret.is_empty() {
let expiry = SecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
)
.expect("time is valid");
let username: String = format!("{}:{}", expiry.get(), sender_user);
let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
.expect("HMAC can take key of any size");
mac.update(username.as_bytes());
let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);
(username, password)
} else {
(
db.globals.turn_username().clone(),
db.globals.turn_password().clone(),
)
};
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))]
#[tracing::instrument]
pub async fn turn_server_route() -> ConduitResult<get_turn_server_info::Response> {
Ok(get_turn_server_info::Response {
username,
password,
uris: db.globals.turn_uris().to_vec(),
ttl: Duration::from_secs(db.globals.turn_ttl()),
username: "".to_owned(),
password: "".to_owned(),
uris: Vec::new(),
ttl: Duration::from_secs(60 * 60 * 24),
}
.into())
}
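The branch removed here (kept on `next`) derives coturn-style ephemeral TURN credentials from a shared secret: the username is `<expiry unix ts>:<mxid>` and the password is the base64 HMAC-SHA1 of that username. Condensed from the removed lines, using the same crates:

```rust
use hmac::{Hmac, Mac, NewMac};
use sha1::Sha1;

type HmacSha1 = Hmac<Sha1>;

// username = "<expiry unix ts>:<mxid>", password = base64(HMAC-SHA1(secret, username)).
fn turn_credentials(secret: &str, user_id: &str, expiry_unix_ts: u64) -> (String, String) {
    let username = format!("{}:{}", expiry_unix_ts, user_id);
    let mut mac = HmacSha1::new_from_slice(secret.as_bytes())
        .expect("HMAC can take key of any size");
    mac.update(username.as_bytes());
    let password = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);
    (username, password)
}
```

When `turn_secret` is empty, the removed code instead falls back to the static `turn_username`/`turn_password` pair from the config.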

753
src/database.rs

@@ -6,7 +6,6 @@ pub mod appservice;
pub mod globals;
pub mod key_backups;
pub mod media;
pub mod proxy;
pub mod pusher;
pub mod rooms;
pub mod sending;
@@ -17,96 +16,42 @@ pub mod users;
use crate::{utils, Error, Result};
use abstraction::DatabaseEngine;
use directories::ProjectDirs;
use log::error;
use lru_cache::LruCache;
use rocket::{
futures::{channel::mpsc, stream::FuturesUnordered, StreamExt},
outcome::{try_outcome, IntoOutcome},
request::{FromRequest, Request},
Shutdown, State,
};
use ruma::{DeviceId, EventId, RoomId, ServerName, UserId};
use serde::{de::IgnoredAny, Deserialize};
use rocket::futures::{channel::mpsc, stream::FuturesUnordered, StreamExt};
use ruma::{DeviceId, ServerName, UserId};
use serde::Deserialize;
use std::{
collections::{BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto},
collections::HashMap,
fs::{self, remove_dir_all},
io::Write,
mem::size_of,
ops::Deref,
path::Path,
sync::{Arc, Mutex, RwLock},
sync::{Arc, RwLock},
};
use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore};
use tracing::{debug, error, warn};
use self::proxy::ProxyConfig;
use tokio::sync::Semaphore;
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
server_name: Box<ServerName>,
database_path: String,
#[serde(default = "default_db_cache_capacity_mb")]
db_cache_capacity_mb: f64,
#[serde(default = "default_pdu_cache_capacity")]
pdu_cache_capacity: u32,
#[serde(default = "default_sqlite_wal_clean_second_interval")]
sqlite_wal_clean_second_interval: u32,
#[serde(default = "default_cache_capacity")]
cache_capacity: u32,
#[serde(default = "default_max_request_size")]
max_request_size: u32,
#[serde(default = "default_max_concurrent_requests")]
max_concurrent_requests: u16,
#[serde(default = "false_fn")]
#[serde(default = "true_fn")]
allow_registration: bool,
#[serde(default = "true_fn")]
allow_encryption: bool,
#[serde(default = "false_fn")]
allow_federation: bool,
#[serde(default = "true_fn")]
allow_room_creation: bool,
#[serde(default = "false_fn")]
pub allow_jaeger: bool,
#[serde(default = "false_fn")]
pub tracing_flame: bool,
#[serde(default)]
proxy: ProxyConfig,
jwt_secret: Option<String>,
#[serde(default = "Vec::new")]
trusted_servers: Vec<Box<ServerName>>,
#[serde(default = "default_log")]
pub log: String,
#[serde(default)]
turn_username: String,
#[serde(default)]
turn_password: String,
#[serde(default = "Vec::new")]
turn_uris: Vec<String>,
#[serde(default)]
turn_secret: String,
#[serde(default = "default_turn_ttl")]
turn_ttl: u64,
#[serde(flatten)]
catchall: BTreeMap<String, IgnoredAny>,
}
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];
impl Config {
pub fn warn_deprecated(&self) {
let mut was_deprecated = false;
for key in self
.catchall
.keys()
.filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
{
warn!("Config parameter {} is deprecated", key);
was_deprecated = true;
}
if was_deprecated {
warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted");
}
}
}
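`warn_deprecated` works because the `#[serde(flatten)] catchall: BTreeMap<String, IgnoredAny>` field swallows every key the struct doesn't name, instead of failing deserialization, so deprecated options can still be detected and reported. A minimal sketch of that trick (assuming the `toml` crate for deserialization; `MiniConfig` is a made-up stand-in for `Config`):

```rust
use serde::{de::IgnoredAny, Deserialize};
use std::collections::BTreeMap;

#[derive(Deserialize)]
struct MiniConfig {
    server_name: String,
    // Collects every key not matched by a named field; the values are discarded.
    #[serde(flatten)]
    catchall: BTreeMap<String, IgnoredAny>,
}

fn main() {
    let cfg: MiniConfig =
        toml::from_str("server_name = \"example.org\"\ncache_capacity = 1024\n").unwrap();
    // "cache_capacity" lands in catchall, so it can be reported as deprecated.
    assert!(cfg.catchall.contains_key("cache_capacity"));
}
```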
fn false_fn() -> bool {
@@ -117,16 +62,8 @@ fn true_fn() -> bool {
true
}
fn default_db_cache_capacity_mb() -> f64 {
200.0
}
fn default_pdu_cache_capacity() -> u32 {
100_000
}
fn default_sqlite_wal_clean_second_interval() -> u32 {
1 * 60 // every minute
fn default_cache_capacity() -> u32 {
1024 * 1024 * 1024
}
fn default_max_request_size() -> u32 {
@@ -141,21 +78,13 @@ fn default_log() -> String {
"info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
}
fn default_turn_ttl() -> u64 {
60 * 60 * 24
}
#[cfg(feature = "sled")]
pub type Engine = abstraction::sled::Engine;
#[cfg(feature = "sqlite")]
pub type Engine = abstraction::sqlite::Engine;
pub type Engine = abstraction::SledEngine;
#[cfg(feature = "heed")]
pub type Engine = abstraction::heed::Engine;
#[cfg(feature = "rocksdb")]
pub type Engine = abstraction::RocksDbEngine;
pub struct Database {
_db: Arc<Engine>,
pub globals: globals::Globals,
pub users: users::Users,
pub uiaa: uiaa::Uiaa,
@@ -183,43 +112,9 @@ impl Database {
Ok(())
}
fn check_sled_or_sqlite_db(config: &Config) -> Result<()> {
#[cfg(feature = "backend_sqlite")]
{
let path = Path::new(&config.database_path);
let sled_exists = path.join("db").exists();
let sqlite_exists = path.join("conduit.db").exists();
if sled_exists {
if sqlite_exists {
// most likely an in-place directory, only warn
warn!("Both sled and sqlite databases are detected in database directory");
warn!("Currently running from the sqlite database, but consider removing sled database files to free up space")
} else {
error!(
"Sled database detected, conduit now uses sqlite for database operations"
);
error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite");
return Err(Error::bad_config(
"sled database detected, migrate to sqlite",
));
}
}
}
Ok(())
}
/// Load an existing database or create a new one.
pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
Self::check_sled_or_sqlite_db(config)?;
if !Path::new(&config.database_path).exists() {
std::fs::create_dir_all(&config.database_path)
.map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?;
}
let builder = Engine::open(config)?;
pub async fn load_or_create(config: Config) -> Result<Arc<Self>> {
let builder = Engine::open(&config)?;
if config.max_request_size < 1024 {
eprintln!("ERROR: Max request size is less than 1KB. Please increase it.");
@@ -228,13 +123,11 @@ impl Database {
let (admin_sender, admin_receiver) = mpsc::unbounded();
let (sending_sender, sending_receiver) = mpsc::unbounded();
let db = Arc::new(TokioRwLock::from(Self {
_db: builder.clone(),
let db = Arc::new(Self {
users: users::Users {
userid_password: builder.open_tree("userid_password")?,
userid_displayname: builder.open_tree("userid_displayname")?,
userid_avatarurl: builder.open_tree("userid_avatarurl")?,
userid_blurhash: builder.open_tree("userid_blurhash")?,
userdeviceid_token: builder.open_tree("userdeviceid_token")?,
userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?,
userid_devicelistversion: builder.open_tree("userid_devicelistversion")?,
@@ -278,8 +171,6 @@ impl Database {
serverroomids: builder.open_tree("serverroomids")?,
userroomid_joined: builder.open_tree("userroomid_joined")?,
roomuserid_joined: builder.open_tree("roomuserid_joined")?,
roomid_joinedcount: builder.open_tree("roomid_joinedcount")?,
roomid_invitedcount: builder.open_tree("roomid_invitedcount")?,
roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?,
userroomid_invitestate: builder.open_tree("userroomid_invitestate")?,
roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?,
@@ -290,42 +181,19 @@ impl Database {
userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?,
statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?,
shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?,
shorteventid_authchain: builder.open_tree("shorteventid_authchain")?,
roomid_shortroomid: builder.open_tree("roomid_shortroomid")?,
shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?,
stateid_shorteventid: builder.open_tree("stateid_shorteventid")?,
eventid_shorteventid: builder.open_tree("eventid_shorteventid")?,
shorteventid_eventid: builder.open_tree("shorteventid_eventid")?,
shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?,
roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?,
roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?,
statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?,
eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?,
softfailedeventids: builder.open_tree("softfailedeventids")?,
referencedevents: builder.open_tree("referencedevents")?,
pdu_cache: Mutex::new(LruCache::new(
config
.pdu_cache_capacity
.try_into()
.expect("pdu cache capacity fits into usize"),
)),
auth_chain_cache: Mutex::new(LruCache::new(1_000_000)),
shorteventid_cache: Mutex::new(LruCache::new(1_000_000)),
eventidshort_cache: Mutex::new(LruCache::new(1_000_000)),
shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)),
statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)),
our_real_users_cache: RwLock::new(HashMap::new()),
appservice_in_room_cache: RwLock::new(HashMap::new()),
stateinfo_cache: Mutex::new(LruCache::new(1000)),
prevevent_parent: builder.open_tree("prevevent_parent")?,
pdu_cache: RwLock::new(LruCache::new(1_000_000)),
},
account_data: account_data::AccountData {
roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?,
roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?,
},
media: media::Media {
mediaid_file: builder.open_tree("mediaid_file")?,
@@ -340,8 +208,8 @@ impl Database {
},
sending: sending::Sending {
servername_educount: builder.open_tree("servername_educount")?,
servernameevent_data: builder.open_tree("servernameevent_data")?,
servercurrentevent_data: builder.open_tree("servercurrentevent_data")?,
servernamepduids: builder.open_tree("servernamepduids")?,
servercurrentevents: builder.open_tree("servercurrentevents")?,
maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)),
sender: sending_sender,
},
@@ -358,438 +226,104 @@ impl Database {
globals: globals::Globals::load(
builder.open_tree("global")?,
builder.open_tree("server_signingkeys")?,
config.clone(),
config,
)?,
}));
{
let db = db.read().await;
// MIGRATIONS
// TODO: database versions of new dbs should probably not be 0
if db.globals.database_version()? < 1 {
for (roomserverid, _) in db.rooms.roomserverids.iter() {
let mut parts = roomserverid.split(|&b| b == 0xff);
let room_id = parts.next().expect("split always returns one element");
let servername = match parts.next() {
Some(s) => s,
None => {
error!("Migration: Invalid roomserverid in db.");
continue;
}
};
let mut serverroomid = servername.to_vec();
serverroomid.push(0xff);
serverroomid.extend_from_slice(room_id);
db.rooms.serverroomids.insert(&serverroomid, &[])?;
}
db.globals.bump_database_version(1)?;
println!("Migration: 0 -> 1 finished");
}
if db.globals.database_version()? < 2 {
// We accidentally inserted hashed versions of "" into the db instead of just ""
for (userid, password) in db.users.userid_password.iter() {
let password = utils::string_from_bytes(&password);
let empty_hashed_password = password.map_or(false, |password| {
argon2::verify_encoded(&password, b"").unwrap_or(false)
});
if empty_hashed_password {
db.users.userid_password.insert(&userid, b"")?;
}
}
db.globals.bump_database_version(2)?;
println!("Migration: 1 -> 2 finished");
}
});
if db.globals.database_version()? < 3 {
// Move media to filesystem
for (key, content) in db.media.mediaid_file.iter() {
if content.is_empty() {
// MIGRATIONS
// TODO: database versions of new dbs should probably not be 0
if db.globals.database_version()? < 1 {
for (roomserverid, _) in db.rooms.roomserverids.iter() {
let mut parts = roomserverid.split(|&b| b == 0xff);
let room_id = parts.next().expect("split always returns one element");
let servername = match parts.next() {
Some(s) => s,
None => {
error!("Migration: Invalid roomserverid in db.");
continue;
}
};
let mut serverroomid = servername.to_vec();
serverroomid.push(0xff);
serverroomid.extend_from_slice(room_id);
let path = db.globals.get_media_file(&key);
let mut file = fs::File::create(path)?;
file.write_all(&content)?;
db.media.mediaid_file.insert(&key, &[])?;
}
db.globals.bump_database_version(3)?;
println!("Migration: 2 -> 3 finished");
db.rooms.serverroomids.insert(&serverroomid, &[])?;
}
if db.globals.database_version()? < 4 {
// Add federated users to db as deactivated
for our_user in db.users.iter() {
let our_user = our_user?;
if db.users.is_deactivated(&our_user)? {
continue;
}
for room in db.rooms.rooms_joined(&our_user) {
for user in db.rooms.room_members(&room?) {
let user = user?;
if user.server_name() != db.globals.server_name() {
println!("Migration: Creating user {}", user);
db.users.create(&user, None)?;
}
}
}
}
db.globals.bump_database_version(4)?;
db.globals.bump_database_version(1)?;
println!("Migration: 3 -> 4 finished");
}
if db.globals.database_version()? < 5 {
// Upgrade user data store
for (roomuserdataid, _) in db.account_data.roomuserdataid_accountdata.iter() {
let mut parts = roomuserdataid.split(|&b| b == 0xff);
let room_id = parts.next().unwrap();
let user_id = parts.next().unwrap();
let event_type = roomuserdataid.rsplit(|&b| b == 0xff).next().unwrap();
let mut key = room_id.to_vec();
key.push(0xff);
key.extend_from_slice(user_id);
key.push(0xff);
key.extend_from_slice(event_type);
db.account_data
.roomusertype_roomuserdataid
.insert(&key, &roomuserdataid)?;
}
println!("Migration: 0 -> 1 finished");
}
db.globals.bump_database_version(5)?;
if db.globals.database_version()? < 2 {
// We accidentally inserted hashed versions of "" into the db instead of just ""
for (userid, password) in db.users.userid_password.iter() {
let password = utils::string_from_bytes(&password);
println!("Migration: 4 -> 5 finished");
}
let empty_hashed_password = password.map_or(false, |password| {
argon2::verify_encoded(&password, b"").unwrap_or(false)
});
if db.globals.database_version()? < 6 {
// Set room member count
for (roomid, _) in db.rooms.roomid_shortstatehash.iter() {
let string = utils::string_from_bytes(&roomid).unwrap();
let room_id = <&RoomId>::try_from(string.as_str()).unwrap();
db.rooms.update_joined_count(room_id, &db)?;
if empty_hashed_password {
db.users.userid_password.insert(&userid, b"")?;
}
db.globals.bump_database_version(6)?;
println!("Migration: 5 -> 6 finished");
}
if db.globals.database_version()? < 7 {
// Upgrade state store
let mut last_roomstates: HashMap<Box<RoomId>, u64> = HashMap::new();
let mut current_sstatehash: Option<u64> = None;
let mut current_room = None;
let mut current_state = HashSet::new();
let mut counter = 0;
let mut handle_state =
|current_sstatehash: u64,
current_room: &RoomId,
current_state: HashSet<_>,
last_roomstates: &mut HashMap<_, _>| {
counter += 1;
println!("counter: {}", counter);
let last_roomsstatehash = last_roomstates.get(current_room);
let states_parents = last_roomsstatehash.map_or_else(
|| Ok(Vec::new()),
|&last_roomsstatehash| {
db.rooms.load_shortstatehash_info(dbg!(last_roomsstatehash))
},
)?;
let (statediffnew, statediffremoved) =
if let Some(parent_stateinfo) = states_parents.last() {
let statediffnew = current_state
.difference(&parent_stateinfo.1)
.copied()
.collect::<HashSet<_>>();
let statediffremoved = parent_stateinfo
.1
.difference(&current_state)
.copied()
.collect::<HashSet<_>>();
(statediffnew, statediffremoved)
} else {
(current_state, HashSet::new())
};
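// How the delta works: statediffnew = current_state \ parent (entries
// added since the parent snapshot) and statediffremoved = parent \
// current_state (entries dropped), so replaying the deltas from the root
// snapshot rebuilds the full state at any shortstatehash.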
db.rooms.save_state_from_diff(
dbg!(current_sstatehash),
statediffnew,
statediffremoved,
2, // every state change is 2 event changes on average
states_parents,
)?;
/*
let mut tmp = db.rooms.load_shortstatehash_info(&current_sstatehash, &db)?;
let state = tmp.pop().unwrap();
println!(
"{}\t{}{:?}: {:?} + {:?} - {:?}",
current_room,
" ".repeat(tmp.len()),
utils::u64_from_bytes(&current_sstatehash).unwrap(),
tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()),
state
.2
.iter()
.map(|b| utils::u64_from_bytes(&b[size_of::<u64>()..]).unwrap())
.collect::<Vec<_>>(),
state
.3
.iter()
.map(|b| utils::u64_from_bytes(&b[size_of::<u64>()..]).unwrap())
.collect::<Vec<_>>()
);
*/
Ok::<_, Error>(())
};
for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() {
let sstatehash = utils::u64_from_bytes(&k[0..size_of::<u64>()])
.expect("number of bytes is correct");
let sstatekey = k[size_of::<u64>()..].to_vec();
if Some(sstatehash) != current_sstatehash {
if let Some(current_sstatehash) = current_sstatehash {
handle_state(
current_sstatehash,
current_room.as_deref().unwrap(),
current_state,
&mut last_roomstates,
)?;
last_roomstates
.insert(current_room.clone().unwrap(), current_sstatehash);
}
current_state = HashSet::new();
current_sstatehash = Some(sstatehash);
let event_id = db
.rooms
.shorteventid_eventid
.get(&seventid)
.unwrap()
.unwrap();
let string = utils::string_from_bytes(&event_id).unwrap();
let event_id = <&EventId>::try_from(string.as_str()).unwrap();
let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap();
if Some(&pdu.room_id) != current_room.as_ref() {
current_room = Some(pdu.room_id.clone());
}
}
db.globals.bump_database_version(2)?;
let mut val = sstatekey;
val.extend_from_slice(&seventid);
current_state.insert(val.try_into().expect("size is correct"));
}
println!("Migration: 1 -> 2 finished");
}
if let Some(current_sstatehash) = current_sstatehash {
handle_state(
current_sstatehash,
current_room.as_deref().unwrap(),
current_state,
&mut last_roomstates,
)?;
if db.globals.database_version()? < 3 {
// Move media to filesystem
for (key, content) in db.media.mediaid_file.iter() {
if content.len() == 0 {
continue;
}
db.globals.bump_database_version(7)?;
println!("Migration: 6 -> 7 finished");
let path = db.globals.get_media_file(&key);
let mut file = fs::File::create(path)?;
file.write_all(&content)?;
db.media.mediaid_file.insert(&key, &[])?;
}
if db.globals.database_version()? < 8 {
// Generate short room ids for all rooms
for (room_id, _) in db.rooms.roomid_shortstatehash.iter() {
let shortroomid = db.globals.next_count()?.to_be_bytes();
db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?;
println!("Migration: 8");
}
// Update pduids db layout
let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| {
if !key.starts_with(b"!") {
return None;
}
let mut parts = key.splitn(2, |&b| b == 0xff);
let room_id = parts.next().unwrap();
let count = parts.next().unwrap();
let short_room_id = db
.rooms
.roomid_shortroomid
.get(room_id)
.unwrap()
.expect("shortroomid should exist");
let mut new_key = short_room_id;
new_key.extend_from_slice(count);
Some((new_key, v))
});
db.rooms.pduid_pdu.insert_batch(&mut batch)?;
let mut batch2 = db.rooms.eventid_pduid.iter().filter_map(|(k, value)| {
if !value.starts_with(b"!") {
return None;
}
let mut parts = value.splitn(2, |&b| b == 0xff);
let room_id = parts.next().unwrap();
let count = parts.next().unwrap();
let short_room_id = db
.rooms
.roomid_shortroomid
.get(room_id)
.unwrap()
.expect("shortroomid should exist");
let mut new_value = short_room_id;
new_value.extend_from_slice(count);
Some((k, new_value))
});
db.rooms.eventid_pduid.insert_batch(&mut batch2)?;
db.globals.bump_database_version(8)?;
db.globals.bump_database_version(3)?;
println!("Migration: 7 -> 8 finished");
}
println!("Migration: 2 -> 3 finished");
}
if db.globals.database_version()? < 9 {
// Update tokenids db layout
let mut iter = db
.rooms
.tokenids
.iter()
.filter_map(|(key, _)| {
if !key.starts_with(b"!") {
return None;
}
let mut parts = key.splitn(4, |&b| b == 0xff);
let room_id = parts.next().unwrap();
let word = parts.next().unwrap();
let _pdu_id_room = parts.next().unwrap();
let pdu_id_count = parts.next().unwrap();
let short_room_id = db
.rooms
.roomid_shortroomid
.get(room_id)
.unwrap()
.expect("shortroomid should exist");
let mut new_key = short_room_id;
new_key.extend_from_slice(word);
new_key.push(0xff);
new_key.extend_from_slice(pdu_id_count);
println!("old {:?}", key);
println!("new {:?}", new_key);
Some((new_key, Vec::new()))
})
.peekable();
while iter.peek().is_some() {
db.rooms
.tokenids
.insert_batch(&mut iter.by_ref().take(1000))?;
println!("smaller batch done");
if db.globals.database_version()? < 4 {
// Add federated users to db as deactivated
for our_user in db.users.iter() {
let our_user = our_user?;
if db.users.is_deactivated(&our_user)? {
continue;
}
println!("Deleting starts");
let batch2: Vec<_> = db
.rooms
.tokenids
.iter()
.filter_map(|(key, _)| {
if key.starts_with(b"!") {
println!("del {:?}", key);
Some(key)
} else {
None
for room in db.rooms.rooms_joined(&our_user) {
for user in db.rooms.room_members(&room?) {
let user = user?;
if user.server_name() != db.globals.server_name() {
println!("Migration: Creating user {}", user);
db.users.create(&user, None)?;
}
})
.collect();
for key in batch2 {
println!("del");
db.rooms.tokenids.remove(&key)?;
}
}
db.globals.bump_database_version(9)?;
println!("Migration: 8 -> 9 finished");
}
if db.globals.database_version()? < 10 {
// Add other direction for shortstatekeys
for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() {
db.rooms
.shortstatekey_statekey
.insert(&shortstatekey, &statekey)?;
}
db.globals.bump_database_version(4)?;
// Force E2EE device list updates so we can send them over federation
for user_id in db.users.iter().filter_map(|r| r.ok()) {
db.users
.mark_device_key_update(&user_id, &db.rooms, &db.globals)?;
}
db.globals.bump_database_version(10)?;
println!("Migration: 9 -> 10 finished");
}
println!("Migration: 3 -> 4 finished");
}
let guard = db.read().await;
// This data is probably outdated
guard.rooms.edus.presenceid_presence.clear()?;
guard.admin.start_handler(Arc::clone(&db), admin_receiver);
guard
.sending
.start_handler(Arc::clone(&db), sending_receiver);
db.rooms.edus.presenceid_presence.clear()?;
drop(guard);
#[cfg(feature = "sqlite")]
{
Self::start_wal_clean_task(Arc::clone(&db), config).await;
}
db.admin.start_handler(Arc::clone(&db), admin_receiver);
db.sending.start_handler(Arc::clone(&db), sending_receiver);
Ok(db)
}
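The constructor above runs all schema migrations inline: each step compares database_version() against its target, rewrites the affected trees, and only then bumps the version, so an interrupted upgrade resumes at the failed step on the next start. A minimal sketch of that pattern, assuming the version helpers shown above (the step bodies are placeholders):

fn run_migrations(db: &Database) -> Result<()> {
    // Steps must run in order; each is skipped once its version is recorded.
    if db.globals.database_version()? < 1 {
        // ... rewrite keys introduced by step 1 ...
        db.globals.bump_database_version(1)?;
        println!("Migration: 0 -> 1 finished");
    }
    if db.globals.database_version()? < 2 {
        // ... rewrite keys introduced by step 2 ...
        db.globals.bump_database_version(2)?;
        println!("Migration: 1 -> 2 finished");
    }
    Ok(())
}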
#[cfg(feature = "conduit_bin")]
pub async fn start_on_shutdown_tasks(db: Arc<TokioRwLock<Self>>, shutdown: Shutdown) {
use tracing::info;
tokio::spawn(async move {
shutdown.await;
info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers...");
db.read().await.globals.rotate.fire();
});
}
pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) {
let userid_bytes = user_id.as_bytes().to_vec();
let mut userid_prefix = userid_bytes.clone();
@@ -816,34 +350,15 @@ impl Database {
.watch_prefix(&userid_prefix),
);
futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix));
futures.push(
self.rooms
.userroomid_notificationcount
.watch_prefix(&userid_prefix),
);
futures.push(
self.rooms
.userroomid_highlightcount
.watch_prefix(&userid_prefix),
);
// Events for rooms we are in
for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) {
let short_roomid = self
.rooms
.get_shortroomid(&room_id)
.ok()
.flatten()
.expect("room exists")
.to_be_bytes()
.to_vec();
let roomid_bytes = room_id.as_bytes().to_vec();
let mut roomid_prefix = roomid_bytes.clone();
roomid_prefix.push(0xff);
// PDUs
futures.push(self.rooms.pduid_pdu.watch_prefix(&short_roomid));
futures.push(self.rooms.pduid_pdu.watch_prefix(&roomid_prefix));
// EDUs
futures.push(
@@ -869,7 +384,7 @@ impl Database {
futures.push(
self.account_data
.roomusertype_roomuserdataid
.roomuserdataid_accountdata
.watch_prefix(&roomuser_prefix),
);
}
@@ -879,7 +394,7 @@ impl Database {
futures.push(
self.account_data
.roomusertype_roomuserdataid
.roomuserdataid_accountdata
.watch_prefix(&globaluserdata_prefix),
);
@@ -893,97 +408,13 @@ impl Database {
.watch_prefix(&userid_bytes),
);
futures.push(Box::pin(self.globals.rotate.watch()));
// Wait until one of them finds something
futures.next().await;
}
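watch resolves as soon as any of the registered prefix watchers (or the rotation signal) fires. A reduced sketch of that select-any pattern, assuming the futures_util crate that the surrounding code pulls in via rocket::futures:

use futures_util::stream::{FuturesUnordered, StreamExt};
use std::{future::Future, pin::Pin};

async fn wait_for_any(watchers: Vec<Pin<Box<dyn Future<Output = ()> + Send>>>) {
    // FuturesUnordered polls every watcher concurrently; next() yields
    // once the first of them completes.
    let mut futures: FuturesUnordered<_> = watchers.into_iter().collect();
    futures.next().await;
}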
#[tracing::instrument(skip(self))]
pub fn flush(&self) -> Result<()> {
let start = std::time::Instant::now();
let res = self._db.flush();
debug!("flush: took {:?}", start.elapsed());
res
}
#[cfg(feature = "sqlite")]
#[tracing::instrument(skip(self))]
pub fn flush_wal(&self) -> Result<()> {
self._db.flush_wal()
}
#[cfg(feature = "sqlite")]
#[tracing::instrument(skip(db, config))]
pub async fn start_wal_clean_task(db: Arc<TokioRwLock<Self>>, config: &Config) {
use tokio::time::interval;
#[cfg(unix)]
use tokio::signal::unix::{signal, SignalKind};
use tracing::info;
use std::time::{Duration, Instant};
let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64);
tokio::spawn(async move {
let mut i = interval(timer_interval);
#[cfg(unix)]
let mut s = signal(SignalKind::hangup()).unwrap();
loop {
#[cfg(unix)]
tokio::select! {
_ = i.tick() => {
info!("wal-trunc: Timer ticked");
}
_ = s.recv() => {
info!("wal-trunc: Received SIGHUP");
}
};
#[cfg(not(unix))]
{
i.tick().await;
info!("wal-trunc: Timer ticked")
}
let start = Instant::now();
if let Err(e) = db.read().await.flush_wal() {
error!("wal-trunc: Errored: {}", e);
} else {
info!("wal-trunc: Flushed in {:?}", start.elapsed());
}
}
});
}
}
pub struct DatabaseGuard(OwnedRwLockReadGuard<Database>);
impl Deref for DatabaseGuard {
type Target = OwnedRwLockReadGuard<Database>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[rocket::async_trait]
impl<'r> FromRequest<'r> for DatabaseGuard {
type Error = ();
async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome<Self, ()> {
let db = try_outcome!(req.guard::<&State<Arc<TokioRwLock<Database>>>>().await);
Ok(DatabaseGuard(Arc::clone(db).read_owned().await)).or_forward(())
}
}
impl From<OwnedRwLockReadGuard<Database>> for DatabaseGuard {
fn from(val: OwnedRwLockReadGuard<Database>) -> Self {
Self(val)
pub async fn flush(&self) -> Result<()> {
// noop while we don't use sled 1.0
//self._db.flush_async().await?;
Ok(())
}
}
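DatabaseGuard lets Rocket handlers take a per-request read lock and reach the database through Deref. A hedged usage sketch (the route itself is hypothetical; current_count is the getter shown further down in globals.rs):

#[rocket::get("/_debug/count")]
async fn debug_count(db: DatabaseGuard) -> String {
    // Two Deref steps: DatabaseGuard -> OwnedRwLockReadGuard<Database> -> Database
    db.globals
        .current_count()
        .map(|c| c.to_string())
        .unwrap_or_else(|_| "error".to_owned())
}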

303
src/database/abstraction.rs

@@ -1,46 +1,51 @@
use super::Config;
use crate::Result;
use crate::{utils, Result};
use log::warn;
use std::{future::Future, pin::Pin, sync::Arc};
#[cfg(feature = "sled")]
pub mod sled;
#[cfg(feature = "rocksdb")]
use std::{collections::BTreeMap, sync::RwLock};
#[cfg(feature = "sqlite")]
pub mod sqlite;
#[cfg(feature = "sled")]
pub struct SledEngine(sled::Db);
#[cfg(feature = "sled")]
pub struct SledEngineTree(sled::Tree);
#[cfg(feature = "heed")]
pub mod heed;
#[cfg(feature = "rocksdb")]
pub struct RocksDbEngine(rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>);
#[cfg(feature = "rocksdb")]
pub struct RocksDbEngineTree<'a> {
db: Arc<RocksDbEngine>,
name: &'a str,
watchers: RwLock<BTreeMap<Vec<u8>, Vec<tokio::sync::oneshot::Sender<()>>>>,
}
pub trait DatabaseEngine: Sized {
fn open(config: &Config) -> Result<Arc<Self>>;
fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>>;
fn flush(self: &Arc<Self>) -> Result<()>;
}
pub trait Tree: Send + Sync {
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>;
fn insert_batch(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()>;
fn remove(&self, key: &[u8]) -> Result<()>;
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a>;
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + Sync + 'a>;
fn iter_from<'a>(
&'a self,
from: &[u8],
backwards: bool,
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a>;
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a>;
fn increment(&self, key: &[u8]) -> Result<Vec<u8>>;
fn increment_batch(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()>;
fn scan_prefix<'a>(
&'a self,
prefix: Vec<u8>,
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a>;
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + 'a>;
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>>;
@@ -52,3 +57,273 @@ pub trait Tree: Send + Sync {
Ok(())
}
}
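Throughout these stores, keys are fixed segments joined by the separator byte 0xff, which can never occur inside valid UTF-8 identifiers, so scan_prefix enumerates everything under one logical parent. A small sketch of the convention against the Tree trait above:

fn entries_for_user_in_room(tree: &dyn Tree, room_id: &str, user_id: &str) {
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff); // separator: 0xff is not a legal UTF-8 byte
    prefix.extend_from_slice(user_id.as_bytes());
    prefix.push(0xff);
    for (key, _value) in tree.scan_prefix(prefix) {
        // the key suffix after the prefix encodes e.g. count and type
        let _ = key;
    }
}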
#[cfg(feature = "sled")]
impl DatabaseEngine for SledEngine {
fn open(config: &Config) -> Result<Arc<Self>> {
Ok(Arc::new(SledEngine(
sled::Config::default()
.path(&config.database_path)
.cache_capacity(config.cache_capacity as u64)
.use_compression(false)
.open()?,
)))
}
fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
Ok(Arc::new(SledEngineTree(self.0.open_tree(name)?)))
}
}
#[cfg(feature = "sled")]
impl Tree for SledEngineTree {
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
Ok(self.0.get(key)?.map(|v| v.to_vec()))
}
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
self.0.insert(key, value)?;
Ok(())
}
fn remove(&self, key: &[u8]) -> Result<()> {
self.0.remove(key)?;
Ok(())
}
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + Sync + 'a> {
Box::new(
self.0
.iter()
.filter_map(|r| {
if let Err(e) = &r {
warn!("Error: {}", e);
}
r.ok()
})
.map(|(k, v)| (k.to_vec().into(), v.to_vec().into())),
)
}
fn iter_from(
&self,
from: &[u8],
backwards: bool,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)>> {
let iter = if backwards {
self.0.range(..from)
} else {
self.0.range(from..)
};
let iter = iter
.filter_map(|r| {
if let Err(e) = &r {
warn!("Error: {}", e);
}
r.ok()
})
.map(|(k, v)| (k.to_vec().into(), v.to_vec().into()));
if backwards {
Box::new(iter.rev())
} else {
Box::new(iter)
}
}
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
Ok(self
.0
.update_and_fetch(key, utils::increment)
.map(|o| o.expect("increment always sets a value").to_vec())?)
}
fn scan_prefix<'a>(
&'a self,
prefix: Vec<u8>,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + 'a> {
let iter = self
.0
.scan_prefix(prefix)
.filter_map(|r| {
if let Err(e) = &r {
warn!("Error: {}", e);
}
r.ok()
})
.map(|(k, v)| (k.to_vec().into(), v.to_vec().into()));
Box::new(iter)
}
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
let prefix = prefix.to_vec();
Box::pin(async move {
self.0.watch_prefix(prefix).await;
})
}
}
#[cfg(feature = "rocksdb")]
impl DatabaseEngine for RocksDbEngine {
fn open(config: &Config) -> Result<Arc<Self>> {
let mut db_opts = rocksdb::Options::default();
db_opts.create_if_missing(true);
db_opts.set_max_open_files(16);
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy);
db_opts.set_target_file_size_base(256 << 20);
db_opts.set_write_buffer_size(256 << 20);
let mut block_based_options = rocksdb::BlockBasedOptions::default();
block_based_options.set_block_size(512 << 10);
db_opts.set_block_based_table_factory(&block_based_options);
let cfs = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::list_cf(
&db_opts,
&config.database_path,
)
.unwrap_or_default();
let mut options = rocksdb::Options::default();
options.set_merge_operator_associative("increment", utils::increment_rocksdb);
let db = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::open_cf_descriptors(
&db_opts,
&config.database_path,
cfs.iter()
.map(|name| rocksdb::ColumnFamilyDescriptor::new(name, options.clone())),
)?;
Ok(Arc::new(RocksDbEngine(db)))
}
fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
let mut options = rocksdb::Options::default();
options.set_merge_operator_associative("increment", utils::increment_rocksdb);
// Create if it doesn't exist
let _ = self.0.create_cf(name, &options);
Ok(Arc::new(RocksDbEngineTree {
name,
db: Arc::clone(self),
watchers: RwLock::new(BTreeMap::new()),
}))
}
}
#[cfg(feature = "rocksdb")]
impl RocksDbEngineTree<'_> {
fn cf(&self) -> rocksdb::BoundColumnFamily<'_> {
self.db.0.cf_handle(self.name).unwrap()
}
}
#[cfg(feature = "rocksdb")]
impl Tree for RocksDbEngineTree<'_> {
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
Ok(self.db.0.get_cf(self.cf(), key)?)
}
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
let watchers = self.watchers.read().unwrap();
let mut triggered = Vec::new();
for length in 0..=key.len() {
if watchers.contains_key(&key[..length]) {
triggered.push(&key[..length]);
}
}
drop(watchers);
if !triggered.is_empty() {
let mut watchers = self.watchers.write().unwrap();
for prefix in triggered {
if let Some(txs) = watchers.remove(prefix) {
for tx in txs {
let _ = tx.send(());
}
}
}
}
Ok(self.db.0.put_cf(self.cf(), key, value)?)
}
fn remove(&self, key: &[u8]) -> Result<()> {
Ok(self.db.0.delete_cf(self.cf(), key)?)
}
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + Sync + 'a> {
Box::new(
self.db
.0
.iterator_cf(self.cf(), rocksdb::IteratorMode::Start),
)
}
fn iter_from<'a>(
&'a self,
from: &[u8],
backwards: bool,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
Box::new(self.db.0.iterator_cf(
self.cf(),
rocksdb::IteratorMode::From(
from,
if backwards {
rocksdb::Direction::Reverse
} else {
rocksdb::Direction::Forward
},
),
))
}
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.db.0]), None).unwrap();
dbg!(stats.mem_table_total);
dbg!(stats.mem_table_unflushed);
dbg!(stats.mem_table_readers_total);
dbg!(stats.cache_total);
// TODO: atomic?
let old = self.get(key)?;
let new = utils::increment(old.as_deref()).unwrap();
self.insert(key, &new)?;
Ok(new)
}
fn scan_prefix<'a>(
&'a self,
prefix: Vec<u8>,
) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + Send + 'a> {
Box::new(
self.db
.0
.iterator_cf(
self.cf(),
rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
)
.take_while(move |(k, _)| k.starts_with(&prefix)),
)
}
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
let (tx, rx) = tokio::sync::oneshot::channel();
self.watchers
.write()
.unwrap()
.entry(prefix.to_vec())
.or_default()
.push(tx);
Box::pin(async move {
// Tx is never destroyed
rx.await.unwrap();
})
}
}
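The watcher bookkeeping above is the same in every backend: insert checks each prefix of the written key, drains the matching oneshot senders, and the corresponding watch_prefix futures resolve. The mechanism in isolation (types reduced to the essentials):

use std::{collections::BTreeMap, sync::RwLock};

struct Watchers {
    map: RwLock<BTreeMap<Vec<u8>, Vec<tokio::sync::oneshot::Sender<()>>>>,
}

impl Watchers {
    fn notify(&self, key: &[u8]) {
        let mut map = self.map.write().unwrap();
        // every prefix of `key` (including the empty one) may have waiters
        for len in 0..=key.len() {
            if let Some(senders) = map.remove(&key[..len]) {
                for tx in senders {
                    let _ = tx.send(()); // the receiver may have been dropped
                }
            }
        }
    }
}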

240
src/database/abstraction/heed.rs

@@ -1,240 +0,0 @@
use super::super::Config;
use crossbeam::channel::{bounded, Sender as ChannelSender};
use threadpool::ThreadPool;
use crate::{Error, Result};
use std::{
collections::HashMap,
future::Future,
pin::Pin,
sync::{Arc, Mutex, RwLock},
};
use tokio::sync::oneshot::Sender;
use super::{DatabaseEngine, Tree};
type TupleOfBytes = (Vec<u8>, Vec<u8>);
pub struct Engine {
env: heed::Env,
iter_pool: Mutex<ThreadPool>,
}
pub struct EngineTree {
engine: Arc<Engine>,
tree: Arc<heed::UntypedDatabase>,
watchers: RwLock<HashMap<Vec<u8>, Vec<Sender<()>>>>,
}
fn convert_error(error: heed::Error) -> Error {
Error::HeedError {
error: error.to_string(),
}
}
impl DatabaseEngine for Engine {
fn open(config: &Config) -> Result<Arc<Self>> {
let mut env_builder = heed::EnvOpenOptions::new();
env_builder.map_size(1024 * 1024 * 1024 * 1024); // 1 Terabyte
env_builder.max_readers(126);
env_builder.max_dbs(128);
unsafe {
env_builder.flag(heed::flags::Flags::MdbWriteMap);
env_builder.flag(heed::flags::Flags::MdbMapAsync);
}
Ok(Arc::new(Engine {
env: env_builder
.open(&config.database_path)
.map_err(convert_error)?,
iter_pool: Mutex::new(ThreadPool::new(10)),
}))
}
fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
// Creates the db if it doesn't exist already
Ok(Arc::new(EngineTree {
engine: Arc::clone(self),
tree: Arc::new(
self.env
.create_database(Some(name))
.map_err(convert_error)?,
),
watchers: RwLock::new(HashMap::new()),
}))
}
fn flush(self: &Arc<Self>) -> Result<()> {
self.env.force_sync().map_err(convert_error)?;
Ok(())
}
}
impl EngineTree {
#[tracing::instrument(skip(self, tree, from, backwards))]
fn iter_from_thread(
&self,
tree: Arc<heed::UntypedDatabase>,
from: Vec<u8>,
backwards: bool,
) -> Box<dyn Iterator<Item = TupleOfBytes> + Send + Sync> {
let (s, r) = bounded::<TupleOfBytes>(100);
let engine = Arc::clone(&self.engine);
let lock = self.engine.iter_pool.lock().unwrap();
if lock.active_count() < lock.max_count() {
lock.execute(move || {
iter_from_thread_work(tree, &engine.env.read_txn().unwrap(), from, backwards, &s);
});
} else {
std::thread::spawn(move || {
iter_from_thread_work(tree, &engine.env.read_txn().unwrap(), from, backwards, &s);
});
}
Box::new(r.into_iter())
}
}
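iter_from_thread turns a blocking LMDB cursor into a plain iterator: a pool (or ad-hoc) thread feeds key/value pairs into a bounded crossbeam channel and the caller drains the receiver, so at most 100 pairs sit in memory and a slow consumer backpressures the read side. The bridge in miniature:

use crossbeam::channel::bounded;

fn channel_backed_iter() -> impl Iterator<Item = (Vec<u8>, Vec<u8>)> {
    let (s, r) = bounded(100); // blocks the producer once 100 items queue up
    std::thread::spawn(move || {
        for i in 0u8..10 {
            // stop producing if the consumer hung up
            if s.send((vec![i], vec![i])).is_err() {
                return;
            }
        }
    });
    r.into_iter() // Receiver implements IntoIterator over received items
}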
#[tracing::instrument(skip(tree, txn, from, backwards))]
fn iter_from_thread_work(
tree: Arc<heed::UntypedDatabase>,
txn: &heed::RoTxn<'_>,
from: Vec<u8>,
backwards: bool,
s: &ChannelSender<(Vec<u8>, Vec<u8>)>,
) {
if backwards {
for (k, v) in tree.rev_range(txn, ..=&*from).unwrap().map(|r| r.unwrap()) {
if s.send((k.to_vec(), v.to_vec())).is_err() {
return;
}
}
} else {
if from.is_empty() {
for (k, v) in tree.iter(txn).unwrap().map(|r| r.unwrap()) {
if s.send((k.to_vec(), v.to_vec())).is_err() {
return;
}
}
} else {
for (k, v) in tree.range(txn, &*from..).unwrap().map(|r| r.unwrap()) {
if s.send((k.to_vec(), v.to_vec())).is_err() {
return;
}
}
}
}
}
impl Tree for EngineTree {
#[tracing::instrument(skip(self, key))]
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
let txn = self.engine.env.read_txn().map_err(convert_error)?;
Ok(self
.tree
.get(&txn, &key)
.map_err(convert_error)?
.map(|s| s.to_vec()))
}
#[tracing::instrument(skip(self, key, value))]
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
let mut txn = self.engine.env.write_txn().map_err(convert_error)?;
self.tree
.put(&mut txn, &key, &value)
.map_err(convert_error)?;
txn.commit().map_err(convert_error)?;
let watchers = self.watchers.read().unwrap();
let mut triggered = Vec::new();
for length in 0..=key.len() {
if watchers.contains_key(&key[..length]) {
triggered.push(&key[..length]);
}
}
drop(watchers);
if !triggered.is_empty() {
let mut watchers = self.watchers.write().unwrap();
for prefix in triggered {
if let Some(txs) = watchers.remove(prefix) {
for tx in txs {
let _ = tx.send(());
}
}
}
};
Ok(())
}
#[tracing::instrument(skip(self, key))]
fn remove(&self, key: &[u8]) -> Result<()> {
let mut txn = self.engine.env.write_txn().map_err(convert_error)?;
self.tree.delete(&mut txn, &key).map_err(convert_error)?;
txn.commit().map_err(convert_error)?;
Ok(())
}
#[tracing::instrument(skip(self))]
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a> {
self.iter_from(&[], false)
}
#[tracing::instrument(skip(self, from, backwards))]
fn iter_from(
&self,
from: &[u8],
backwards: bool,
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send> {
self.iter_from_thread(Arc::clone(&self.tree), from.to_vec(), backwards)
}
#[tracing::instrument(skip(self, key))]
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
let mut txn = self.engine.env.write_txn().map_err(convert_error)?;
let old = self.tree.get(&txn, &key).map_err(convert_error)?;
let new =
crate::utils::increment(old.as_deref()).expect("utils::increment always returns Some");
self.tree
.put(&mut txn, &key, &&*new)
.map_err(convert_error)?;
txn.commit().map_err(convert_error)?;
Ok(new)
}
#[tracing::instrument(skip(self, prefix))]
fn scan_prefix<'a>(
&'a self,
prefix: Vec<u8>,
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + Send + 'a> {
Box::new(
self.iter_from(&prefix, false)
.take_while(move |(key, _)| key.starts_with(&prefix)),
)
}
#[tracing::instrument(skip(self, prefix))]
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
let (tx, rx) = tokio::sync::oneshot::channel();
self.watchers
.write()
.unwrap()
.entry(prefix.to_vec())
.or_default()
.push(tx);
Box::pin(async move {
// Tx is never destroyed
rx.await.unwrap();
})
}
}

128
src/database/abstraction/sled.rs

@@ -1,128 +0,0 @@
use super::super::Config;
use crate::{utils, Result};
use std::{future::Future, pin::Pin, sync::Arc};
use tracing::warn;
use super::{DatabaseEngine, Tree};
pub struct Engine(sled::Db);
pub struct SledEngineTree(sled::Tree);
impl DatabaseEngine for Engine {
fn open(config: &Config) -> Result<Arc<Self>> {
Ok(Arc::new(Engine(
sled::Config::default()
.path(&config.database_path)
.cache_capacity((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64)
.use_compression(true)
.open()?,
)))
}
fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
Ok(Arc::new(SledEngineTree(self.0.open_tree(name)?)))
}
fn flush(self: &Arc<Self>) -> Result<()> {
Ok(()) // noop
}
}
impl Tree for SledEngineTree {
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
Ok(self.0.get(key)?.map(|v| v.to_vec()))
}
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
self.0.insert(key, value)?;
Ok(())
}
#[tracing::instrument(skip(self, iter))]
fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
for (key, value) in iter {
self.0.insert(key, value)?;
}
Ok(())
}
fn remove(&self, key: &[u8]) -> Result<()> {
self.0.remove(key)?;
Ok(())
}
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
Box::new(
self.0
.iter()
.filter_map(|r| {
if let Err(e) = &r {
warn!("Error: {}", e);
}
r.ok()
})
.map(|(k, v)| (k.to_vec().into(), v.to_vec().into())),
)
}
fn iter_from(
&self,
from: &[u8],
backwards: bool,
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)>> {
let iter = if backwards {
self.0.range(..=from)
} else {
self.0.range(from..)
};
let iter = iter
.filter_map(|r| {
if let Err(e) = &r {
warn!("Error: {}", e);
}
r.ok()
})
.map(|(k, v)| (k.to_vec().into(), v.to_vec().into()));
if backwards {
Box::new(iter.rev())
} else {
Box::new(iter)
}
}
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
Ok(self
.0
.update_and_fetch(key, utils::increment)
.map(|o| o.expect("increment always sets a value").to_vec())?)
}
fn scan_prefix<'a>(
&'a self,
prefix: Vec<u8>,
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
let iter = self
.0
.scan_prefix(prefix)
.filter_map(|r| {
if let Err(e) = &r {
warn!("Error: {}", e);
}
r.ok()
})
.map(|(k, v)| (k.to_vec().into(), v.to_vec().into()));
Box::new(iter)
}
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
let prefix = prefix.to_vec();
Box::pin(async move {
self.0.watch_prefix(prefix).await;
})
}
}

392
src/database/abstraction/sqlite.rs

@@ -1,392 +0,0 @@
use super::{DatabaseEngine, Tree};
use crate::{database::Config, Result};
use parking_lot::{Mutex, MutexGuard, RwLock};
use rusqlite::{Connection, DatabaseName::Main, OptionalExtension};
use std::{
cell::RefCell,
collections::{hash_map, HashMap},
future::Future,
path::{Path, PathBuf},
pin::Pin,
sync::Arc,
};
use thread_local::ThreadLocal;
use tokio::sync::watch;
use tracing::debug;
thread_local! {
static READ_CONNECTION: RefCell<Option<&'static Connection>> = RefCell::new(None);
static READ_CONNECTION_ITERATOR: RefCell<Option<&'static Connection>> = RefCell::new(None);
}
struct PreparedStatementIterator<'a> {
pub iterator: Box<dyn Iterator<Item = TupleOfBytes> + 'a>,
pub statement_ref: NonAliasingBox<rusqlite::Statement<'a>>,
}
impl Iterator for PreparedStatementIterator<'_> {
type Item = TupleOfBytes;
fn next(&mut self) -> Option<Self::Item> {
self.iterator.next()
}
}
struct NonAliasingBox<T>(*mut T);
impl<T> Drop for NonAliasingBox<T> {
fn drop(&mut self) {
unsafe { Box::from_raw(self.0) };
}
}
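PreparedStatementIterator is self-referential: the row iterator borrows the prepared statement it was created from. The workaround is to Box::leak the statement, hand borrows to the iterator, and let NonAliasingBox rebuild the Box in Drop to free it; since struct fields drop in declaration order, iterator is gone before statement_ref reclaims the memory. The leak-then-reclaim idea in isolation (hypothetical payload type):

struct Payload(String);

fn leak_and_reclaim() {
    let leaked: &'static mut Payload = Box::leak(Box::new(Payload("stmt".into())));
    let raw: *mut Payload = leaked;
    // ... derive borrowers from `leaked` and drop them first ...
    unsafe { drop(Box::from_raw(raw)) }; // reclaim exactly once
}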
pub struct Engine {
writer: Mutex<Connection>,
read_conn_tls: ThreadLocal<Connection>,
read_iterator_conn_tls: ThreadLocal<Connection>,
path: PathBuf,
cache_size_per_thread: u32,
}
impl Engine {
fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result<Connection> {
let conn = Connection::open(&path)?;
conn.pragma_update(Some(Main), "page_size", &2048)?;
conn.pragma_update(Some(Main), "journal_mode", &"WAL")?;
conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?;
conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?;
conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?;
Ok(conn)
}
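These pragmas trade durability for throughput: WAL mode with synchronous=NORMAL fsyncs only at checkpoints, and wal_autocheckpoint=0 turns automatic truncation off entirely, which is why the WAL-clean task shown earlier in database.rs has to issue wal_checkpoint(RESTART) itself on a timer or on SIGHUP.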
fn write_lock(&self) -> MutexGuard<'_, Connection> {
self.writer.lock()
}
fn read_lock(&self) -> &Connection {
self.read_conn_tls
.get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap())
}
fn read_lock_iterator(&self) -> &Connection {
self.read_iterator_conn_tls
.get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap())
}
pub fn flush_wal(self: &Arc<Self>) -> Result<()> {
self.write_lock()
.pragma_update(Some(Main), "wal_checkpoint", &"RESTART")?;
Ok(())
}
}
impl DatabaseEngine for Engine {
fn open(config: &Config) -> Result<Arc<Self>> {
let path = Path::new(&config.database_path).join("conduit.db");
// calculates cache-size per permanent connection
// 1. convert MB to KiB
// 2. divide by permanent connections + permanent iter connections + write connection
// 3. round down to nearest integer
let cache_size_per_thread: u32 = ((config.db_cache_capacity_mb * 1024.0)
/ ((num_cpus::get().max(1) * 2) + 1) as f64)
as u32;
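// Worked example (values assumed): with db_cache_capacity_mb = 300.0 and
// 4 CPUs there are 4 read + 4 iterator + 1 write connections, so each
// gets 300 * 1024 / 9, or about 34133 KiB of page cache.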
let writer = Mutex::new(Self::prepare_conn(&path, cache_size_per_thread)?);
let arc = Arc::new(Engine {
writer,
read_conn_tls: ThreadLocal::new(),
read_iterator_conn_tls: ThreadLocal::new(),
path,
cache_size_per_thread,
});
Ok(arc)
}
fn open_tree(self: &Arc<Self>, name: &str) -> Result<Arc<dyn Tree>> {
self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?;
Ok(Arc::new(SqliteTable {
engine: Arc::clone(self),
name: name.to_owned(),
watchers: RwLock::new(HashMap::new()),
}))
}
fn flush(self: &Arc<Self>) -> Result<()> {
// we enabled PRAGMA synchronous=normal, so this should not be necessary
Ok(())
}
}
pub struct SqliteTable {
engine: Arc<Engine>,
name: String,
watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
}
type TupleOfBytes = (Vec<u8>, Vec<u8>);
impl SqliteTable {
#[tracing::instrument(skip(self, guard, key))]
fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result<Option<Vec<u8>>> {
//dbg!(&self.name);
Ok(guard
.prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())?
.query_row([key], |row| row.get(0))
.optional()?)
}
#[tracing::instrument(skip(self, guard, key, value))]
fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> {
//dbg!(&self.name);
guard.execute(
format!(
"INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)",
self.name
)
.as_str(),
[key, value],
)?;
Ok(())
}
pub fn iter_with_guard<'a>(
&'a self,
guard: &'a Connection,
) -> Box<dyn Iterator<Item = TupleOfBytes> + 'a> {
let statement = Box::leak(Box::new(
guard
.prepare(&format!(
"SELECT key, value FROM {} ORDER BY key ASC",
&self.name
))
.unwrap(),
));
let statement_ref = NonAliasingBox(statement);
//let name = self.name.clone();
let iterator = Box::new(
statement
.query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
.unwrap()
.map(move |r| {
//dbg!(&name);
r.unwrap()
}),
);
Box::new(PreparedStatementIterator {
iterator,
statement_ref,
})
}
}
impl Tree for SqliteTable {
#[tracing::instrument(skip(self, key))]
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
self.get_with_guard(self.engine.read_lock(), key)
}
#[tracing::instrument(skip(self, key, value))]
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
let guard = self.engine.write_lock();
self.insert_with_guard(&guard, key, value)?;
drop(guard);
let watchers = self.watchers.read();
let mut triggered = Vec::new();
for length in 0..=key.len() {
if watchers.contains_key(&key[..length]) {
triggered.push(&key[..length]);
}
}
drop(watchers);
if !triggered.is_empty() {
let mut watchers = self.watchers.write();
for prefix in triggered {
if let Some(tx) = watchers.remove(prefix) {
let _ = tx.0.send(());
}
}
};
Ok(())
}
#[tracing::instrument(skip(self, iter))]
fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
let guard = self.engine.write_lock();
guard.execute("BEGIN", [])?;
for (key, value) in iter {
self.insert_with_guard(&guard, &key, &value)?;
}
guard.execute("COMMIT", [])?;
drop(guard);
Ok(())
}
#[tracing::instrument(skip(self, iter))]
fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
let guard = self.engine.write_lock();
guard.execute("BEGIN", [])?;
for key in iter {
let old = self.get_with_guard(&guard, &key)?;
let new = crate::utils::increment(old.as_deref())
.expect("utils::increment always returns Some");
self.insert_with_guard(&guard, &key, &new)?;
}
guard.execute("COMMIT", [])?;
drop(guard);
Ok(())
}
#[tracing::instrument(skip(self, key))]
fn remove(&self, key: &[u8]) -> Result<()> {
let guard = self.engine.write_lock();
guard.execute(
format!("DELETE FROM {} WHERE key = ?", self.name).as_str(),
[key],
)?;
Ok(())
}
#[tracing::instrument(skip(self))]
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = TupleOfBytes> + 'a> {
let guard = self.engine.read_lock_iterator();
self.iter_with_guard(guard)
}
#[tracing::instrument(skip(self, from, backwards))]
fn iter_from<'a>(
&'a self,
from: &[u8],
backwards: bool,
) -> Box<dyn Iterator<Item = TupleOfBytes> + 'a> {
let guard = self.engine.read_lock_iterator();
let from = from.to_vec(); // TODO change interface?
//let name = self.name.clone();
if backwards {
let statement = Box::leak(Box::new(
guard
.prepare(&format!(
"SELECT key, value FROM {} WHERE key <= ? ORDER BY key DESC",
&self.name
))
.unwrap(),
));
let statement_ref = NonAliasingBox(statement);
let iterator = Box::new(
statement
.query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
.unwrap()
.map(move |r| {
//dbg!(&name);
r.unwrap()
}),
);
Box::new(PreparedStatementIterator {
iterator,
statement_ref,
})
} else {
let statement = Box::leak(Box::new(
guard
.prepare(&format!(
"SELECT key, value FROM {} WHERE key >= ? ORDER BY key ASC",
&self.name
))
.unwrap(),
));
let statement_ref = NonAliasingBox(statement);
let iterator = Box::new(
statement
.query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
.unwrap()
.map(move |r| {
//dbg!(&name);
r.unwrap()
}),
);
Box::new(PreparedStatementIterator {
iterator,
statement_ref,
})
}
}
#[tracing::instrument(skip(self, key))]
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
let guard = self.engine.write_lock();
let old = self.get_with_guard(&guard, key)?;
let new =
crate::utils::increment(old.as_deref()).expect("utils::increment always returns Some");
self.insert_with_guard(&guard, key, &new)?;
Ok(new)
}
#[tracing::instrument(skip(self, prefix))]
fn scan_prefix<'a>(&'a self, prefix: Vec<u8>) -> Box<dyn Iterator<Item = TupleOfBytes> + 'a> {
Box::new(
self.iter_from(&prefix, false)
.take_while(move |(key, _)| key.starts_with(&prefix)),
)
}
#[tracing::instrument(skip(self, prefix))]
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
let mut rx = match self.watchers.write().entry(prefix.to_vec()) {
hash_map::Entry::Occupied(o) => o.get().1.clone(),
hash_map::Entry::Vacant(v) => {
let (tx, rx) = tokio::sync::watch::channel(());
v.insert((tx, rx.clone()));
rx
}
};
Box::pin(async move {
// Tx is never destroyed
rx.changed().await.unwrap();
})
}
#[tracing::instrument(skip(self))]
fn clear(&self) -> Result<()> {
debug!("clear: running");
self.engine
.write_lock()
.execute(format!("DELETE FROM {}", self.name).as_str(), [])?;
debug!("clear: ran");
Ok(())
}
}
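Unlike the oneshot watchers in the other backends, this table keeps one tokio::sync::watch channel per prefix and clones the receiver for every waiter, so a single send wakes all subscribers without re-registering them. A reduced sketch of the fan-out:

use tokio::sync::watch;

async fn watch_fanout() {
    let (tx, rx) = watch::channel(());
    let mut waiter_rx = rx.clone();
    let waiter = tokio::spawn(async move {
        waiter_rx.changed().await.unwrap(); // resolves on the next send
    });
    tx.send(()).unwrap(); // wakes every cloned receiver
    waiter.await.unwrap();
}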

89
src/database/account_data.rs

@@ -12,12 +12,10 @@ use super::abstraction::Tree;
pub struct AccountData {
pub(super) roomuserdataid_accountdata: Arc<dyn Tree>, // RoomUserDataId = Room + User + Count + Type
pub(super) roomusertype_roomuserdataid: Arc<dyn Tree>, // RoomUserType = Room + User + Type
}
impl AccountData {
/// Places one event in the account data of the user and removes the previous entry.
#[tracing::instrument(skip(self, room_id, user_id, event_type, data, globals))]
pub fn update<T: Serialize>(
&self,
room_id: Option<&RoomId>,
@@ -32,16 +30,18 @@ impl AccountData {
.as_bytes()
.to_vec();
prefix.push(0xff);
prefix.extend_from_slice(user_id.as_bytes());
prefix.extend_from_slice(&user_id.as_bytes());
prefix.push(0xff);
let mut roomuserdataid = prefix.clone();
roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes());
roomuserdataid.push(0xff);
roomuserdataid.extend_from_slice(event_type.as_bytes());
// Remove old entry
if let Some((old_key, _)) = self.find_event(room_id, user_id, &event_type)? {
self.roomuserdataid_accountdata.remove(&old_key)?;
}
let mut key = prefix;
key.extend_from_slice(event_type.as_bytes());
key.extend_from_slice(&globals.next_count()?.to_be_bytes());
key.push(0xff);
key.extend_from_slice(event_type.as_ref().as_bytes());
let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling
if json.get("type").is_none() || json.get("content").is_none() {
@@ -52,58 +52,29 @@ impl AccountData {
}
self.roomuserdataid_accountdata.insert(
&roomuserdataid,
&key,
&serde_json::to_vec(&json).expect("to_vec always works on json values"),
)?;
let prev = self.roomusertype_roomuserdataid.get(&key)?;
self.roomusertype_roomuserdataid
.insert(&key, &roomuserdataid)?;
// Remove old entry
if let Some(prev) = prev {
self.roomuserdataid_accountdata.remove(&prev)?;
}
Ok(())
}
/// Searches the account data for a specific kind.
#[tracing::instrument(skip(self, room_id, user_id, kind))]
pub fn get<T: DeserializeOwned>(
&self,
room_id: Option<&RoomId>,
user_id: &UserId,
kind: EventType,
) -> Result<Option<T>> {
let mut key = room_id
.map(|r| r.to_string())
.unwrap_or_default()
.as_bytes()
.to_vec();
key.push(0xff);
key.extend_from_slice(user_id.as_bytes());
key.push(0xff);
key.extend_from_slice(kind.as_ref().as_bytes());
self.roomusertype_roomuserdataid
.get(&key)?
.and_then(|roomuserdataid| {
self.roomuserdataid_accountdata
.get(&roomuserdataid)
.transpose()
})
.transpose()?
.map(|data| {
serde_json::from_slice(&data)
.map_err(|_| Error::bad_database("could not deserialize"))
self.find_event(room_id, user_id, &kind)?
.map(|(_, v)| {
serde_json::from_slice(&v).map_err(|_| Error::bad_database("could not deserialize"))
})
.transpose()
}
/// Returns all changes to the account data that happened after `since`.
#[tracing::instrument(skip(self, room_id, user_id, since))]
#[tracing::instrument(skip(self))]
pub fn changes_since(
&self,
room_id: Option<&RoomId>,
@@ -118,7 +89,7 @@ impl AccountData {
.as_bytes()
.to_vec();
prefix.push(0xff);
prefix.extend_from_slice(user_id.as_bytes());
prefix.extend_from_slice(&user_id.as_bytes());
prefix.push(0xff);
// Skip the data that's exactly at since, because we sent that last time
@@ -150,4 +121,36 @@ impl AccountData {
Ok(userdata)
}
fn find_event(
&self,
room_id: Option<&RoomId>,
user_id: &UserId,
kind: &EventType,
) -> Result<Option<(Box<[u8]>, Box<[u8]>)>> {
let mut prefix = room_id
.map(|r| r.to_string())
.unwrap_or_default()
.as_bytes()
.to_vec();
prefix.push(0xff);
prefix.extend_from_slice(&user_id.as_bytes());
prefix.push(0xff);
let mut last_possible_key = prefix.clone();
last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes());
let kind = kind.clone();
Ok(self
.roomuserdataid_accountdata
.iter_from(&last_possible_key, true)
.take_while(move |(k, _)| k.starts_with(&prefix))
.find(move |(k, _)| {
k.rsplit(|&b| b == 0xff)
.next()
.map(|current_event_type| current_event_type == kind.as_ref().as_bytes())
.unwrap_or(false)
}))
}
}
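find_event exploits the key layout to find the newest entry of a type: the count segment is big-endian, so keys sort chronologically, and seeking to prefix + u64::MAX then walking backwards visits entries newest-first. The reverse-seek idiom against the Tree trait from the abstraction module:

fn latest_under(tree: &dyn Tree, prefix: &[u8]) -> Option<(Box<[u8]>, Box<[u8]>)> {
    let mut ceiling = prefix.to_vec();
    ceiling.extend_from_slice(&u64::MAX.to_be_bytes()); // past any real count
    let prefix = prefix.to_vec();
    tree.iter_from(&ceiling, true) // backwards from the ceiling
        .take_while(move |(k, _)| k.starts_with(&prefix))
        .next()
}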

97
src/database/admin.rs

@@ -1,19 +1,20 @@
use std::{convert::TryInto, sync::Arc};
use std::{
convert::{TryFrom, TryInto},
sync::Arc,
};
use crate::{pdu::PduBuilder, Database};
use log::warn;
use rocket::futures::{channel::mpsc, stream::StreamExt};
use ruma::{
events::{room::message::RoomMessageEventContent, EventType},
events::{room::message, EventType},
UserId,
};
use serde_json::value::to_raw_value;
use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard};
use tracing::warn;
pub enum AdminCommand {
RegisterAppservice(serde_yaml::Value),
ListAppservices,
SendMessage(RoomMessageEventContent),
SendMessage(message::MessageEventContent),
}
#[derive(Clone)]
@@ -24,97 +25,73 @@ pub struct Admin {
impl Admin {
pub fn start_handler(
&self,
db: Arc<RwLock<Database>>,
db: Arc<Database>,
mut receiver: mpsc::UnboundedReceiver<AdminCommand>,
) {
tokio::spawn(async move {
// TODO: Use futures when we have long admin commands
//let mut futures = FuturesUnordered::new();
let guard = db.read().await;
let conduit_user = UserId::parse(format!("@conduit:{}", guard.globals.server_name()))
let conduit_user = UserId::try_from(format!("@conduit:{}", db.globals.server_name()))
.expect("@conduit:server_name is valid");
let conduit_room = guard
let conduit_room = db
.rooms
.id_from_alias(
format!("#admins:{}", guard.globals.server_name())
.as_str()
&format!("#admins:{}", db.globals.server_name())
.try_into()
.expect("#admins:server_name is a valid room alias"),
)
.unwrap();
let conduit_room = match conduit_room {
None => {
warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this.");
return;
}
Some(r) => r,
};
drop(guard);
if conduit_room.is_none() {
warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this.");
}
let send_message = |message: RoomMessageEventContent,
guard: RwLockReadGuard<'_, Database>,
mutex_lock: &MutexGuard<'_, ()>| {
guard
.rooms
.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMessage,
content: to_raw_value(&message)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: None,
redacts: None,
},
&conduit_user,
&conduit_room,
&guard,
mutex_lock,
)
.unwrap();
let send_message = |message: message::MessageEventContent| {
if let Some(conduit_room) = &conduit_room {
db.rooms
.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMessage,
content: serde_json::to_value(message)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: None,
redacts: None,
},
&conduit_user,
&conduit_room,
&db,
)
.unwrap();
}
};
loop {
tokio::select! {
Some(event) = receiver.next() => {
let guard = db.read().await;
let mutex_state = Arc::clone(
guard.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(conduit_room.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
match event {
AdminCommand::RegisterAppservice(yaml) => {
guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error
db.appservice.register_appservice(yaml).unwrap(); // TODO handle error
}
AdminCommand::ListAppservices => {
if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::<Vec<_>>()) {
if let Ok(appservices) = db.appservice.iter_ids().map(|ids| ids.collect::<Vec<_>>()) {
let count = appservices.len();
let output = format!(
"Appservices ({}): {}",
count,
appservices.into_iter().filter_map(|r| r.ok()).collect::<Vec<_>>().join(", ")
);
send_message(RoomMessageEventContent::text_plain(output), guard, &state_lock);
send_message(message::MessageEventContent::text_plain(output));
} else {
send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock);
send_message(message::MessageEventContent::text_plain("Failed to get appservices."));
}
}
AdminCommand::SendMessage(message) => {
send_message(message, guard, &state_lock);
send_message(message);
}
}
drop(state_lock);
}
}
}
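Commands reach this loop over the unbounded futures mpsc channel owned by Admin, so callers can queue work without blocking. A hedged sketch of the sending side (the `sender` field name is assumed; the struct body is not shown in this hunk):

fn request_appservice_list(admin: &Admin) {
    // UnboundedSender::unbounded_send never blocks; it only fails if the
    // receiving task has shut down.
    admin
        .sender
        .unbounded_send(AdminCommand::ListAppservices)
        .expect("admin handler task is alive");
}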

23
src/database/appservice.rs

@@ -49,23 +49,22 @@ impl Appservice {
)
}
pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + '_> {
pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + Send + Sync + '_> {
Ok(self.id_appserviceregistrations.iter().map(|(id, _)| {
utils::string_from_bytes(&id)
.map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations."))
}))
}
pub fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
self.iter_ids()?
.filter_map(|id| id.ok())
.map(move |id| {
Ok((
id.clone(),
self.get_registration(&id)?
.expect("iter_ids only returns appservices that exist"),
))
})
.collect()
pub fn iter_all(
&self,
) -> Result<impl Iterator<Item = Result<(String, serde_yaml::Value)>> + '_ + Send + Sync> {
Ok(self.iter_ids()?.filter_map(|id| id.ok()).map(move |id| {
Ok((
id.clone(),
self.get_registration(&id)?
.expect("iter_ids only returns appservices that exist"),
))
}))
}
}

186
src/database/globals.rs

@@ -1,82 +1,84 @@
use crate::{database::Config, server_server::FedDest, utils, ConduitResult, Error, Result};
use crate::{database::Config, utils, ConduitResult, Error, Result};
use log::{error, info};
use ruma::{
api::{
client::r0::sync::sync_events,
federation::discovery::{ServerSigningKeys, VerifyKey},
},
DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId,
DeviceId, EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, UserId,
};
use rustls::{ServerCertVerifier, WebPKIVerifier};
use std::{
collections::{BTreeMap, HashMap},
fs,
future::Future,
net::IpAddr,
path::PathBuf,
sync::{Arc, Mutex, RwLock},
sync::{Arc, RwLock},
time::{Duration, Instant},
};
use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore};
use tracing::error;
use tokio::sync::Semaphore;
use trust_dns_resolver::TokioAsyncResolver;
use super::abstraction::Tree;
pub const COUNTER: &[u8] = b"c";
type WellKnownMap = HashMap<Box<ServerName>, (FedDest, String)>;
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
type WellKnownMap = HashMap<Box<ServerName>, (String, String)>;
type TlsNameMap = HashMap<String, webpki::DNSName>;
type RateLimitState = (Instant, u32); // Time of last failed try, number of failed tries
type SyncHandle = (
Option<String>, // since
Receiver<Option<ConduitResult<sync_events::Response>>>, // rx
);
pub struct Globals {
pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
pub tls_name_override: Arc<RwLock<TlsNameMap>>,
pub(super) globals: Arc<dyn Tree>,
config: Config,
keypair: Arc<ruma::signatures::Ed25519KeyPair>,
reqwest_client: reqwest::Client,
dns_resolver: TokioAsyncResolver,
jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>,
pub(super) server_signingkeys: Arc<dyn Tree>,
pub bad_event_ratelimiter: Arc<RwLock<HashMap<Box<EventId>, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<HashMap<Box<ServerName>, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<HashMap<(Box<UserId>, Box<DeviceId>), SyncHandle>>,
pub roomid_mutex_insert: RwLock<HashMap<Box<RoomId>, Arc<Mutex<()>>>>,
pub roomid_mutex_state: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>, // this lock will be held longer
pub rotate: RotationHandler,
pub bad_event_ratelimiter: Arc<RwLock<BTreeMap<EventId, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<BTreeMap<Vec<String>, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<BTreeMap<Box<ServerName>, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<
BTreeMap<
(UserId, Box<DeviceId>),
(
Option<String>,
tokio::sync::watch::Receiver<Option<ConduitResult<sync_events::Response>>>,
), // since, rx
>,
>,
}
/// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like.
///
/// This is utilized to have sync workers return early and release read locks on the database.
pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>);
impl RotationHandler {
pub fn new() -> Self {
let (s, r) = broadcast::channel(1);
Self(s, r)
}
pub fn watch(&self) -> impl Future<Output = ()> {
let mut r = self.0.subscribe();
async move {
let _ = r.recv().await;
}
}
pub fn fire(&self) {
let _ = self.0.send(());
}
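RotationHandler wraps a capacity-1 broadcast channel: every pending watch future holds a subscription, and fire broadcasts once to wake all of them, which is how long-polling sync workers are told to return and release their database read locks (the shutdown task earlier calls globals.rotate.fire()). The signal in miniature:

use tokio::sync::broadcast;

async fn rotation_demo() {
    let (tx, _keepalive) = broadcast::channel::<()>(1);
    let mut sub = tx.subscribe(); // what watch() does internally
    let poller = tokio::spawn(async move {
        let _ = sub.recv().await; // parked until someone fires
    });
    let _ = tx.send(()); // fire(): wakes every subscriber
    poller.await.unwrap();
}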
struct MatrixServerVerifier {
inner: WebPKIVerifier,
tls_name_override: Arc<RwLock<TlsNameMap>>,
}
impl Default for RotationHandler {
fn default() -> Self {
Self::new()
impl ServerCertVerifier for MatrixServerVerifier {
fn verify_server_cert(
&self,
roots: &rustls::RootCertStore,
presented_certs: &[rustls::Certificate],
dns_name: webpki::DNSNameRef<'_>,
ocsp_response: &[u8],
) -> std::result::Result<rustls::ServerCertVerified, rustls::TLSError> {
if let Some(override_name) = self.tls_name_override.read().unwrap().get(dns_name.into()) {
let result = self.inner.verify_server_cert(
roots,
presented_certs,
override_name.as_ref(),
ocsp_response,
);
if result.is_ok() {
return result;
}
info!(
"Server {:?} is non-compliant, retrying TLS verification with original name",
dns_name
);
}
self.inner
.verify_server_cert(roots, presented_certs, dns_name, ocsp_response)
}
}
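The override map serves Matrix server-name delegation: when SRV or .well-known sends a server's traffic to another host, the remote certificate may only cover the delegated name, so verification is retried against the stored override before falling back to the literal SNI name. Registering an override is a plain keyed insert (a sketch, using the HashMap<String, webpki::DNSName> form of TlsNameMap shown above and assuming the webpki 0.21-era API):

use std::sync::RwLock;

fn register_override(map: &RwLock<TlsNameMap>, sni_name: &str, cert_name: &str) {
    let dns_name = webpki::DNSNameRef::try_from_ascii_str(cert_name)
        .expect("valid DNS name")
        .to_owned();
    map.write().unwrap().insert(sni_name.to_owned(), dns_name);
}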
@@ -112,7 +114,7 @@ impl Globals {
.map(|key| (version, key))
})
.and_then(|(version, key)| {
ruma::signatures::Ed25519KeyPair::from_der(key, version)
ruma::signatures::Ed25519KeyPair::from_der(&key, version)
.map_err(|_| Error::bad_database("Private or public keys are invalid."))
});
@@ -126,6 +128,22 @@ impl Globals {
};
let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new()));
let verifier = Arc::new(MatrixServerVerifier {
inner: WebPKIVerifier::new(),
tls_name_override: tls_name_override.clone(),
});
let mut tlsconfig = rustls::ClientConfig::new();
tlsconfig.dangerous().set_certificate_verifier(verifier);
tlsconfig.root_store =
rustls_native_certs::load_native_certs().expect("Error loading system certificates");
let reqwest_client = reqwest::Client::builder()
.connect_timeout(Duration::from_secs(30))
.timeout(Duration::from_secs(60 * 3))
.pool_max_idle_per_host(1)
.use_preconfigured_tls(tlsconfig)
.build()
.unwrap();
let jwt_decoding_key = config
.jwt_secret
@@ -136,6 +154,7 @@ impl Globals {
globals,
config,
keypair: Arc::new(keypair),
reqwest_client,
dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| {
Error::bad_config("Failed to set up trust dns resolver with system config.")
})?,
@@ -143,14 +162,10 @@ impl Globals {
tls_name_override,
server_signingkeys,
jwt_decoding_key,
bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
roomid_mutex_state: RwLock::new(HashMap::new()),
roomid_mutex_insert: RwLock::new(HashMap::new()),
roomid_mutex_federation: RwLock::new(HashMap::new()),
sync_receivers: RwLock::new(HashMap::new()),
rotate: RotationHandler::new(),
bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
sync_receivers: RwLock::new(BTreeMap::new()),
};
fs::create_dir_all(s.get_media_folder())?;
@@ -164,25 +179,15 @@ impl Globals {
}
/// Returns a reqwest client which can be used to send requests.
pub fn reqwest_client(&self) -> Result<reqwest::ClientBuilder> {
let mut reqwest_client_builder = reqwest::Client::builder()
.connect_timeout(Duration::from_secs(30))
.timeout(Duration::from_secs(60 * 3))
.pool_max_idle_per_host(1);
if let Some(proxy) = self.config.proxy.to_proxy()? {
reqwest_client_builder = reqwest_client_builder.proxy(proxy);
}
Ok(reqwest_client_builder)
pub fn reqwest_client(&self) -> &reqwest::Client {
&self.reqwest_client
}
#[tracing::instrument(skip(self))]
pub fn next_count(&self) -> Result<u64> {
utils::u64_from_bytes(&self.globals.increment(COUNTER)?)
.map_err(|_| Error::bad_database("Count has invalid bytes."))
}
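next_count backs nearly every key in this schema: Tree::increment applies a merge that reads the stored value as a big-endian u64 and adds one. A sketch of that helper matching how it is used above (missing or malformed values restart at 1):

use std::convert::TryInto;

fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
    let number = match old.map(TryInto::try_into) {
        Some(Ok(bytes)) => u64::from_be_bytes(bytes) + 1,
        _ => 1, // first use, or unreadable bytes
    };
    Some(number.to_be_bytes().to_vec())
}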
#[tracing::instrument(skip(self))]
pub fn current_count(&self) -> Result<u64> {
self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| {
utils::u64_from_bytes(&bytes)
@@ -210,10 +215,6 @@ impl Globals {
self.config.allow_federation
}
pub fn allow_room_creation(&self) -> bool {
self.config.allow_room_creation
}
pub fn trusted_servers(&self) -> &[Box<ServerName>] {
&self.config.trusted_servers
}
@@ -226,35 +227,11 @@ impl Globals {
self.jwt_decoding_key.as_ref()
}
pub fn turn_password(&self) -> &String {
&self.config.turn_password
}
pub fn turn_ttl(&self) -> u64 {
self.config.turn_ttl
}
pub fn turn_uris(&self) -> &[String] {
&self.config.turn_uris
}
pub fn turn_username(&self) -> &String {
&self.config.turn_username
}
pub fn turn_secret(&self) -> &String {
&self.config.turn_secret
}
/// TODO: the key's valid-until timestamp is only honored in room versions > 4
/// Remove the outdated keys and insert the new ones.
///
/// This doesn't actually check that the keys provided are newer than the old set.
pub fn add_signing_key(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> {
pub fn add_signing_key(&self, origin: &ServerName, new_keys: ServerSigningKeys) -> Result<()> {
// Not atomic, but this is not critical
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
@@ -279,26 +256,19 @@ impl Globals {
&serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
)?;
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
Ok(tree)
Ok(())
}
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
pub fn signing_keys_for(
&self,
origin: &ServerName,
) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> {
) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
let signingkeys = self
.server_signingkeys
.get(origin.as_bytes())?
.and_then(|bytes| serde_json::from_slice(&bytes).ok())
.map(|keys: ServerSigningKeys| {
.and_then(|bytes| serde_json::from_slice::<ServerSigningKeys>(&bytes).ok())
.map(|keys| {
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys

66
src/database/key_backups.rs

@@ -6,7 +6,7 @@ use ruma::{
},
RoomId, UserId,
};
use std::{collections::BTreeMap, sync::Arc};
use std::{collections::BTreeMap, convert::TryFrom, sync::Arc};
use super::abstraction::Tree;
@@ -27,7 +27,7 @@ impl KeyBackups {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(version.as_bytes());
key.extend_from_slice(&version.as_bytes());
self.backupid_algorithm.insert(
&key,
@@ -41,7 +41,7 @@ impl KeyBackups {
pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(version.as_bytes());
key.extend_from_slice(&version.as_bytes());
self.backupid_algorithm.remove(&key)?;
self.backupid_etag.remove(&key)?;
@@ -64,7 +64,7 @@ impl KeyBackups {
) -> Result<String> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(version.as_bytes());
key.extend_from_slice(&version.as_bytes());
if self.backupid_algorithm.get(&key)?.is_none() {
return Err(Error::BadRequest(
@@ -75,34 +75,13 @@ impl KeyBackups {
self.backupid_algorithm.insert(
&key,
serde_json::to_string(backup_metadata)
&serde_json::to_string(backup_metadata)
.expect("BackupAlgorithm::to_string always works")
.as_bytes(),
)?;
self.backupid_etag
.insert(&key, &globals.next_count()?.to_be_bytes())?;
Ok(version.to_owned())
}
pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result<Option<String>> {
let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff);
let mut last_possible_key = prefix.clone();
last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes());
self.backupid_algorithm
.iter_from(&last_possible_key, true)
.take_while(move |(k, _)| k.starts_with(&prefix))
.next()
.map(|(key, _)| {
utils::string_from_bytes(
key.rsplit(|&b| b == 0xff)
.next()
.expect("rsplit always returns an element"),
)
.map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))
})
.transpose()
Ok(version.to_string())
}
pub fn get_latest_backup(&self, user_id: &UserId) -> Result<Option<(String, BackupAlgorithm)>> {
@@ -115,7 +94,7 @@ impl KeyBackups {
.iter_from(&last_possible_key, true)
.take_while(move |(k, _)| k.starts_with(&prefix))
.next()
.map(|(key, value)| {
.map_or(Ok(None), |(key, value)| {
let version = utils::string_from_bytes(
key.rsplit(|&b| b == 0xff)
.next()
@@ -123,14 +102,13 @@ impl KeyBackups {
)
.map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?;
Ok((
Ok(Some((
version,
serde_json::from_slice(&value).map_err(|_| {
Error::bad_database("Algorithm in backupid_algorithm is invalid.")
})?,
))
)))
})
.transpose()
}
pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<BackupAlgorithm>> {
@@ -193,7 +171,7 @@ impl KeyBackups {
pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result<String> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(version.as_bytes());
key.extend_from_slice(&version.as_bytes());
Ok(utils::u64_from_bytes(
&self
@@ -209,13 +187,13 @@ impl KeyBackups {
&self,
user_id: &UserId,
version: &str,
) -> Result<BTreeMap<Box<RoomId>, RoomKeyBackup>> {
) -> Result<BTreeMap<RoomId, RoomKeyBackup>> {
let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff);
prefix.extend_from_slice(version.as_bytes());
prefix.push(0xff);
let mut rooms = BTreeMap::<Box<RoomId>, RoomKeyBackup>::new();
let mut rooms = BTreeMap::<RoomId, RoomKeyBackup>::new();
for result in self
.backupkeyid_backup
@@ -224,15 +202,15 @@ impl KeyBackups {
let mut parts = key.rsplit(|&b| b == 0xff);
let session_id =
utils::string_from_bytes(parts.next().ok_or_else(|| {
utils::string_from_bytes(&parts.next().ok_or_else(|| {
Error::bad_database("backupkeyid_backup key is invalid.")
})?)
.map_err(|_| {
Error::bad_database("backupkeyid_backup session_id is invalid.")
})?;
let room_id = RoomId::parse(
utils::string_from_bytes(parts.next().ok_or_else(|| {
let room_id = RoomId::try_from(
utils::string_from_bytes(&parts.next().ok_or_else(|| {
Error::bad_database("backupkeyid_backup key is invalid.")
})?)
.map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?,
@@ -281,7 +259,7 @@ impl KeyBackups {
let mut parts = key.rsplit(|&b| b == 0xff);
let session_id =
utils::string_from_bytes(parts.next().ok_or_else(|| {
utils::string_from_bytes(&parts.next().ok_or_else(|| {
Error::bad_database("backupkeyid_backup key is invalid.")
})?)
.map_err(|_| {
@@ -326,7 +304,7 @@ impl KeyBackups {
pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(version.as_bytes());
key.extend_from_slice(&version.as_bytes());
key.push(0xff);
for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) {
@@ -344,9 +322,9 @@ impl KeyBackups {
) -> Result<()> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(version.as_bytes());
key.extend_from_slice(&version.as_bytes());
key.push(0xff);
key.extend_from_slice(room_id.as_bytes());
key.extend_from_slice(&room_id.as_bytes());
key.push(0xff);
for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) {
@@ -365,11 +343,11 @@ impl KeyBackups {
) -> Result<()> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(version.as_bytes());
key.extend_from_slice(&version.as_bytes());
key.push(0xff);
key.extend_from_slice(room_id.as_bytes());
key.extend_from_slice(&room_id.as_bytes());
key.push(0xff);
key.extend_from_slice(session_id.as_bytes());
key.extend_from_slice(&session_id.as_bytes());
for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) {
self.backupkeyid_backup.remove(&outdated_key)?;
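
The key_backups.rs hunks are almost entirely reference-vs-value noise (`version.as_bytes()` vs `&version.as_bytes()`) around one unchanged key layout. A hypothetical helper, present in neither branch, that makes the 0xff-delimited layout explicit:

```rust
// Backup tree keys are 0xff-delimited byte strings:
//   user_id 0xff version [0xff room_id [0xff session_id]]
// This mirrors the hand-rolled key building in the hunks above.
fn backup_key(
    user_id: &str,
    version: &str,
    room_id: Option<&str>,
    session_id: Option<&str>,
) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(version.as_bytes());
    if let Some(room_id) = room_id {
        key.push(0xff);
        key.extend_from_slice(room_id.as_bytes());
        if let Some(session_id) = session_id {
            key.push(0xff);
            key.extend_from_slice(session_id.as_bytes());
        }
    }
    key
}
```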

16
src/database/media.rs

@@ -4,10 +4,7 @@ use image::{imageops::FilterType, GenericImageView};
use super::abstraction::Tree;
use crate::{utils, Error, Result};
use std::{mem, sync::Arc};
use tokio::{
fs::File,
io::{AsyncReadExt, AsyncWriteExt},
};
use tokio::{fs::File, io::AsyncReadExt, io::AsyncWriteExt};
pub struct FileMeta {
pub content_disposition: Option<String>,
@@ -57,7 +54,6 @@ impl Media {
}
/// Uploads or replaces a file thumbnail.
#[allow(clippy::too_many_arguments)]
pub async fn upload_thumbnail(
&self,
mxc: String,
@@ -104,8 +100,8 @@ impl Media {
prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail
prefix.push(0xff);
let first = self.mediaid_file.scan_prefix(prefix).next();
if let Some((key, _)) = first {
let mut iter = self.mediaid_file.scan_prefix(prefix);
if let Some((key, _)) = iter.next() {
let path = globals.get_media_file(&key);
let mut file = Vec::new();
File::open(path).await?.read_to_end(&mut file).await?;
@@ -193,9 +189,7 @@ impl Media {
original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail
original_prefix.push(0xff);
let first_thumbnailprefix = self.mediaid_file.scan_prefix(thumbnail_prefix).next();
let first_originalprefix = self.mediaid_file.scan_prefix(original_prefix).next();
if let Some((key, _)) = first_thumbnailprefix {
if let Some((key, _)) = self.mediaid_file.scan_prefix(thumbnail_prefix).next() {
// Using saved thumbnail
let path = globals.get_media_file(&key);
let mut file = Vec::new();
@@ -230,7 +224,7 @@ impl Media {
content_type,
file: file.to_vec(),
}))
} else if let Some((key, _)) = first_originalprefix {
} else if let Some((key, _)) = self.mediaid_file.scan_prefix(original_prefix).next() {
// Generate a thumbnail
let path = globals.get_media_file(&key);
let mut file = Vec::new();
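
The media.rs hunks only restructure how the prefix scans are driven; the key layout itself is untouched. As far as the visible hunks show, originals are stored under width = height = 0 and thumbnails under their real dimensions, which is why `get_thumbnail` scans two prefixes. A hypothetical prefix builder sketching that encoding:

```rust
// mediaid_file keys begin with: mxc_uri 0xff width_u32_be height_u32_be 0xff
// (assumed from the hunks above; the full key also carries content
// disposition and content type further to the right).
fn media_prefix(mxc: &str, width: u32, height: u32) -> Vec<u8> {
    let mut prefix = mxc.as_bytes().to_vec();
    prefix.push(0xff);
    prefix.extend_from_slice(&width.to_be_bytes());
    prefix.extend_from_slice(&height.to_be_bytes());
    prefix.push(0xff);
    prefix
}
```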

146
src/database/proxy.rs

@@ -1,146 +0,0 @@
use reqwest::{Proxy, Url};
use serde::Deserialize;
use crate::Result;
/// ## Examples:
/// - No proxy (default):
/// ```toml
/// proxy ="none"
/// ```
/// - Global proxy
/// ```toml
/// [proxy]
/// global = { url = "socks5h://localhost:9050" }
/// ```
/// - Proxy some domains
/// ```toml
/// [proxy]
/// [[proxy.by_domain]]
/// url = "socks5h://localhost:9050"
/// include = ["*.onion", "matrix.myspecial.onion"]
/// exclude = ["*.myspecial.onion"]
/// ```
/// ## Include vs. Exclude
/// If include is an empty list, it is assumed to be `["*"]`.
///
/// If a domain matches both the include and exclude lists, the proxy is only used when the
/// matching include rule is more specific than the matching exclude rule. In the example above,
/// the proxy would be used for `ordinary.onion` and `matrix.myspecial.onion`, but not for
/// `hello.myspecial.onion`.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ProxyConfig {
None,
Global {
#[serde(deserialize_with = "crate::utils::deserialize_from_str")]
url: Url,
},
ByDomain(Vec<PartialProxyConfig>),
}
impl ProxyConfig {
pub fn to_proxy(&self) -> Result<Option<Proxy>> {
Ok(match self.clone() {
ProxyConfig::None => None,
ProxyConfig::Global { url } => Some(Proxy::all(url)?),
ProxyConfig::ByDomain(proxies) => Some(Proxy::custom(move |url| {
proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() // first matching proxy
})),
})
}
}
impl Default for ProxyConfig {
fn default() -> Self {
ProxyConfig::None
}
}
#[derive(Clone, Debug, Deserialize)]
pub struct PartialProxyConfig {
#[serde(deserialize_with = "crate::utils::deserialize_from_str")]
url: Url,
#[serde(default)]
include: Vec<WildCardedDomain>,
#[serde(default)]
exclude: Vec<WildCardedDomain>,
}
impl PartialProxyConfig {
pub fn for_url(&self, url: &Url) -> Option<&Url> {
let domain = url.domain()?;
let mut included_because = None; // most specific reason it was included
let mut excluded_because = None; // most specific reason it was excluded
if self.include.is_empty() {
// treat empty include list as `*`
included_because = Some(&WildCardedDomain::WildCard)
}
for wc_domain in &self.include {
if wc_domain.matches(domain) {
match included_because {
Some(prev) if !wc_domain.more_specific_than(prev) => (),
_ => included_because = Some(wc_domain),
}
}
}
for wc_domain in &self.exclude {
if wc_domain.matches(domain) {
match excluded_because {
Some(prev) if !wc_domain.more_specific_than(prev) => (),
_ => excluded_because = Some(wc_domain),
}
}
}
match (included_because, excluded_because) {
(Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), // included for a more specific reason than excluded
(Some(_), None) => Some(&self.url),
_ => None,
}
}
}
/// A domain name that optionally allows a * as its first subdomain.
#[derive(Clone, Debug)]
pub enum WildCardedDomain {
WildCard,
WildCarded(String),
Exact(String),
}
impl WildCardedDomain {
pub fn matches(&self, domain: &str) -> bool {
match self {
WildCardedDomain::WildCard => true,
WildCardedDomain::WildCarded(d) => domain.ends_with(d),
WildCardedDomain::Exact(d) => domain == d,
}
}
pub fn more_specific_than(&self, other: &Self) -> bool {
match (self, other) {
(WildCardedDomain::WildCard, WildCardedDomain::WildCard) => false,
(_, WildCardedDomain::WildCard) => true,
(WildCardedDomain::Exact(a), WildCardedDomain::WildCarded(_)) => other.matches(a),
(WildCardedDomain::WildCarded(a), WildCardedDomain::WildCarded(b)) => {
a != b && a.ends_with(b)
}
_ => false,
}
}
}
impl std::str::FromStr for WildCardedDomain {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// maybe do some domain validation?
Ok(if s.starts_with("*.") {
WildCardedDomain::WildCarded(s[1..].to_owned())
} else if s == "*" {
WildCardedDomain::WildCarded("".to_owned())
} else {
WildCardedDomain::Exact(s.to_owned())
})
}
}
impl<'de> Deserialize<'de> for WildCardedDomain {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
crate::utils::deserialize_from_str(deserializer)
}
}
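
Since the caches branch deletes this module outright, here is a self-contained illustration of the include/exclude specificity rules its documentation describes; the matching logic is copied from the deleted code above, and the asserts in `main` walk through the documented example:

```rust
enum WildCardedDomain {
    WildCard,
    WildCarded(String),
    Exact(String),
}

impl WildCardedDomain {
    fn matches(&self, domain: &str) -> bool {
        match self {
            WildCardedDomain::WildCard => true,
            WildCardedDomain::WildCarded(d) => domain.ends_with(d),
            WildCardedDomain::Exact(d) => domain == d,
        }
    }
    fn more_specific_than(&self, other: &Self) -> bool {
        match (self, other) {
            (WildCardedDomain::WildCard, WildCardedDomain::WildCard) => false,
            (_, WildCardedDomain::WildCard) => true,
            (WildCardedDomain::Exact(a), WildCardedDomain::WildCarded(_)) => other.matches(a),
            (WildCardedDomain::WildCarded(a), WildCardedDomain::WildCarded(b)) => {
                a != b && a.ends_with(b)
            }
            _ => false,
        }
    }
}

fn main() {
    // include = ["*.onion"], exclude = ["*.myspecial.onion"]
    let include = WildCardedDomain::WildCarded(".onion".to_owned());
    let exclude = WildCardedDomain::WildCarded(".myspecial.onion".to_owned());

    // hello.myspecial.onion matches both rules, but the exclude rule is
    // more specific, so this domain would not be proxied.
    assert!(include.matches("hello.myspecial.onion"));
    assert!(exclude.matches("hello.myspecial.onion"));
    assert!(exclude.more_specific_than(&include));
}
```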

95
src/database/pusher.rs

@@ -1,5 +1,6 @@
use crate::{Database, Error, PduEvent, Result};
use bytes::BytesMut;
use log::{error, info, warn};
use ruma::{
api::{
client::r0::push::{get_pushers, set_pusher, PusherKind},
@@ -9,15 +10,10 @@ use ruma::{
},
IncomingResponse, OutgoingRequest, SendAccessToken,
},
events::{
room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent},
AnySyncRoomEvent, EventType,
},
events::{room::power_levels::PowerLevelsEventContent, EventType},
push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak},
serde::Raw,
uint, RoomId, UInt, UserId,
uint, UInt, UserId,
};
use tracing::{error, info, warn};
use std::{convert::TryFrom, fmt::Debug, mem, sync::Arc};
@@ -29,7 +25,6 @@ pub struct PushData {
}
impl PushData {
#[tracing::instrument(skip(self, sender, pusher))]
pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::Pusher) -> Result<()> {
let mut key = sender.as_bytes().to_vec();
key.push(0xff);
@@ -52,7 +47,6 @@ impl PushData {
Ok(())
}
#[tracing::instrument(skip(self, senderkey))]
pub fn get_pusher(&self, senderkey: &[u8]) -> Result<Option<get_pushers::Pusher>> {
self.senderkey_pusher
.get(senderkey)?
@@ -63,7 +57,6 @@ impl PushData {
.transpose()
}
#[tracing::instrument(skip(self, sender))]
pub fn get_pushers(&self, sender: &UserId) -> Result<Vec<get_pushers::Pusher>> {
let mut prefix = sender.as_bytes().to_vec();
prefix.push(0xff);
@@ -77,11 +70,10 @@ impl PushData {
.collect()
}
#[tracing::instrument(skip(self, sender))]
pub fn get_pusher_senderkeys<'a>(
&'a self,
sender: &UserId,
) -> impl Iterator<Item = Vec<u8>> + 'a {
) -> impl Iterator<Item = Box<[u8]>> + 'a {
let mut prefix = sender.as_bytes().to_vec();
prefix.push(0xff);
@@ -89,7 +81,6 @@ impl PushData {
}
}
#[tracing::instrument(skip(globals, destination, request))]
pub async fn send_request<T: OutgoingRequest>(
globals: &crate::database::globals::Globals,
destination: &str,
@@ -115,11 +106,7 @@ where
//*reqwest_request.timeout_mut() = Some(Duration::from_secs(5));
let url = reqwest_request.url().clone();
let response = globals
.reqwest_client()?
.build()?
.execute(reqwest_request)
.await;
let response = globals.reqwest_client().execute(reqwest_request).await;
match response {
Ok(mut response) => {
@@ -167,7 +154,6 @@ where
}
}
#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))]
pub async fn send_push_notice(
user: &UserId,
unread: UInt,
@@ -179,24 +165,7 @@ pub async fn send_push_notice(
let mut notify = None;
let mut tweaks = Vec::new();
let power_levels: RoomPowerLevelsEventContent = db
.rooms
.room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")?
.map(|ev| {
serde_json::from_str(ev.content.get())
.map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
})
.transpose()?
.unwrap_or_default();
for action in get_actions(
user,
&ruleset,
&power_levels,
&pdu.to_sync_room_event(),
&pdu.room_id,
db,
)? {
for action in get_actions(user, &ruleset, pdu, db)? {
let n = match action {
Action::DontNotify => false,
// TODO: Implement proper support for coalesce
@@ -224,31 +193,37 @@ pub async fn send_push_notice(
Ok(())
}
#[tracing::instrument(skip(user, ruleset, pdu, db))]
pub fn get_actions<'a>(
user: &UserId,
ruleset: &'a Ruleset,
power_levels: &RoomPowerLevelsEventContent,
pdu: &Raw<AnySyncRoomEvent>,
room_id: &RoomId,
pdu: &PduEvent,
db: &Database,
) -> Result<&'a [Action]> {
let power_levels: PowerLevelsEventContent = db
.rooms
.room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")?
.map(|ev| {
serde_json::from_value(ev.content.clone())
.map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
})
.transpose()?
.unwrap_or_default();
let ctx = PushConditionRoomCtx {
room_id: room_id.to_owned(),
room_id: pdu.room_id.clone(),
member_count: 10_u32.into(), // TODO: get member count efficiently
user_display_name: db
.users
.displayname(user)?
.displayname(&user)?
.unwrap_or_else(|| user.localpart().to_owned()),
users_power_levels: power_levels.users.clone(),
users_power_levels: power_levels.users,
default_power_level: power_levels.users_default,
notification_power_levels: power_levels.notifications.clone(),
notification_power_levels: power_levels.notifications,
};
Ok(ruleset.get_actions(pdu, &ctx))
Ok(ruleset.get_actions(&pdu.to_sync_room_event(), &ctx))
}
#[tracing::instrument(skip(unread, pusher, tweaks, event, db))]
async fn send_notice(
unread: UInt,
pusher: &get_pushers::Pusher,
@@ -277,7 +252,7 @@ async fn send_notice(
let mut data_minus_url = pusher.data.clone();
// The url must be stripped off according to spec
data_minus_url.url = None;
device.data = data_minus_url;
device.data = Some(data_minus_url);
// Tweaks are only added if the format is NOT event_id_only
if !event_id_only {
@@ -304,7 +279,7 @@ async fn send_notice(
if event_id_only {
send_request(
&db.globals,
url,
&url,
send_event_notification::v1::Request::new(notifi),
)
.await?;
@@ -320,23 +295,19 @@ async fn send_notice(
let user_name = db.users.displayname(&event.sender)?;
notifi.sender_display_name = user_name.as_deref();
let room_name = if let Some(room_name_pdu) =
db.rooms
.room_state_get(&event.room_id, &EventType::RoomName, "")?
{
serde_json::from_str::<RoomNameEventContent>(room_name_pdu.content.get())
.map_err(|_| Error::bad_database("Invalid room name event in database."))?
.name
} else {
None
};
let room_name = db
.rooms
.room_state_get(&event.room_id, &EventType::RoomName, "")?
.map(|pdu| match pdu.content.get("name") {
Some(serde_json::Value::String(s)) => Some(s.to_string()),
_ => None,
})
.flatten();
notifi.room_name = room_name.as_deref();
send_request(
&db.globals,
url,
&url,
send_event_notification::v1::Request::new(notifi),
)
.await?;
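
One semantic change above is easy to miss: the caches side stops deserializing a typed `RoomNameEventContent` and instead reads the `name` field straight out of the raw event JSON, so a malformed name event now yields `None` rather than a database error. A standalone sketch of that fallback using plain `serde_json`, with no ruma types:

```rust
use serde_json::Value;

// Pull the room name directly from raw event content, tolerating any
// malformed shape by returning None instead of an error.
fn room_name_from_content(content: &Value) -> Option<String> {
    match content.get("name") {
        Some(Value::String(s)) => Some(s.clone()),
        _ => None,
    }
}

fn main() {
    let content = serde_json::json!({ "name": "Conduit Dev" });
    assert_eq!(room_name_from_content(&content).as_deref(), Some("Conduit Dev"));
    assert_eq!(room_name_from_content(&serde_json::json!({})), None);
}
```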

2410
src/database/rooms.rs

File diff suppressed because it is too large

100
src/database/rooms/edus.rs

@@ -11,7 +11,7 @@ use ruma::{
};
use std::{
collections::{HashMap, HashSet},
convert::TryInto,
convert::{TryFrom, TryInto},
mem,
sync::Arc,
};
@@ -60,7 +60,7 @@ impl RoomEdus {
let mut room_latest_id = prefix;
room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes());
room_latest_id.push(0xff);
room_latest_id.extend_from_slice(user_id.as_bytes());
room_latest_id.extend_from_slice(&user_id.as_bytes());
self.readreceiptid_readreceipt.insert(
&room_latest_id,
@@ -76,13 +76,8 @@ impl RoomEdus {
&'a self,
room_id: &RoomId,
since: u64,
) -> impl Iterator<
Item = Result<(
Box<UserId>,
u64,
Raw<ruma::events::AnySyncEphemeralRoomEvent>,
)>,
> + 'a {
) -> impl Iterator<Item = Result<(UserId, u64, Raw<ruma::events::AnySyncEphemeralRoomEvent>)>> + 'a
{
let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff);
let prefix2 = prefix.clone();
@@ -97,7 +92,7 @@ impl RoomEdus {
let count =
utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::<u64>()])
.map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?;
let user_id = UserId::parse(
let user_id = UserId::try_from(
utils::string_from_bytes(&k[prefix.len() + mem::size_of::<u64>() + 1..])
.map_err(|_| {
Error::bad_database("Invalid readreceiptid userid bytes in db.")
@@ -121,7 +116,6 @@ impl RoomEdus {
}
/// Sets a private read marker at `count`.
#[tracing::instrument(skip(self, globals))]
pub fn private_read_set(
&self,
room_id: &RoomId,
@@ -131,7 +125,7 @@ impl RoomEdus {
) -> Result<()> {
let mut key = room_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(user_id.as_bytes());
key.extend_from_slice(&user_id.as_bytes());
self.roomuserid_privateread
.insert(&key, &count.to_be_bytes())?;
@@ -147,7 +141,7 @@ impl RoomEdus {
pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
let mut key = room_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(user_id.as_bytes());
key.extend_from_slice(&user_id.as_bytes());
self.roomuserid_privateread
.get(&key)?
@@ -162,17 +156,16 @@ impl RoomEdus {
pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
let mut key = room_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(user_id.as_bytes());
key.extend_from_slice(&user_id.as_bytes());
Ok(self
.roomuserid_lastprivatereadupdate
.get(&key)?
.map(|bytes| {
utils::u64_from_bytes(&bytes).map_err(|_| {
.map_or(Ok::<_, Error>(None), |bytes| {
Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")
})
})
.transpose()?
})?))
})?
.unwrap_or(0))
}
@@ -199,7 +192,7 @@ impl RoomEdus {
.insert(&room_typing_id, &*user_id.as_bytes())?;
self.roomid_lasttypingupdate
.insert(room_id.as_bytes(), &count)?;
.insert(&room_id.as_bytes(), &count)?;
Ok(())
}
@@ -230,7 +223,7 @@ impl RoomEdus {
if found_outdated {
self.roomid_lasttypingupdate
.insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
.insert(&room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
}
Ok(())
@@ -274,7 +267,7 @@ impl RoomEdus {
if found_outdated {
self.roomid_lasttypingupdate
.insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
.insert(&room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
}
Ok(())
@@ -291,13 +284,12 @@ impl RoomEdus {
Ok(self
.roomid_lasttypingupdate
.get(room_id.as_bytes())?
.map(|bytes| {
utils::u64_from_bytes(&bytes).map_err(|_| {
.get(&room_id.as_bytes())?
.map_or(Ok::<_, Error>(None), |bytes| {
Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
})
})
.transpose()?
})?))
})?
.unwrap_or(0))
}
@@ -310,13 +302,17 @@ impl RoomEdus {
let mut user_ids = HashSet::new();
for (_, user_id) in self.typingid_userid.scan_prefix(prefix) {
let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| {
Error::bad_database("User ID in typingid_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?;
user_ids.insert(user_id);
for user_id in self
.typingid_userid
.scan_prefix(prefix)
.map(|(_, user_id)| {
UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| {
Error::bad_database("User ID in typingid_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))
})
{
user_ids.insert(user_id?);
}
Ok(SyncEphemeralRoomEvent {
@@ -334,7 +330,7 @@ impl RoomEdus {
&self,
user_id: &UserId,
room_id: &RoomId,
presence: PresenceEvent,
presence: ruma::events::presence::PresenceEvent,
globals: &super::super::globals::Globals,
) -> Result<()> {
// TODO: Remove old entry? Or maybe just wipe completely from time to time?
@@ -345,7 +341,7 @@ impl RoomEdus {
presence_id.push(0xff);
presence_id.extend_from_slice(&count);
presence_id.push(0xff);
presence_id.extend_from_slice(presence.sender.as_bytes());
presence_id.extend_from_slice(&presence.sender.as_bytes());
self.presenceid_presence.insert(
&presence_id,
@@ -364,7 +360,7 @@ impl RoomEdus {
#[tracing::instrument(skip(self))]
pub fn ping_presence(&self, user_id: &UserId) -> Result<()> {
self.userid_lastpresenceupdate.insert(
user_id.as_bytes(),
&user_id.as_bytes(),
&utils::millis_since_unix_epoch().to_be_bytes(),
)?;
@@ -374,7 +370,7 @@ impl RoomEdus {
/// Returns the timestamp of the last presence update of this user in millis since the unix epoch.
pub fn last_presence_update(&self, user_id: &UserId) -> Result<Option<u64>> {
self.userid_lastpresenceupdate
.get(user_id.as_bytes())?
.get(&user_id.as_bytes())?
.map(|bytes| {
utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.")
@@ -397,12 +393,12 @@ impl RoomEdus {
presence_id.push(0xff);
presence_id.extend_from_slice(&last_update.to_be_bytes());
presence_id.push(0xff);
presence_id.extend_from_slice(user_id.as_bytes());
presence_id.extend_from_slice(&user_id.as_bytes());
self.presenceid_presence
.get(&presence_id)?
.map(|value| {
let mut presence: PresenceEvent = serde_json::from_slice(&value)
let mut presence = serde_json::from_slice::<PresenceEvent>(&value)
.map_err(|_| Error::bad_database("Invalid presence event in db."))?;
let current_timestamp: UInt = utils::millis_since_unix_epoch()
.try_into()
@@ -425,7 +421,7 @@ impl RoomEdus {
}
/// Sets all users to offline who have been quiet for too long.
fn _presence_maintain(
pub fn presence_maintain(
&self,
rooms: &super::Rooms,
globals: &super::super::globals::Globals,
@@ -450,7 +446,7 @@ impl RoomEdus {
{
// Send new presence events to set the user offline
let count = globals.next_count()?.to_be_bytes();
let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes)
let user_id = utils::string_from_bytes(&user_id_bytes)
.map_err(|_| {
Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.")
})?
@@ -476,14 +472,14 @@ impl RoomEdus {
presence: PresenceState::Offline,
status_msg: None,
},
sender: user_id.to_owned(),
sender: user_id.clone(),
})
.expect("PresenceEvent can be serialized"),
)?;
}
self.userid_lastpresenceupdate.insert(
user_id.as_bytes(),
&user_id.as_bytes(),
&utils::millis_since_unix_epoch().to_be_bytes(),
)?;
}
@@ -492,15 +488,15 @@ impl RoomEdus {
}
/// Returns an iterator over the most recent presence updates that happened after the event with id `since`.
#[tracing::instrument(skip(self, since, _rooms, _globals))]
#[tracing::instrument(skip(self, globals, rooms))]
pub fn presence_since(
&self,
room_id: &RoomId,
since: u64,
_rooms: &super::Rooms,
_globals: &super::super::globals::Globals,
) -> Result<HashMap<Box<UserId>, PresenceEvent>> {
//self.presence_maintain(rooms, globals)?;
rooms: &super::Rooms,
globals: &super::super::globals::Globals,
) -> Result<HashMap<UserId, PresenceEvent>> {
self.presence_maintain(rooms, globals)?;
let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff);
@@ -514,7 +510,7 @@ impl RoomEdus {
.iter_from(&*first_possible_edu, false)
.take_while(|(key, _)| key.starts_with(&prefix))
{
let user_id = UserId::parse(
let user_id = UserId::try_from(
utils::string_from_bytes(
key.rsplit(|&b| b == 0xff)
.next()
@@ -524,7 +520,7 @@ impl RoomEdus {
)
.map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?;
let mut presence: PresenceEvent = serde_json::from_slice(&value)
let mut presence = serde_json::from_slice::<PresenceEvent>(&value)
.map_err(|_| Error::bad_database("Invalid presence event in db."))?;
let current_timestamp: UInt = utils::millis_since_unix_epoch()
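
Both sides of the edus.rs diff slice read-receipt keys with the same index arithmetic. A hypothetical parser, present in neither branch, spelling out the layout that `readreceipts_since` relies on:

```rust
use std::convert::TryInto;

// readreceiptid keys are laid out as:
//   room_id 0xff count_u64_be 0xff user_id
// `prefix_len` is the length of "room_id 0xff".
fn parse_readreceipt_key(key: &[u8], prefix_len: usize) -> Option<(u64, &str)> {
    let count_end = prefix_len + std::mem::size_of::<u64>();
    let count = u64::from_be_bytes(key.get(prefix_len..count_end)?.try_into().ok()?);
    let user_id = std::str::from_utf8(key.get(count_end + 1..)?).ok()?;
    Some((count, user_id))
}
```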

350
src/database/sending.rs

@@ -1,6 +1,6 @@
use std::{
collections::{BTreeMap, HashMap, HashSet},
convert::TryInto,
collections::{BTreeMap, HashMap},
convert::{TryFrom, TryInto},
fmt::Debug,
sync::Arc,
time::{Duration, Instant},
@@ -10,6 +10,7 @@ use crate::{
appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result,
};
use federation::transactions::send_transaction_message;
use log::{error, warn};
use ring::digest;
use rocket::futures::{
channel::mpsc,
@@ -20,23 +21,16 @@ use ruma::{
appservice,
federation::{
self,
transactions::edu::{
DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap,
},
transactions::edu::{Edu, ReceiptContent, ReceiptData, ReceiptMap},
},
OutgoingRequest,
},
device_id,
events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType},
events::{push_rules, AnySyncEphemeralRoomEvent, EventType},
push,
receipt::ReceiptType,
uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId,
MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId,
};
use tokio::{
select,
sync::{RwLock, Semaphore},
};
use tracing::{error, warn};
use tokio::{select, sync::Semaphore};
use super::abstraction::Tree;
@@ -48,7 +42,6 @@ pub enum OutgoingKind {
}
impl OutgoingKind {
#[tracing::instrument(skip(self))]
pub fn get_prefix(&self) -> Vec<u8> {
let mut prefix = match self {
OutgoingKind::Appservice(server) => {
@@ -58,9 +51,9 @@ impl OutgoingKind {
}
OutgoingKind::Push(user, pushkey) => {
let mut p = b"$".to_vec();
p.extend_from_slice(user);
p.extend_from_slice(&user);
p.push(0xff);
p.extend_from_slice(pushkey);
p.extend_from_slice(&pushkey);
p
}
OutgoingKind::Normal(server) => {
@@ -84,10 +77,10 @@ pub enum SendingEventType {
pub struct Sending {
/// The state for a given state hash.
pub(super) servername_educount: Arc<dyn Tree>, // EduCount: Count of last EDU sync
pub(super) servernameevent_data: Arc<dyn Tree>, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content
pub(super) servercurrentevent_data: Arc<dyn Tree>, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content
pub(super) servernamepduids: Arc<dyn Tree>, // ServernamePduId = (+ / $)SenderKey / ServerName / UserId + PduId
pub(super) servercurrentevents: Arc<dyn Tree>, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / (*)EduEvent
pub(super) maximum_requests: Arc<Semaphore>,
pub sender: mpsc::UnboundedSender<(Vec<u8>, Vec<u8>)>,
pub sender: mpsc::UnboundedSender<Vec<u8>>,
}
enum TransactionStatus {
@@ -97,11 +90,7 @@ enum TransactionStatus {
}
impl Sending {
pub fn start_handler(
&self,
db: Arc<RwLock<Database>>,
mut receiver: mpsc::UnboundedReceiver<(Vec<u8>, Vec<u8>)>,
) {
pub fn start_handler(&self, db: Arc<Database>, mut receiver: mpsc::UnboundedReceiver<Vec<u8>>) {
tokio::spawn(async move {
let mut futures = FuturesUnordered::new();
@@ -109,18 +98,15 @@ impl Sending {
// Retry requests we could not finish yet
let mut initial_transactions = HashMap::<OutgoingKind, Vec<SendingEventType>>::new();
let guard = db.read().await;
for (key, outgoing_kind, event) in guard
.sending
.servercurrentevent_data
.iter()
.filter_map(|(key, v)| {
Self::parse_servercurrentevent(&key, v)
.ok()
.map(|(k, e)| (key, k, e))
})
for (key, outgoing_kind, event) in
db.sending
.servercurrentevents
.iter()
.filter_map(|(key, _)| {
Self::parse_servercurrentevent(&key)
.ok()
.map(|(k, e)| (key, k, e))
})
{
let entry = initial_transactions
.entry(outgoing_kind.clone())
@@ -131,23 +117,17 @@ impl Sending {
"Dropping some current events: {:?} {:?} {:?}",
key, outgoing_kind, event
);
guard.sending.servercurrentevent_data.remove(&key).unwrap();
db.sending.servercurrentevents.remove(&key).unwrap();
continue;
}
entry.push(event);
}
drop(guard);
for (outgoing_kind, events) in initial_transactions {
current_transaction_status
.insert(outgoing_kind.get_prefix(), TransactionStatus::Running);
futures.push(Self::handle_events(
outgoing_kind.clone(),
events,
Arc::clone(&db),
));
futures.push(Self::handle_events(outgoing_kind.clone(), events, &db));
}
loop {
@@ -155,41 +135,43 @@ impl Sending {
Some(response) = futures.next() => {
match response {
Ok(outgoing_kind) => {
let guard = db.read().await;
let prefix = outgoing_kind.get_prefix();
for (key, _) in guard.sending.servercurrentevent_data
for (key, _) in db.sending.servercurrentevents
.scan_prefix(prefix.clone())
{
guard.sending.servercurrentevent_data.remove(&key).unwrap();
db.sending.servercurrentevents.remove(&key).unwrap();
}
// Find events that have been added since starting the last request
let new_events: Vec<_> = guard.sending.servernameevent_data
let new_events = db.sending.servernamepduids
.scan_prefix(prefix.clone())
.filter_map(|(k, v)| {
Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k))
.map(|(k, _)| {
SendingEventType::Pdu(k[prefix.len()..].to_vec())
})
.take(30)
.collect::<>();
.collect::<Vec<_>>();
// TODO: find edus
if !new_events.is_empty() {
// Insert pdus we found
for (e, key) in &new_events {
let value = if let SendingEventType::Edu(value) = &e.1 { &**value } else { &[] };
guard.sending.servercurrentevent_data.insert(key, value).unwrap();
guard.sending.servernameevent_data.remove(key).unwrap();
for event in &new_events {
let mut current_key = prefix.clone();
match event {
SendingEventType::Pdu(b) |
SendingEventType::Edu(b) => {
current_key.extend_from_slice(&b);
db.sending.servercurrentevents.insert(&current_key, &[]).unwrap();
db.sending.servernamepduids.remove(&current_key).unwrap();
}
}
}
drop(guard);
futures.push(
Self::handle_events(
outgoing_kind.clone(),
new_events.into_iter().map(|(event, _)| event.1).collect(),
Arc::clone(&db),
new_events,
&db,
)
);
} else {
@@ -208,17 +190,15 @@ impl Sending {
}
};
},
Some((key, value)) = receiver.next() => {
if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key, value) {
let guard = db.read().await;
Some(key) = receiver.next() => {
if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key) {
if let Ok(Some(events)) = Self::select_events(
&outgoing_kind,
vec![(event, key)],
&mut current_transaction_status,
&guard
&db
) {
futures.push(Self::handle_events(outgoing_kind, events, Arc::clone(&db)));
futures.push(Self::handle_events(outgoing_kind, events, &db));
}
}
}
@@ -227,7 +207,6 @@ impl Sending {
});
}
#[tracing::instrument(skip(outgoing_kind, new_events, current_transaction_status, db))]
fn select_events(
outgoing_kind: &OutgoingKind,
new_events: Vec<(SendingEventType, Vec<u8>)>, // Events we want to send: event and full key
@@ -270,33 +249,25 @@ impl Sending {
if retry {
// We retry the previous transaction
for (key, value) in db.sending.servercurrentevent_data.scan_prefix(prefix) {
if let Ok((_, e)) = Self::parse_servercurrentevent(&key, value) {
for (key, _) in db.sending.servercurrentevents.scan_prefix(prefix) {
if let Ok((_, e)) = Self::parse_servercurrentevent(&key) {
events.push(e);
}
}
} else {
for (e, full_key) in new_events {
let value = if let SendingEventType::Edu(value) = &e {
&**value
} else {
&[][..]
};
db.sending
.servercurrentevent_data
.insert(&full_key, value)?;
db.sending.servercurrentevents.insert(&full_key, &[])?;
// If it was a PDU we have to unqueue it
// TODO: don't try to unqueue EDUs
db.sending.servernameevent_data.remove(&full_key)?;
db.sending.servernamepduids.remove(&full_key)?;
events.push(e);
}
if let OutgoingKind::Normal(server_name) = outgoing_kind {
if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) {
events.extend(select_edus.into_iter().map(SendingEventType::Edu));
events.extend_from_slice(&select_edus);
db.sending
.servername_educount
.insert(server_name.as_bytes(), &last_count.to_be_bytes())?;
@@ -307,8 +278,7 @@ impl Sending {
Ok(Some(events))
}
#[tracing::instrument(skip(db, server))]
pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec<Vec<u8>>, u64)> {
pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec<SendingEventType>, u64)> {
// u64: count of last edu
let since = db
.sending
@@ -320,19 +290,8 @@ impl Sending {
})?;
let mut events = Vec::new();
let mut max_edu_count = since;
let mut device_list_changes = HashSet::new();
'outer: for room_id in db.rooms.server_rooms(server) {
let room_id = room_id?;
// Look for device list updates in this room
device_list_changes.extend(
db.users
.keys_changed(&room_id.to_string(), since, None)
.filter_map(|r| r.ok())
.filter(|user_id| user_id.server_name() == db.globals.server_name()),
);
// Look for read receipts in this room
for r in db.rooms.edus.readreceipts_since(&room_id, since) {
let (user_id, count, read_receipt) = r?;
@@ -344,8 +303,8 @@ impl Sending {
continue;
}
let event: AnySyncEphemeralRoomEvent =
serde_json::from_str(read_receipt.json().get())
let event =
serde_json::from_str::<AnySyncEphemeralRoomEvent>(&read_receipt.json().get())
.map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?;
let federation_event = match event {
AnySyncEphemeralRoomEvent::Receipt(r) => {
@@ -384,7 +343,9 @@ impl Sending {
}
};
events.push(serde_json::to_vec(&federation_event).expect("json can be serialized"));
events.push(SendingEventType::Edu(
serde_json::to_vec(&federation_event).expect("json can be serialized"),
));
if events.len() >= 20 {
break 'outer;
@@ -392,70 +353,28 @@ impl Sending {
}
}
for user_id in device_list_changes {
// Empty prev id forces synapse to resync: https://github.com/matrix-org/synapse/blob/98aec1cc9da2bd6b8e34ffb282c85abf9b8b42ca/synapse/handlers/device.py#L767
// Because synapse resyncs, we can just insert dummy data
let edu = Edu::DeviceListUpdate(DeviceListUpdateContent {
user_id,
device_id: device_id!("dummy").to_owned(),
device_display_name: Some("Dummy".to_owned()),
stream_id: uint!(1),
prev_id: Vec::new(),
deleted: None,
keys: None,
});
events.push(serde_json::to_vec(&edu).expect("json can be serialized"));
}
Ok((events, max_edu_count))
}
#[tracing::instrument(skip(self, pdu_id, senderkey))]
pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Vec<u8>) -> Result<()> {
#[tracing::instrument(skip(self))]
pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Box<[u8]>) -> Result<()> {
let mut key = b"$".to_vec();
key.extend_from_slice(&senderkey);
key.push(0xff);
key.extend_from_slice(pdu_id);
self.servernameevent_data.insert(&key, &[])?;
self.sender.unbounded_send((key, vec![])).unwrap();
Ok(())
}
#[tracing::instrument(skip(self, servers, pdu_id))]
pub fn send_pdu<I: Iterator<Item = Box<ServerName>>>(
&self,
servers: I,
pdu_id: &[u8],
) -> Result<()> {
let mut batch = servers.map(|server| {
let mut key = server.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(pdu_id);
self.sender.unbounded_send((key.clone(), vec![])).unwrap();
(key, Vec::new())
});
self.servernameevent_data.insert_batch(&mut batch)?;
self.servernamepduids.insert(&key, b"")?;
self.sender.unbounded_send(key).unwrap();
Ok(())
}
#[tracing::instrument(skip(self, server, serialized))]
pub fn send_reliable_edu(
&self,
server: &ServerName,
serialized: Vec<u8>,
id: u64,
) -> Result<()> {
#[tracing::instrument(skip(self))]
pub fn send_pdu(&self, server: &ServerName, pdu_id: &[u8]) -> Result<()> {
let mut key = server.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&id.to_be_bytes());
self.servernameevent_data.insert(&key, &serialized)?;
self.sender.unbounded_send((key, serialized)).unwrap();
key.extend_from_slice(pdu_id);
self.servernamepduids.insert(&key, b"")?;
self.sender.unbounded_send(key).unwrap();
Ok(())
}
@@ -466,13 +385,13 @@ impl Sending {
key.extend_from_slice(appservice_id.as_bytes());
key.push(0xff);
key.extend_from_slice(pdu_id);
self.servernameevent_data.insert(&key, &[])?;
self.sender.unbounded_send((key, vec![])).unwrap();
self.servernamepduids.insert(&key, b"")?;
self.sender.unbounded_send(key).unwrap();
Ok(())
}
#[tracing::instrument(skip(keys))]
#[tracing::instrument]
fn calculate_hash(keys: &[&[u8]]) -> Vec<u8> {
// We only hash the pdu's event ids, not the whole pdu
let bytes = keys.join(&0xff);
@@ -480,14 +399,12 @@ impl Sending {
hash.as_ref().to_owned()
}
#[tracing::instrument(skip(db, events, kind))]
#[tracing::instrument(skip(db))]
async fn handle_events(
kind: OutgoingKind,
events: Vec<SendingEventType>,
db: Arc<RwLock<Database>>,
) -> Result<OutgoingKind, (OutgoingKind, Error)> {
let db = db.read().await;
db: &Database,
) -> std::result::Result<OutgoingKind, (OutgoingKind, Error)> {
match &kind {
OutgoingKind::Appservice(server) => {
let mut pdu_jsons = Vec::new();
@@ -496,13 +413,13 @@ impl Sending {
match event {
SendingEventType::Pdu(pdu_id) => {
pdu_jsons.push(db.rooms
.get_pdu_from_id(pdu_id)
.get_pdu_from_id(&pdu_id)
.map_err(|e| (kind.clone(), e))?
.ok_or_else(|| {
(
kind.clone(),
Error::bad_database(
"[Appservice] Event in servernameevent_data not found in db.",
"[Appservice] Event in servernamepduids not found in db.",
),
)
})?
@@ -553,13 +470,13 @@ impl Sending {
SendingEventType::Pdu(pdu_id) => {
pdus.push(
db.rooms
.get_pdu_from_id(pdu_id)
.get_pdu_from_id(&pdu_id)
.map_err(|e| (kind.clone(), e))?
.ok_or_else(|| {
(
kind.clone(),
Error::bad_database(
"[Push] Event in servernamevent_datas not found in db.",
"[Push] Event in servernamepduids not found in db.",
),
)
})?,
@@ -573,28 +490,23 @@ impl Sending {
for pdu in pdus {
// Redacted events are not notification targets (we don't send push for them)
if let Some(unsigned) = &pdu.unsigned {
if let Ok(unsigned) =
serde_json::from_str::<serde_json::Value>(unsigned.get())
{
if unsigned.get("redacted_because").is_some() {
continue;
}
}
if pdu.unsigned.get("redacted_because").is_some() {
continue;
}
let userid = UserId::parse(utils::string_from_bytes(user).map_err(|_| {
(
kind.clone(),
Error::bad_database("Invalid push user string in db."),
)
})?)
.map_err(|_| {
(
kind.clone(),
Error::bad_database("Invalid push user id in db."),
)
})?;
let userid =
UserId::try_from(utils::string_from_bytes(user).map_err(|_| {
(
kind.clone(),
Error::bad_database("Invalid push user string in db."),
)
})?)
.map_err(|_| {
(
kind.clone(),
Error::bad_database("Invalid push user id in db."),
)
})?;
let mut senderkey = user.clone();
senderkey.push(0xff);
@@ -611,9 +523,9 @@ impl Sending {
let rules_for_user = db
.account_data
.get(None, &userid, EventType::PushRules)
.get::<push_rules::PushRulesEvent>(None, &userid, EventType::PushRules)
.unwrap_or_default()
.map(|ev: PushRulesEvent| ev.content.global)
.map(|ev| ev.content.global)
.unwrap_or_else(|| push::Ruleset::server_default(&userid));
let unread: UInt = db
@@ -631,7 +543,7 @@ impl Sending {
&pusher,
rules_for_user,
&pdu,
&db,
db,
)
.await
.map(|_response| kind.clone())
@@ -649,25 +561,29 @@ impl Sending {
match event {
SendingEventType::Pdu(pdu_id) => {
// TODO: check room version and remove event_id if needed
let raw = PduEvent::convert_to_outgoing_federation_event(
db.rooms
.get_pdu_json_from_id(pdu_id)
.map_err(|e| (OutgoingKind::Normal(server.clone()), e))?
.ok_or_else(|| {
(
OutgoingKind::Normal(server.clone()),
Error::bad_database(
"[Normal] Event in servernamevent_datas not found in db.",
),
)
})?,
);
pdu_jsons.push(raw);
pdu_jsons.push(serde_json::from_str(
PduEvent::convert_to_outgoing_federation_event(
db.rooms
.get_pdu_json_from_id(&pdu_id)
.map_err(|e| (OutgoingKind::Normal(server.clone()), e))?
.ok_or_else(|| {
(
OutgoingKind::Normal(server.clone()),
Error::bad_database(
"[Normal] Event in servernamepduids not found in db.",
),
)
})?,
)
.json()
.get(),
)
.expect("Raw<..> is always valid"));
}
SendingEventType::Edu(edu) => {
if let Ok(raw) = serde_json::from_slice(edu) {
edu_jsons.push(raw);
}
edu_jsons.push(
serde_json::from_slice(edu).expect("Raw<..> is always valid"),
);
}
}
}
@@ -713,11 +629,7 @@ impl Sending {
}
}
#[tracing::instrument(skip(key))]
fn parse_servercurrentevent(
key: &[u8],
value: Vec<u8>,
) -> Result<(OutgoingKind, SendingEventType)> {
fn parse_servercurrentevent(key: &[u8]) -> Result<(OutgoingKind, SendingEventType)> {
// Appservices start with a plus
Ok::<_, Error>(if key.starts_with(b"+") {
let mut parts = key[1..].splitn(2, |&b| b == 0xff);
@@ -726,18 +638,18 @@ impl Sending {
let event = parts
.next()
.ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?;
let server = utils::string_from_bytes(server).map_err(|_| {
let server = utils::string_from_bytes(&server).map_err(|_| {
Error::bad_database("Invalid server bytes in server_currenttransaction")
})?;
(
OutgoingKind::Appservice(ServerName::parse(server).map_err(|_| {
OutgoingKind::Appservice(Box::<ServerName>::try_from(server).map_err(|_| {
Error::bad_database("Invalid server string in server_currenttransaction")
})?),
if value.is_empty() {
SendingEventType::Pdu(event.to_vec())
if event.starts_with(b"*") {
SendingEventType::Edu(event[1..].to_vec())
} else {
SendingEventType::Edu(value)
SendingEventType::Pdu(event.to_vec())
},
)
} else if key.starts_with(b"$") {
@@ -752,10 +664,10 @@ impl Sending {
.ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?;
(
OutgoingKind::Push(user.to_vec(), pushkey.to_vec()),
if value.is_empty() {
SendingEventType::Pdu(event.to_vec())
if event.starts_with(b"*") {
SendingEventType::Edu(event[1..].to_vec())
} else {
SendingEventType::Edu(value)
SendingEventType::Pdu(event.to_vec())
},
)
} else {
@@ -765,24 +677,24 @@ impl Sending {
let event = parts
.next()
.ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?;
let server = utils::string_from_bytes(server).map_err(|_| {
let server = utils::string_from_bytes(&server).map_err(|_| {
Error::bad_database("Invalid server bytes in server_currenttransaction")
})?;
(
OutgoingKind::Normal(ServerName::parse(server).map_err(|_| {
OutgoingKind::Normal(Box::<ServerName>::try_from(server).map_err(|_| {
Error::bad_database("Invalid server string in server_currenttransaction")
})?),
if value.is_empty() {
SendingEventType::Pdu(event.to_vec())
if event.starts_with(b"*") {
SendingEventType::Edu(event[1..].to_vec())
} else {
SendingEventType::Edu(value)
SendingEventType::Pdu(event.to_vec())
},
)
})
}
#[tracing::instrument(skip(self, globals, destination, request))]
#[tracing::instrument(skip(self, globals))]
pub async fn send_federation_request<T: OutgoingRequest>(
&self,
globals: &crate::database::globals::Globals,
@@ -799,7 +711,7 @@ impl Sending {
response
}
#[tracing::instrument(skip(self, globals, registration, request))]
#[tracing::instrument(skip(self, globals))]
pub async fn send_appservice_request<T: OutgoingRequest>(
&self,
globals: &crate::database::globals::Globals,

189
src/database/uiaa.rs

@@ -4,15 +4,11 @@ use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result};
use ruma::{
api::client::{
error::ErrorKind,
r0::uiaa::{
AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId,
UiaaInfo,
},
r0::uiaa::{IncomingAuthData, UiaaInfo},
},
signatures::CanonicalJsonValue,
DeviceId, UserId,
};
use tracing::error;
use super::abstraction::Tree;
@@ -53,91 +49,126 @@ impl Uiaa {
users: &super::users::Users,
globals: &super::globals::Globals,
) -> Result<(bool, UiaaInfo)> {
let mut uiaainfo = auth
.session()
.map(|session| self.get_uiaa_session(user_id, device_id, session))
.unwrap_or_else(|| Ok(uiaainfo.clone()))?;
if let IncomingAuthData::DirectRequest {
kind,
session,
auth_parameters,
} = &auth
{
let mut uiaainfo = session
.as_ref()
.map(|session| self.get_uiaa_session(&user_id, &device_id, session))
.unwrap_or_else(|| Ok(uiaainfo.clone()))?;
if uiaainfo.session.is_none() {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
}
if uiaainfo.session.is_none() {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
}
match auth {
// Find out what the user completed
IncomingAuthData::Password(IncomingPassword {
identifier,
password,
..
}) => {
let username = match identifier {
MatrixId(username) => username,
_ => {
match &**kind {
"m.login.password" => {
let identifier = auth_parameters.get("identifier").ok_or(Error::BadRequest(
ErrorKind::MissingParam,
"m.login.password needs identifier.",
))?;
let identifier_type = identifier.get("type").ok_or(Error::BadRequest(
ErrorKind::MissingParam,
"Identifier needs a type.",
))?;
if identifier_type != "m.id.user" {
return Err(Error::BadRequest(
ErrorKind::Unrecognized,
"Identifier type not recognized.",
))
));
}
};
let user_id =
UserId::parse_with_server_name(username.clone(), globals.server_name())
let username = identifier
.get("user")
.ok_or(Error::BadRequest(
ErrorKind::MissingParam,
"Identifier needs user field.",
))?
.as_str()
.ok_or(Error::BadRequest(
ErrorKind::BadJson,
"User is not a string.",
))?;
let user_id = UserId::parse_with_server_name(username, globals.server_name())
.map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.")
})?;
// Check if password is correct
if let Some(hash) = users.password_hash(&user_id)? {
let hash_matches =
argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false);
if !hash_matches {
uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody {
kind: ErrorKind::Forbidden,
message: "Invalid username or password.".to_owned(),
});
return Ok((false, uiaainfo));
Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.")
})?;
let password = auth_parameters
.get("password")
.ok_or(Error::BadRequest(
ErrorKind::MissingParam,
"Password is missing.",
))?
.as_str()
.ok_or(Error::BadRequest(
ErrorKind::BadJson,
"Password is not a string.",
))?;
// Check if password is correct
if let Some(hash) = users.password_hash(&user_id)? {
let hash_matches =
argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false);
if !hash_matches {
uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody {
kind: ErrorKind::Forbidden,
message: "Invalid username or password.".to_owned(),
});
return Ok((false, uiaainfo));
}
}
}
// Password was correct! Let's add it to `completed`
uiaainfo.completed.push(AuthType::Password);
}
IncomingAuthData::Dummy(_) => {
uiaainfo.completed.push(AuthType::Dummy);
// Password was correct! Let's add it to `completed`
uiaainfo.completed.push("m.login.password".to_owned());
}
"m.login.dummy" => {
uiaainfo.completed.push("m.login.dummy".to_owned());
}
k => panic!("type not supported: {}", k),
}
k => error!("type not supported: {:?}", k),
}
// Check if a flow now succeeds
let mut completed = false;
'flows: for flow in &mut uiaainfo.flows {
for stage in &flow.stages {
if !uiaainfo.completed.contains(stage) {
continue 'flows;
// Check if a flow now succeeds
let mut completed = false;
'flows: for flow in &mut uiaainfo.flows {
for stage in &flow.stages {
if !uiaainfo.completed.contains(stage) {
continue 'flows;
}
}
// We didn't break, so this flow succeeded!
completed = true;
}
// We didn't break, so this flow succeeded!
completed = true;
}
if !completed {
if !completed {
self.update_uiaa_session(
user_id,
device_id,
uiaainfo.session.as_ref().expect("session is always set"),
Some(&uiaainfo),
)?;
return Ok((false, uiaainfo));
}
// UIAA was successful! Remove this session and return true
self.update_uiaa_session(
user_id,
device_id,
uiaainfo.session.as_ref().expect("session is always set"),
Some(&uiaainfo),
None,
)?;
return Ok((false, uiaainfo));
Ok((true, uiaainfo))
} else {
panic!("FallbackAcknowledgement is not supported yet");
}
// UIAA was successful! Remove this session and return true
self.update_uiaa_session(
user_id,
device_id,
uiaainfo.session.as_ref().expect("session is always set"),
None,
)?;
Ok((true, uiaainfo))
}
fn set_uiaa_request(
@@ -175,14 +206,16 @@ impl Uiaa {
self.userdevicesessionid_uiaarequest
.get(&userdevicesessionid)?
.map(|bytes| {
serde_json::from_str::<CanonicalJsonValue>(
&utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Invalid uiaa request bytes in db."))?,
)
.map_err(|_| Error::bad_database("Invalid uiaa request in db."))
.map_or(Ok(None), |bytes| {
Ok::<_, Error>(Some(
serde_json::from_str::<CanonicalJsonValue>(
&utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Invalid uiaa request bytes in db.")
})?,
)
.map_err(|_| Error::bad_database("Invalid uiaa request in db."))?,
))
})
.transpose()
}
fn update_uiaa_session(
@@ -223,7 +256,7 @@ impl Uiaa {
userdevicesessionid.push(0xff);
userdevicesessionid.extend_from_slice(session.as_bytes());
serde_json::from_slice(
let uiaainfo = serde_json::from_slice::<UiaaInfo>(
&self
.userdevicesessionid_uiaainfo
.get(&userdevicesessionid)?
@@ -232,6 +265,8 @@ impl Uiaa {
"UIAA session does not exist.",
))?,
)
.map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid."))
.map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid."))?;
Ok(uiaainfo)
}
}
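
The UIAA logic on both sides ends with the same success condition: every stage of at least one advertised flow must appear in `completed`. A standalone version of that `'flows` loop, with plain strings standing in for the auth-type values:

```rust
fn any_flow_complete(flows: &[Vec<&str>], completed: &[&str]) -> bool {
    // A flow succeeds when all of its stages have been completed.
    flows
        .iter()
        .any(|flow| flow.iter().all(|stage| completed.contains(stage)))
}

fn main() {
    let flows = vec![
        vec!["m.login.password"],
        vec!["m.login.recaptcha", "m.login.dummy"],
    ];
    assert!(any_flow_complete(&flows, &["m.login.password"]));
    assert!(!any_flow_complete(&flows, &["m.login.dummy"]));
}
```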

197
src/database/users.rs

@@ -5,11 +5,9 @@ use ruma::{
events::{AnyToDeviceEvent, EventType},
identifiers::MxcUri,
serde::Raw,
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt,
UserId,
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, UInt, UserId,
};
use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc};
use tracing::warn;
use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc};
use super::abstraction::Tree;
@@ -17,7 +15,6 @@ pub struct Users {
pub(super) userid_password: Arc<dyn Tree>,
pub(super) userid_displayname: Arc<dyn Tree>,
pub(super) userid_avatarurl: Arc<dyn Tree>,
pub(super) userid_blurhash: Arc<dyn Tree>,
pub(super) userdeviceid_token: Arc<dyn Tree>,
pub(super) userdeviceid_metadata: Arc<dyn Tree>, // This is also used to check if a device exists
pub(super) userid_devicelistversion: Arc<dyn Tree>, // DevicelistVersion = u64
@@ -36,13 +33,11 @@ pub struct Users {
impl Users {
/// Check if a user has an account on this homeserver.
#[tracing::instrument(skip(self, user_id))]
pub fn exists(&self, user_id: &UserId) -> Result<bool> {
Ok(self.userid_password.get(user_id.as_bytes())?.is_some())
}
/// Check if account is deactivated
#[tracing::instrument(skip(self, user_id))]
pub fn is_deactivated(&self, user_id: &UserId) -> Result<bool> {
Ok(self
.userid_password
@@ -54,37 +49,19 @@ impl Users {
.is_empty())
}
/// Check if a user is an admin
#[tracing::instrument(skip(self, user_id, rooms, globals))]
pub fn is_admin(
&self,
user_id: &UserId,
rooms: &super::rooms::Rooms,
globals: &super::globals::Globals,
) -> Result<bool> {
let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name()))
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap();
rooms.is_joined(user_id, &admin_room_id)
}
/// Create a new user account on this homeserver.
#[tracing::instrument(skip(self, user_id, password))]
pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> {
self.set_password(user_id, password)?;
Ok(())
}
/// Returns the number of users registered on this server.
#[tracing::instrument(skip(self))]
pub fn count(&self) -> Result<usize> {
Ok(self.userid_password.iter().count())
}
/// Find out which user an access token belongs to.
#[tracing::instrument(skip(self, token))]
pub fn find_from_token(&self, token: &str) -> Result<Option<(Box<UserId>, String)>> {
pub fn find_from_token(&self, token: &str) -> Result<Option<(UserId, String)>> {
self.token_userdeviceid
.get(token.as_bytes())?
.map_or(Ok(None), |bytes| {
@@ -97,13 +74,13 @@ impl Users {
})?;
Ok(Some((
UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| {
UserId::try_from(utils::string_from_bytes(&user_bytes).map_err(|_| {
Error::bad_database("User ID in token_userdeviceid is invalid unicode.")
})?)
.map_err(|_| {
Error::bad_database("User ID in token_userdeviceid is invalid.")
})?,
utils::string_from_bytes(device_bytes).map_err(|_| {
utils::string_from_bytes(&device_bytes).map_err(|_| {
Error::bad_database("Device ID in token_userdeviceid is invalid.")
})?,
)))
@@ -111,10 +88,9 @@ impl Users {
}
/// Returns an iterator over all users on this homeserver.
#[tracing::instrument(skip(self))]
pub fn iter(&self) -> impl Iterator<Item = Result<Box<UserId>>> + '_ {
pub fn iter(&self) -> impl Iterator<Item = Result<UserId>> + '_ {
self.userid_password.iter().map(|(bytes, _)| {
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in userid_password is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in userid_password is invalid."))
@@ -122,7 +98,6 @@ impl Users {
}
/// Returns the password hash for the given user.
#[tracing::instrument(skip(self, user_id))]
pub fn password_hash(&self, user_id: &UserId) -> Result<Option<String>> {
self.userid_password
.get(user_id.as_bytes())?
@@ -134,10 +109,9 @@ impl Users {
}
/// Hash and set the user's password to the Argon2 hash
#[tracing::instrument(skip(self, user_id, password))]
pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> {
if let Some(password) = password {
if let Ok(hash) = utils::calculate_hash(password) {
if let Ok(hash) = utils::calculate_hash(&password) {
self.userid_password
.insert(user_id.as_bytes(), hash.as_bytes())?;
Ok(())
@@ -154,7 +128,6 @@ impl Users {
}
/// Returns the displayname of a user on this homeserver.
#[tracing::instrument(skip(self, user_id))]
pub fn displayname(&self, user_id: &UserId) -> Result<Option<String>> {
self.userid_displayname
.get(user_id.as_bytes())?
@@ -166,7 +139,6 @@ impl Users {
}
/// Sets a new displayname or removes it if displayname is None. You still need to notify all rooms of this change.
#[tracing::instrument(skip(self, user_id, displayname))]
pub fn set_displayname(&self, user_id: &UserId, displayname: Option<String>) -> Result<()> {
if let Some(displayname) = displayname {
self.userid_displayname
@@ -178,23 +150,20 @@ impl Users {
Ok(())
}
/// Get the avatar_url of a user.
#[tracing::instrument(skip(self, user_id))]
pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<Box<MxcUri>>> {
/// Get the avatar_url of a user.
pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<MxcUri>> {
self.userid_avatarurl
.get(user_id.as_bytes())?
.map(|bytes| {
let s = utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
s.try_into()
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
MxcUri::try_from(s).map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
})
.transpose()
}
/// Sets a new avatar_url or removes it if avatar_url is None.
#[tracing::instrument(skip(self, user_id, avatar_url))]
pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<Box<MxcUri>>) -> Result<()> {
pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<MxcUri>) -> Result<()> {
if let Some(avatar_url) = avatar_url {
self.userid_avatarurl
.insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?;
@@ -205,35 +174,7 @@ impl Users {
Ok(())
}
/// Get the blurhash of a user.
#[tracing::instrument(skip(self, user_id))]
pub fn blurhash(&self, user_id: &UserId) -> Result<Option<String>> {
self.userid_blurhash
.get(user_id.as_bytes())?
.map(|bytes| {
let s = utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
Ok(s)
})
.transpose()
}
/// Sets a new blurhash or removes it if blurhash is None.
#[tracing::instrument(skip(self, user_id, blurhash))]
pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option<String>) -> Result<()> {
if let Some(blurhash) = blurhash {
self.userid_blurhash
.insert(user_id.as_bytes(), blurhash.as_bytes())?;
} else {
self.userid_blurhash.remove(user_id.as_bytes())?;
}
Ok(())
}
/// Adds a new device to a user.
#[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))]
pub fn create_device(
&self,
user_id: &UserId,
@@ -262,13 +203,12 @@ impl Users {
.expect("Device::to_string never fails."),
)?;
self.set_token(user_id, device_id, token)?;
self.set_token(user_id, &device_id, token)?;
Ok(())
}
/// Removes a device from a user.
#[tracing::instrument(skip(self, user_id, device_id))]
pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
let mut userdeviceid = user_id.as_bytes().to_vec();
userdeviceid.push(0xff);
@@ -299,7 +239,6 @@ impl Users {
}
/// Returns an iterator over all device ids of this user.
#[tracing::instrument(skip(self, user_id))]
pub fn all_device_ids<'a>(
&'a self,
user_id: &UserId,
@@ -311,7 +250,7 @@ impl Users {
.scan_prefix(prefix)
.map(|(bytes, _)| {
Ok(utils::string_from_bytes(
bytes
&bytes
.rsplit(|&b| b == 0xff)
.next()
.ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?,
@@ -322,7 +261,6 @@ impl Users {
}
/// Replaces the access token of one device.
#[tracing::instrument(skip(self, user_id, device_id, token))]
pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> {
let mut userdeviceid = user_id.as_bytes().to_vec();
userdeviceid.push(0xff);
@@ -346,14 +284,6 @@ impl Users {
Ok(())
}
#[tracing::instrument(skip(
self,
user_id,
device_id,
one_time_key_key,
one_time_key_value,
globals
))]
pub fn add_one_time_key(
&self,
user_id: &UserId,
@@ -374,7 +304,7 @@ impl Users {
// TODO: Use DeviceKeyId::to_string when it's available (and update everything,
// because there are no wrapping quotation marks anymore)
key.extend_from_slice(
serde_json::to_string(one_time_key_key)
&serde_json::to_string(one_time_key_key)
.expect("DeviceKeyId::to_string always works")
.as_bytes(),
);
@@ -385,15 +315,15 @@ impl Users {
)?;
self.userid_lastonetimekeyupdate
.insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
.insert(&user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
Ok(())
}
#[tracing::instrument(skip(self, user_id))]
#[tracing::instrument(skip(self))]
pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result<u64> {
self.userid_lastonetimekeyupdate
.get(user_id.as_bytes())?
.get(&user_id.as_bytes())?
.map(|bytes| {
utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
@@ -402,14 +332,13 @@ impl Users {
.unwrap_or(Ok(0))
}
#[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))]
pub fn take_one_time_key(
&self,
user_id: &UserId,
device_id: &DeviceId,
key_algorithm: &DeviceKeyAlgorithm,
globals: &super::globals::Globals,
) -> Result<Option<(Box<DeviceKeyId>, OneTimeKey)>> {
) -> Result<Option<(DeviceKeyId, OneTimeKey)>> {
let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff);
prefix.extend_from_slice(device_id.as_bytes());
@@ -419,7 +348,7 @@ impl Users {
prefix.push(b':');
self.userid_lastonetimekeyupdate
.insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
.insert(&user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
self.onetimekeyid_onetimekeys
.scan_prefix(prefix)
@@ -442,7 +371,7 @@ impl Users {
.transpose()
}
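The `globals.next_count()?.to_be_bytes()` writes above store the server's monotonic counter big-endian, which is what makes ranged scans over these keys possible: big-endian bytes sort exactly like the integers they encode. A quick demonstration:

```rust
fn main() {
    // Big-endian encodings sort byte-wise in numeric order...
    let mut be: Vec<[u8; 8]> = [300u64, 2, 10_000].iter().map(|n| n.to_be_bytes()).collect();
    be.sort();
    let decoded: Vec<u64> = be.iter().map(|b| u64::from_be_bytes(*b)).collect();
    assert_eq!(decoded, vec![2, 300, 10_000]);

    // ...while little-endian encodings do not.
    let mut le: Vec<[u8; 8]> = [300u64, 2, 10_000].iter().map(|n| n.to_le_bytes()).collect();
    le.sort();
    let decoded: Vec<u64> = le.iter().map(|b| u64::from_le_bytes(*b)).collect();
    assert_ne!(decoded, vec![2, 300, 10_000]);
}
```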
#[tracing::instrument(skip(self, user_id, device_id))]
#[tracing::instrument(skip(self))]
pub fn count_one_time_keys(
&self,
user_id: &UserId,
@@ -459,7 +388,7 @@ impl Users {
.scan_prefix(userdeviceid)
.map(|(bytes, _)| {
Ok::<_, Error>(
serde_json::from_slice::<Box<DeviceKeyId>>(
serde_json::from_slice::<DeviceKeyId>(
&*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| {
Error::bad_database("OneTimeKey ID in db is invalid.")
})?,
@@ -475,7 +404,6 @@ impl Users {
Ok(counts)
}
#[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))]
pub fn add_device_keys(
&self,
user_id: &UserId,
@@ -498,14 +426,6 @@ impl Users {
Ok(())
}
#[tracing::instrument(skip(
self,
master_key,
self_signing_key,
user_signing_key,
rooms,
globals
))]
pub fn add_cross_signing_keys(
&self,
user_id: &UserId,
@@ -606,7 +526,6 @@ impl Users {
Ok(())
}
#[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))]
pub fn sign_key(
&self,
target_id: &UserId,
@@ -620,11 +539,10 @@ impl Users {
key.push(0xff);
key.extend_from_slice(key_id.as_bytes());
let mut cross_signing_key: serde_json::Value =
serde_json::from_slice(&self.keyid_key.get(&key)?.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to sign nonexistent key.",
))?)
let mut cross_signing_key =
serde_json::from_slice::<serde_json::Value>(&self.keyid_key.get(&key)?.ok_or(
Error::BadRequest(ErrorKind::InvalidParam, "Tried to sign nonexistent key."),
)?)
.map_err(|_| Error::bad_database("key in keyid_key is invalid."))?;
let signatures = cross_signing_key
@@ -632,7 +550,7 @@ impl Users {
.ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))?
.as_object_mut()
.ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))?
.entry(sender_id.to_owned())
.entry(sender_id.clone())
.or_insert_with(|| serde_json::Map::new().into());
signatures
@@ -651,54 +569,38 @@ impl Users {
Ok(())
}
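The `signatures` update in `sign_key` above is a get-or-create on a nested JSON object. For illustration, `serde_json`'s index operator performs the same `entry().or_insert_with()` dance implicitly (a standalone sketch, not the code used here):

```rust
use serde_json::json;

fn main() {
    let mut cross_signing_key = json!({ "keys": { "ed25519:base64+master+key": "key" } });
    // Indexing with `[]` creates missing objects on assignment.
    cross_signing_key["signatures"]["@alice:example.org"]["ed25519:DEVICEID"] =
        json!("signature+base64");
    assert!(cross_signing_key["signatures"]["@alice:example.org"].is_object());
}
```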
#[tracing::instrument(skip(self, user_or_room_id, from, to))]
#[tracing::instrument(skip(self))]
pub fn keys_changed<'a>(
&'a self,
user_or_room_id: &str,
from: u64,
to: Option<u64>,
) -> impl Iterator<Item = Result<Box<UserId>>> + 'a {
) -> impl Iterator<Item = Result<UserId>> + 'a {
let mut prefix = user_or_room_id.as_bytes().to_vec();
prefix.push(0xff);
let mut start = prefix.clone();
start.extend_from_slice(&(from + 1).to_be_bytes());
let to = to.unwrap_or(u64::MAX);
self.keychangeid_userid
.iter_from(&start, false)
.take_while(move |(k, _)| {
k.starts_with(&prefix)
&& if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) {
if let Ok(c) = utils::u64_from_bytes(current) {
c <= to
} else {
warn!("BadDatabase: Could not parse keychangeid_userid bytes");
false
}
} else {
warn!("BadDatabase: Could not parse keychangeid_userid");
false
}
})
.take_while(move |(k, _)| k.starts_with(&prefix))
.map(|(_, bytes)| {
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid."))
})
}
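`keys_changed` above runs a ranged prefix scan: iteration starts at `prefix ++ be(from + 1)` and stops once a key leaves the prefix (the `next` side additionally stops once the trailing count exceeds `to`). The same technique over a sorted `Vec`, as a sketch in which the iterator chain stands in for the `Tree::iter_from` API:

```rust
use std::convert::TryInto;

/// Yield the counts c with from < c <= to under one prefix, the way
/// `keys_changed` walks `keychangeid_userid`.
fn counts_in_range(sorted_keys: &[Vec<u8>], prefix: &[u8], from: u64, to: u64) -> Vec<u64> {
    let mut start = prefix.to_vec();
    start.extend_from_slice(&(from + 1).to_be_bytes());
    sorted_keys
        .iter()
        .skip_while(|k| k.as_slice() < start.as_slice()) // iter_from(&start, false)
        .take_while(|k| k.starts_with(prefix))
        .filter_map(|k| k[prefix.len()..].try_into().ok().map(u64::from_be_bytes))
        .take_while(|&c| c <= to)
        .collect()
}

fn main() {
    let prefix = b"!room:example.org\xff".to_vec();
    let keys: Vec<Vec<u8>> = (1u64..=5)
        .map(|c| [prefix.as_slice(), &c.to_be_bytes()].concat())
        .collect();
    assert_eq!(counts_in_range(&keys, &prefix, 1, 4), vec![2, 3, 4]);
}
```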
#[tracing::instrument(skip(self, user_id, rooms, globals))]
pub fn mark_device_key_update(
fn mark_device_key_update(
&self,
user_id: &UserId,
rooms: &super::rooms::Rooms,
globals: &super::globals::Globals,
) -> Result<()> {
let count = globals.next_count()?.to_be_bytes();
for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) {
for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) {
// Don't send key updates to unencrypted rooms
if rooms
.room_state_get(&room_id, &EventType::RoomEncryption, "")?
@@ -722,7 +624,6 @@ impl Users {
Ok(())
}
#[tracing::instrument(skip(self, user_id, device_id))]
pub fn get_device_keys(
&self,
user_id: &UserId,
@@ -739,7 +640,6 @@ impl Users {
})
}
#[tracing::instrument(skip(self, user_id, allowed_signatures))]
pub fn get_master_key<F: Fn(&UserId) -> bool>(
&self,
user_id: &UserId,
@@ -767,7 +667,6 @@ impl Users {
})
}
#[tracing::instrument(skip(self, user_id, allowed_signatures))]
pub fn get_self_signing_key<F: Fn(&UserId) -> bool>(
&self,
user_id: &UserId,
@@ -795,7 +694,6 @@ impl Users {
})
}
#[tracing::instrument(skip(self, user_id))]
pub fn get_user_signing_key(&self, user_id: &UserId) -> Result<Option<CrossSigningKey>> {
self.userid_usersigningkeyid
.get(user_id.as_bytes())?
@@ -808,15 +706,6 @@ impl Users {
})
}
#[tracing::instrument(skip(
self,
sender,
target_user_id,
target_device_id,
event_type,
content,
globals
))]
pub fn add_to_device_event(
&self,
sender: &UserId,
@@ -837,14 +726,15 @@ impl Users {
json.insert("sender".to_owned(), sender.to_string().into());
json.insert("content".to_owned(), content);
let value = serde_json::to_vec(&json).expect("Map::to_vec always works");
self.todeviceid_events.insert(&key, &value)?;
self.todeviceid_events.insert(
&key,
&serde_json::to_vec(&json).expect("Map::to_vec always works"),
)?;
Ok(())
}
#[tracing::instrument(skip(self, user_id, device_id))]
#[tracing::instrument(skip(self))]
pub fn get_to_device_events(
&self,
user_id: &UserId,
@@ -867,7 +757,7 @@ impl Users {
Ok(events)
}
#[tracing::instrument(skip(self, user_id, device_id, until))]
#[tracing::instrument(skip(self))]
pub fn remove_to_device_events(
&self,
user_id: &UserId,
@@ -884,7 +774,7 @@ impl Users {
for (key, _) in self
.todeviceid_events
.iter_from(&last, true) // this includes last
.iter_from(&last, true)
.take_while(move |(k, _)| k.starts_with(&prefix))
.map(|(key, _)| {
Ok::<_, Error>((
@@ -902,7 +792,6 @@ impl Users {
Ok(())
}
#[tracing::instrument(skip(self, user_id, device_id, device))]
pub fn update_device_metadata(
&self,
user_id: &UserId,
@@ -928,7 +817,6 @@ impl Users {
}
/// Get device metadata.
#[tracing::instrument(skip(self, user_id, device_id))]
pub fn get_device_metadata(
&self,
user_id: &UserId,
@@ -947,7 +835,6 @@ impl Users {
})
}
#[tracing::instrument(skip(self, user_id))]
pub fn get_devicelist_version(&self, user_id: &UserId) -> Result<Option<u64>> {
self.userid_devicelistversion
.get(user_id.as_bytes())?
@@ -958,7 +845,6 @@ impl Users {
})
}
#[tracing::instrument(skip(self, user_id))]
pub fn all_devices_metadata<'a>(
&'a self,
user_id: &UserId,
@@ -975,11 +861,10 @@ impl Users {
}
/// Deactivate account
#[tracing::instrument(skip(self, user_id))]
pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> {
// Remove all associated devices
for device_id in self.all_device_ids(user_id) {
self.remove_device(user_id, &device_id?)?;
self.remove_device(&user_id, &device_id?)?;
}
// Set the password to "" to indicate a deactivated account. Hashes will never result in an
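The comment above (cut off by the diff context) describes the deactivation convention: argon2 never produces an empty encoded hash, so an empty `userid_password` value can double as the "deactivated" marker that `is_deactivated` near the top of this file tests with `.is_empty()`. A toy model of the idea, with a `HashMap` standing in for the tree:

```rust
use std::collections::HashMap;

struct Users {
    userid_password: HashMap<String, String>,
}

impl Users {
    fn deactivate_account(&mut self, user_id: &str) {
        // An empty "hash" can never come out of argon2, so it is a safe
        // sentinel: the account still exists but can no longer log in.
        self.userid_password.insert(user_id.to_owned(), String::new());
    }

    fn is_deactivated(&self, user_id: &str) -> bool {
        self.userid_password
            .get(user_id)
            .map_or(true, |hash| hash.is_empty())
    }
}

fn main() {
    let mut users = Users { userid_password: HashMap::new() };
    users.userid_password.insert("@bob:example.org".into(), "$argon2id$v=19$...".into());
    assert!(!users.is_deactivated("@bob:example.org"));
    users.deactivate_account("@bob:example.org");
    assert!(users.is_deactivated("@bob:example.org"));
}
```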

16
src/error.rs

@@ -1,3 +1,4 @@
use log::{error, warn};
use ruma::{
api::client::{
error::{Error as RumaError, ErrorKind},
@@ -6,7 +7,6 @@ use ruma::{
ServerName,
};
use thiserror::Error;
use tracing::warn;
#[cfg(feature = "conduit_bin")]
use {
@@ -17,10 +17,9 @@ use {
Request,
},
ruma::api::client::r0::uiaa::UiaaResponse,
tracing::error,
};
pub type Result<T, E = Error> = std::result::Result<T, E>;
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Error, Debug)]
pub enum Error {
@@ -30,15 +29,12 @@ pub enum Error {
#[from]
source: sled::Error,
},
#[cfg(feature = "sqlite")]
#[error("There was a problem with the connection to the sqlite database: {source}")]
SqliteError {
#[cfg(feature = "rocksdb")]
#[error("There was a problem with the connection to the rocksdb database: {source}")]
RocksDbError {
#[from]
source: rusqlite::Error,
source: rocksdb::Error,
},
#[cfg(feature = "heed")]
#[error("There was a problem with the connection to the heed database: {error}")]
HeedError { error: String },
#[error("Could not generate an image.")]
ImageError {
#[from]

10
src/lib.rs

@@ -1,9 +1,3 @@
#![warn(
rust_2018_idioms,
unused_qualifications,
clippy::cloned_instead_of_copied,
clippy::str_to_string
)]
#![allow(clippy::suspicious_else_formatting)]
#![deny(clippy::dbg_macro)]
@@ -16,10 +10,10 @@ mod ruma_wrapper;
pub mod server_server;
mod utils;
pub use database::{Config, Database};
pub use database::Database;
pub use error::{Error, Result};
pub use pdu::PduEvent;
pub use rocket::Config as RocketConfig;
pub use rocket::Config;
pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse};
use std::ops::Deref;

130
src/main.rs

@@ -1,9 +1,4 @@
#![warn(
rust_2018_idioms,
unused_qualifications,
clippy::cloned_instead_of_copied,
clippy::str_to_string
)]
#![warn(rust_2018_idioms)]
#![allow(clippy::suspicious_else_formatting)]
#![deny(clippy::dbg_macro)]
@@ -22,7 +17,6 @@ use std::sync::Arc;
use database::Config;
pub use database::Database;
pub use error::{Error, Result};
use opentelemetry::trace::{FutureExt, Tracer};
pub use pdu::PduEvent;
pub use rocket::State;
use ruma::api::client::error::ErrorKind;
@@ -36,10 +30,10 @@ use rocket::{
},
routes, Request,
};
use tokio::sync::RwLock;
use tracing_subscriber::{prelude::*, EnvFilter};
use tracing::span;
use tracing_subscriber::{prelude::*, Registry};
fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<rocket::Build> {
fn setup_rocket(config: Figment, data: Arc<Database>) -> rocket::Rocket<rocket::Build> {
rocket::custom(config)
.manage(data)
.mount(
@@ -55,7 +49,6 @@ fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<
client_server::logout_all_route,
client_server::change_password_route,
client_server::deactivate_route,
client_server::third_party_route,
client_server::get_capabilities_route,
client_server::get_pushrules_all_route,
client_server::set_pushrule_route,
@@ -66,7 +59,6 @@ fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<
client_server::set_pushrule_actions_route,
client_server::delete_pushrule_route,
client_server::get_room_event_route,
client_server::get_room_aliases_route,
client_server::get_filter_route,
client_server::create_filter_route,
client_server::set_global_account_data_route,
@@ -101,7 +93,6 @@ fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<
client_server::create_typing_event_route,
client_server::create_room_route,
client_server::redact_event_route,
client_server::report_event_route,
client_server::create_alias_route,
client_server::delete_alias_route,
client_server::get_alias_route,
@@ -166,8 +157,7 @@ fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<
server_server::get_room_state_route,
server_server::get_room_state_ids_route,
server_server::create_join_event_template_route,
server_server::create_join_event_v1_route,
server_server::create_join_event_v2_route,
server_server::create_join_event_route,
server_server::create_invite_route,
server_server::get_devices_route,
server_server::get_room_information_route,
@@ -194,7 +184,7 @@ async fn main() {
std::env::set_var("CONDUIT_LOG_LEVEL", "off");
let raw_config =
Figment::from(default_config())
Figment::from(rocket::Config::release_default())
.merge(
Toml::file(Env::var("CONDUIT_CONFIG").expect(
"The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml",
@@ -203,76 +193,33 @@ async fn main() {
)
.merge(Env::prefixed("CONDUIT_").global());
std::env::set_var("RUST_LOG", "warn");
let config = raw_config
.extract::<Config>()
.expect("It looks like your config is invalid. Please take a look at the error");
let config = match raw_config.extract::<Config>() {
Ok(s) => s,
Err(e) => {
eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e);
std::process::exit(1);
}
};
let start = async {
config.warn_deprecated();
let db = match Database::load_or_create(&config).await {
Ok(db) => db,
Err(e) => {
eprintln!(
"The database couldn't be loaded or created. The following error occured: {}",
e
);
std::process::exit(1);
}
};
let rocket = setup_rocket(raw_config, Arc::clone(&db))
.ignite()
.await
.unwrap();
Database::start_on_shutdown_tasks(db, rocket.shutdown()).await;
rocket.launch().await.unwrap();
};
let db = Database::load_or_create(config.clone())
.await
.expect("config is valid");
if config.allow_jaeger {
opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
let tracer = opentelemetry_jaeger::new_pipeline()
.install_batch(opentelemetry::runtime::Tokio)
let (tracer, _uninstall) = opentelemetry_jaeger::new_pipeline()
.with_service_name("conduit")
.install()
.unwrap();
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
Registry::default().with(telemetry).try_init().unwrap();
let span = tracer.start("conduit");
start.with_current_context().await;
drop(span);
let root = span!(tracing::Level::INFO, "app_start", work_units = 2);
let _enter = root.enter();
println!("exporting");
opentelemetry::global::shutdown_tracer_provider();
let rocket = setup_rocket(raw_config, db);
rocket.launch().await.unwrap();
} else {
std::env::set_var("RUST_LOG", &config.log);
let registry = tracing_subscriber::Registry::default();
if config.tracing_flame {
let (flame_layer, _guard) =
tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap();
let flame_layer = flame_layer.with_empty_samples(false);
let filter_layer = EnvFilter::new("trace,h2=off");
let subscriber = registry.with(filter_layer).with(flame_layer);
tracing::subscriber::set_global_default(subscriber).unwrap();
start.await;
} else {
let fmt_layer = tracing_subscriber::fmt::Layer::new();
let filter_layer = EnvFilter::try_from_default_env()
.or_else(|_| EnvFilter::try_new("info"))
.unwrap();
std::env::set_var("RUST_LOG", config.log);
tracing_subscriber::fmt::init();
let subscriber = registry.with(filter_layer).with(fmt_layer);
tracing::subscriber::set_global_default(subscriber).unwrap();
start.await;
}
let rocket = setup_rocket(raw_config, db);
rocket.launch().await.unwrap();
}
}
@@ -303,30 +250,3 @@ fn missing_token_catcher() -> Result<()> {
fn bad_json_catcher() -> Result<()> {
Err(Error::BadRequest(ErrorKind::BadJson, "Bad json."))
}
fn default_config() -> rocket::Config {
let mut config = rocket::Config::release_default();
{
let mut shutdown = &mut config.shutdown;
#[cfg(unix)]
{
use rocket::config::Sig;
shutdown.signals.insert(Sig::Term);
shutdown.signals.insert(Sig::Int);
}
// Once shutdown is triggered, this is the number of seconds before rocket
// forcefully starts shutting down connections; this gives /sync requests
// and the like (which haven't gotten the memo, somehow) time to still complete gracefully.
shutdown.grace = 35;
// After the grace period, rocket starts shutting down connections, and waits at least this
// many seconds before forcefully shutting all of them down.
shutdown.mercy = 10;
}
config
}

165
src/pdu.rs

@@ -1,55 +1,45 @@
use crate::Error;
use log::error;
use ruma::{
events::{
room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyInitialStateEvent,
AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent,
EventType, StateEvent,
pdu::EventHash, room::member::MemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent,
AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType,
StateEvent,
},
serde::{CanonicalJsonObject, CanonicalJsonValue, Raw},
state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId,
state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName,
ServerSigningKeyId, UInt, UserId,
};
use serde::{Deserialize, Serialize};
use serde_json::{
json,
value::{to_raw_value, RawValue as RawJsonValue},
};
use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, sync::Arc};
use tracing::warn;
/// Content hashes of a PDU.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct EventHash {
/// The SHA-256 hash.
pub sha256: String,
}
use serde_json::json;
use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom};
#[derive(Clone, Deserialize, Serialize, Debug)]
pub struct PduEvent {
pub event_id: Arc<EventId>,
pub room_id: Box<RoomId>,
pub sender: Box<UserId>,
pub event_id: EventId,
pub room_id: RoomId,
pub sender: UserId,
pub origin_server_ts: UInt,
#[serde(rename = "type")]
pub kind: EventType,
pub content: Box<RawJsonValue>,
pub content: serde_json::Value,
#[serde(skip_serializing_if = "Option::is_none")]
pub state_key: Option<String>,
pub prev_events: Vec<Arc<EventId>>,
pub prev_events: Vec<EventId>,
pub depth: UInt,
pub auth_events: Vec<Arc<EventId>>,
pub auth_events: Vec<EventId>,
#[serde(skip_serializing_if = "Option::is_none")]
pub redacts: Option<Arc<EventId>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unsigned: Option<Box<RawJsonValue>>,
pub redacts: Option<EventId>,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub unsigned: BTreeMap<String, serde_json::Value>,
pub hashes: EventHash,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub signatures: Option<Box<RawJsonValue>>, // BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, String>>
pub signatures: BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, String>>,
}
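A recurring change in this struct is `content`, `unsigned`, and `signatures` moving between eagerly parsed types and `Box<RawJsonValue>`. `serde_json::value::RawValue` keeps the original JSON text and defers parsing until a field is actually needed; a sketch of that trade-off (assumes serde_json's `raw_value` feature):

```rust
use serde_json::value::RawValue;

fn main() -> Result<(), serde_json::Error> {
    // The raw variant stores the text verbatim; nothing is parsed yet.
    let content: Box<RawValue> = serde_json::from_str(r#"{"membership":"join"}"#)?;

    // Parse on demand, only when the fields are needed.
    let parsed: serde_json::Value = serde_json::from_str(content.get())?;
    assert_eq!(parsed["membership"], "join");
    Ok(())
}
```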
impl PduEvent {
#[tracing::instrument(skip(self))]
pub fn redact(&mut self, reason: &PduEvent) -> crate::Result<()> {
self.unsigned = None;
self.unsigned.clear();
let allowed: &[&str] = match self.kind {
EventType::RoomMember => &["membership"],
@@ -69,9 +59,10 @@ impl PduEvent {
_ => &[],
};
let mut old_content: BTreeMap<String, serde_json::Value> =
serde_json::from_str(self.content.get())
.map_err(|_| Error::bad_database("PDU in db has invalid content."))?;
let old_content = self
.content
.as_object_mut()
.ok_or_else(|| Error::bad_database("PDU in db has invalid content."))?;
let mut new_content = serde_json::Map::new();
@@ -81,23 +72,12 @@ impl PduEvent {
}
}
self.unsigned = Some(to_raw_value(&json!({
"redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works")
})).expect("to string always works"));
self.content = to_raw_value(&new_content).expect("to string always works");
Ok(())
}
self.unsigned.insert(
"redacted_because".to_owned(),
serde_json::to_value(reason).expect("to_value(PduEvent) always works"),
);
pub fn remove_transaction_id(&mut self) -> crate::Result<()> {
if let Some(unsigned) = &self.unsigned {
let mut unsigned: BTreeMap<String, Box<RawJsonValue>> =
serde_json::from_str(unsigned.get())
.map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?;
unsigned.remove("transaction_id");
self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid"));
}
self.content = new_content.into();
Ok(())
}
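On both sides of the diff, `redact` keeps only the content keys the spec allows for the event's type and drops everything else. The filtering step in isolation, as a sketch:

```rust
use serde_json::{json, Map, Value};

/// Keep only the allowed keys of a redacted event's content.
fn redact_content(content: &Map<String, Value>, allowed: &[&str]) -> Map<String, Value> {
    content
        .iter()
        .filter(|(key, _)| allowed.contains(&key.as_str()))
        .map(|(key, value)| (key.clone(), value.clone()))
        .collect()
}

fn main() {
    let content = json!({ "membership": "join", "displayname": "Alice" });
    let redacted = redact_content(content.as_object().unwrap(), &["membership"]);
    assert!(redacted.contains_key("membership"));
    assert_eq!(redacted.len(), 1);
}
```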
@@ -212,7 +192,7 @@ impl PduEvent {
}
#[tracing::instrument(skip(self))]
pub fn to_member_event(&self) -> Raw<StateEvent<RoomMemberEventContent>> {
pub fn to_member_event(&self) -> Raw<StateEvent<MemberEventContent>> {
let json = json!({
"content": self.content,
"type": self.kind,
@@ -232,7 +212,7 @@ impl PduEvent {
#[tracing::instrument]
pub fn convert_to_outgoing_federation_event(
mut pdu_json: CanonicalJsonObject,
) -> Box<RawJsonValue> {
) -> Raw<ruma::events::pdu::Pdu> {
if let Some(unsigned) = pdu_json
.get_mut("unsigned")
.and_then(|val| val.as_object_mut())
@@ -249,7 +229,10 @@ impl PduEvent {
// )
// .expect("Raw::from_value always works")
to_raw_value(&pdu_json).expect("CanonicalJson is valid serde_json::Value")
serde_json::from_value::<Raw<_>>(
serde_json::to_value(pdu_json).expect("CanonicalJson is valid serde_json::Value"),
)
.expect("Raw::from_value always works")
}
pub fn from_id_val(
@@ -257,7 +240,7 @@ impl PduEvent {
mut json: CanonicalJsonObject,
) -> Result<Self, serde_json::Error> {
json.insert(
"event_id".to_owned(),
"event_id".to_string(),
CanonicalJsonValue::String(event_id.as_str().to_owned()),
);
@@ -266,9 +249,7 @@ impl PduEvent {
}
impl state_res::Event for PduEvent {
type Id = Arc<EventId>;
fn event_id(&self) -> &Self::Id {
fn event_id(&self) -> &EventId {
&self.event_id
}
@@ -279,34 +260,40 @@ impl state_res::Event for PduEvent {
fn sender(&self) -> &UserId {
&self.sender
}
fn event_type(&self) -> &EventType {
&self.kind
fn kind(&self) -> EventType {
self.kind.clone()
}
fn content(&self) -> &RawJsonValue {
&self.content
fn content(&self) -> serde_json::Value {
self.content.clone()
}
fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch {
MilliSecondsSinceUnixEpoch(self.origin_server_ts)
}
fn state_key(&self) -> Option<&str> {
self.state_key.as_deref()
fn state_key(&self) -> Option<String> {
self.state_key.clone()
}
fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_> {
Box::new(self.prev_events.iter())
fn prev_events(&self) -> Vec<EventId> {
self.prev_events.to_vec()
}
fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_> {
Box::new(self.auth_events.iter())
fn depth(&self) -> &UInt {
&self.depth
}
fn redacts(&self) -> Option<&Self::Id> {
fn auth_events(&self) -> Vec<EventId> {
self.auth_events.to_vec()
}
fn redacts(&self) -> Option<&EventId> {
self.redacts.as_ref()
}
fn hashes(&self) -> &EventHash {
&self.hashes
}
fn signatures(&self) -> BTreeMap<Box<ServerName>, BTreeMap<ruma::ServerSigningKeyId, String>> {
self.signatures.clone()
}
fn unsigned(&self) -> &BTreeMap<String, serde_json::Value> {
&self.unsigned
}
}
// These impl's allow us to dedup state snapshots when resolving state
@@ -332,20 +319,18 @@ impl Ord for PduEvent {
///
/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap<String, CanonicalJsonValue>`.
pub(crate) fn gen_event_id_canonical_json(
pdu: &RawJsonValue,
) -> crate::Result<(Box<EventId>, CanonicalJsonObject)> {
let value = serde_json::from_str(pdu.get()).map_err(|e| {
warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
pdu: &Raw<ruma::events::pdu::Pdu>,
) -> crate::Result<(EventId, CanonicalJsonObject)> {
let value = serde_json::from_str(pdu.json().get()).map_err(|e| {
error!("{:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response")
})?;
let event_id = format!(
let event_id = EventId::try_from(&*format!(
"${}",
// Anything higher than version3 behaves the same
ruma::signatures::reference_hash(&value, &RoomVersionId::V6)
ruma::signatures::reference_hash(&value, &RoomVersionId::Version6)
.expect("ruma can calculate reference hashes")
)
.try_into()
))
.expect("ruma's reference hashes are valid event ids");
Ok((event_id, value))
@@ -356,22 +341,8 @@ pub(crate) fn gen_event_id_canonical_json(
pub struct PduBuilder {
#[serde(rename = "type")]
pub event_type: EventType,
pub content: Box<RawJsonValue>,
pub content: serde_json::Value,
pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
pub state_key: Option<String>,
pub redacts: Option<Arc<EventId>>,
}
/// Direct conversion prevents loss of the empty `state_key` that ruma requires.
impl From<AnyInitialStateEvent> for PduBuilder {
fn from(event: AnyInitialStateEvent) -> Self {
Self {
event_type: EventType::from(event.event_type()),
content: to_raw_value(&event.content())
.expect("AnyStateEventContent came from JSON and can thus turn back into JSON."),
unsigned: None,
state_key: Some(event.state_key().to_owned()),
redacts: None,
}
}
pub redacts: Option<EventId>,
}

71
src/ruma_wrapper.rs

@@ -1,4 +1,4 @@
use crate::{database::DatabaseGuard, Error};
use crate::Error;
use ruma::{
api::{client::r0::uiaa::UiaaResponse, OutgoingResponse},
identifiers::{DeviceId, UserId},
@@ -9,26 +9,28 @@ use std::ops::Deref;
#[cfg(feature = "conduit_bin")]
use {
crate::server_server,
crate::{server_server, Database},
log::{debug, warn},
rocket::{
data::{self, ByteUnit, Data, FromData},
http::Status,
outcome::Outcome::*,
response::{self, Responder},
tokio::io::AsyncReadExt,
Request,
Request, State,
},
ruma::api::{AuthScheme, IncomingRequest},
std::collections::BTreeMap,
std::convert::TryFrom,
std::io::Cursor,
tracing::{debug, warn},
std::sync::Arc,
};
/// This struct converts rocket requests into ruma structs by converting them into http requests
/// first.
pub struct Ruma<T: Outgoing> {
pub body: T::Incoming,
pub sender_user: Option<Box<UserId>>,
pub sender_user: Option<UserId>,
pub sender_device: Option<Box<DeviceId>>,
pub sender_servername: Option<Box<ServerName>>,
// This is None when body is not a valid string
@@ -44,14 +46,10 @@ where
{
type Error = ();
#[tracing::instrument(skip(request, data))]
async fn from_data(
request: &'a Request<'_>,
data: Data<'a>,
) -> data::Outcome<'a, Self, Self::Error> {
async fn from_data(request: &'a Request<'_>, data: Data) -> data::Outcome<Self, Self::Error> {
let metadata = T::Incoming::METADATA;
let db = request
.guard::<DatabaseGuard>()
.guard::<State<'_, Arc<Database>>>()
.await
.expect("database was loaded");
@@ -65,11 +63,7 @@ where
let limit = db.globals.max_request_size();
let mut handle = data.open(ByteUnit::Byte(limit.into()));
let mut body = Vec::new();
if handle.read_to_end(&mut body).await.is_err() {
// Client disconnected
// Missing Token
return Failure((Status::new(582), ()));
}
handle.read_to_end(&mut body).await.unwrap();
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
@@ -78,14 +72,14 @@ where
registration,
)) = db
.appservice
.all()
.iter_all()
.unwrap()
.iter()
.filter_map(|r| r.ok())
.find(|(_id, registration)| {
registration
.get("as_token")
.and_then(|as_token| as_token.as_str())
.map_or(false, |as_token| token == Some(as_token))
.map_or(false, |as_token| token.as_deref() == Some(as_token))
}) {
match metadata.authentication {
AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
@@ -102,13 +96,14 @@ where
.unwrap()
},
|string| {
UserId::parse(string.expect("parsing to string always works")).unwrap()
UserId::try_from(string.expect("parsing to string always works"))
.unwrap()
},
);
if !db.users.exists(&user_id).unwrap() {
// Forbidden
return Failure((Status::new(580), ()));
return Failure((Status::raw(580), ()));
}
// TODO: Check if appservice is allowed to be that user
@@ -121,9 +116,9 @@ where
match metadata.authentication {
AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
if let Some(token) = token {
match db.users.find_from_token(token).unwrap() {
match db.users.find_from_token(&token).unwrap() {
// Unknown Token
None => return Failure((Status::new(581), ())),
None => return Failure((Status::raw(581), ())),
Some((user_id, device_id)) => (
Some(user_id),
Some(Box::<DeviceId>::from(device_id)),
@@ -133,7 +128,7 @@ where
}
} else {
// Missing Token
return Failure((Status::new(582), ()));
return Failure((Status::raw(582), ()));
}
}
AuthScheme::ServerSignatures => {
@@ -155,7 +150,7 @@ where
warn!("No Authorization header");
// Forbidden
return Failure((Status::new(580), ()));
return Failure((Status::raw(580), ()));
}
};
@@ -165,11 +160,11 @@ where
warn!("Invalid X-Matrix header origin field: {:?}", x_matrix);
// Forbidden
return Failure((Status::new(580), ()));
return Failure((Status::raw(580), ()));
}
};
let origin = match ServerName::parse(origin_str) {
let origin = match Box::<ServerName>::try_from(origin_str) {
Ok(s) => s,
_ => {
warn!(
@@ -178,7 +173,7 @@ where
);
// Forbidden
return Failure((Status::new(580), ()));
return Failure((Status::raw(580), ()));
}
};
@@ -188,7 +183,7 @@ where
warn!("Invalid X-Matrix header key field: {:?}", x_matrix);
// Forbidden
return Failure((Status::new(580), ()));
return Failure((Status::raw(580), ()));
}
};
@@ -198,7 +193,7 @@ where
warn!("Invalid X-Matrix header sig field: {:?}", x_matrix);
// Forbidden
return Failure((Status::new(580), ()));
return Failure((Status::raw(580), ()));
}
};
@@ -249,7 +244,7 @@ where
warn!("Failed to fetch signing keys: {}", e);
// Forbidden
return Failure((Status::new(580), ()));
return Failure((Status::raw(580), ()));
}
};
@@ -259,17 +254,14 @@ where
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
Ok(()) => (None, None, Some(origin), false),
Err(e) => {
warn!(
"Failed to verify json request from {}: {}\n{:?}",
origin, e, request_map
);
warn!("Failed to verify json request from {}: {}", origin, e);
if request.uri().to_string().contains('@') {
warn!("Request uri contained '@' character. Make sure your reverse proxy gives Conduit the raw uri (apache: use nocanon)");
}
// Forbidden
return Failure((Status::new(580), ()));
return Failure((Status::raw(580), ()));
}
}
}
@@ -326,8 +318,7 @@ where
}),
Err(e) => {
warn!("{:?}", e);
// Bad Json
Failure((Status::new(583), ()))
Failure((Status::raw(583), ()))
}
}
}
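The raw statuses 580–583 used throughout `from_data` are not standard HTTP codes; going by the inline comments, they read as internal markers (forbidden, unknown token, missing token, bad JSON) that the catchers registered in `main.rs` presumably translate into proper Matrix error responses. The convention spelled out as a sketch (the enum is illustrative, not from the source):

```rust
/// Internal auth-failure markers, per the inline comments above.
#[derive(Clone, Copy, Debug)]
enum AuthFailure {
    Forbidden = 580,
    UnknownToken = 581,
    MissingToken = 582,
    BadJson = 583,
}

fn main() {
    let failure = AuthFailure::MissingToken;
    println!("responding with pseudo-status {}", failure as u16);
}
```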
@@ -342,7 +333,7 @@ impl<T: Outgoing> Deref for Ruma<T> {
}
/// This struct converts ruma responses into rocket http responses.
pub type ConduitResult<T> = Result<RumaResponse<T>, Error>;
pub type ConduitResult<T> = std::result::Result<RumaResponse<T>, Error>;
pub fn response<T: OutgoingResponse>(response: RumaResponse<T>) -> response::Result<'static> {
let http_response = response
@@ -353,7 +344,7 @@ pub fn response<T: OutgoingResponse>(response: RumaResponse<T>) -> response::Res
let mut response = rocket::response::Response::build();
let status = http_response.status();
response.status(Status::new(status.as_u16()));
response.raw_status(status.into(), "");
for header in http_response.headers() {
response.raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned());

2565
src/server_server.rs

File diff suppressed because it is too large

44
src/utils.rs

@@ -5,11 +5,9 @@ use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject};
use std::{
cmp,
convert::TryInto,
str::FromStr,
time::{SystemTime, UNIX_EPOCH},
};
#[tracing::instrument]
pub fn millis_since_unix_epoch() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
@@ -17,6 +15,15 @@ pub fn millis_since_unix_epoch() -> u64 {
.as_millis() as u64
}
#[cfg(feature = "rocksdb")]
pub fn increment_rocksdb(
_new_key: &[u8],
old: Option<&[u8]>,
_operands: &mut rocksdb::MergeOperands,
) -> Option<Vec<u8>> {
increment(old)
}
pub fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
let number = match old.map(|bytes| bytes.try_into()) {
Some(Ok(bytes)) => {
@@ -40,19 +47,16 @@ pub fn generate_keypair() -> Vec<u8> {
}
/// Parses the bytes into an u64.
#[tracing::instrument(skip(bytes))]
pub fn u64_from_bytes(bytes: &[u8]) -> Result<u64, std::array::TryFromSliceError> {
let array: [u8; 8] = bytes.try_into()?;
Ok(u64::from_be_bytes(array))
}
/// Parses the bytes into a string.
#[tracing::instrument(skip(bytes))]
pub fn string_from_bytes(bytes: &[u8]) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(bytes.to_vec())
}
#[tracing::instrument(skip(length))]
pub fn random_string(length: usize) -> String {
thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
@@ -62,7 +66,6 @@ pub fn random_string(length: usize) -> String {
}
/// Calculate a new hash for the given password
#[tracing::instrument(skip(password))]
pub fn calculate_hash(password: &str) -> Result<String, argon2::Error> {
let hashing_config = Config {
variant: Variant::Argon2id,
@@ -73,7 +76,6 @@ pub fn calculate_hash(password: &str) -> Result<String, argon2::Error> {
argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config)
}
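`calculate_hash` returns an encoded Argon2id string with its parameters embedded, so the matching login-time check needs no separate salt or config. A sketch of that check, assuming the same rust-argon2 crate used above:

```rust
/// Verify a password against the encoded hash stored by `calculate_hash`.
/// rust-argon2 reads the parameters back out of the encoded string itself.
fn verify_password(stored_encoded_hash: &str, password: &str) -> bool {
    argon2::verify_encoded(stored_encoded_hash, password.as_bytes()).unwrap_or(false)
}
```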
#[tracing::instrument(skip(iterators, check_order))]
pub fn common_elements(
mut iterators: impl Iterator<Item = impl Iterator<Item = Vec<u8>>>,
check_order: impl Fn(&[u8], &[u8]) -> Ordering,
@@ -101,7 +103,6 @@ pub fn common_elements(
/// Fallible conversion from any value that implements `Serialize` to a `CanonicalJsonObject`.
///
/// `value` must serialize to an `serde_json::Value::Object`.
#[tracing::instrument(skip(value))]
pub fn to_canonical_object<T: serde::Serialize>(
value: T,
) -> Result<CanonicalJsonObject, CanonicalJsonError> {
@@ -114,30 +115,3 @@ pub fn to_canonical_object<T: serde::Serialize>(
))),
}
}
#[tracing::instrument(skip(deserializer))]
pub fn deserialize_from_str<
'de,
D: serde::de::Deserializer<'de>,
T: FromStr<Err = E>,
E: std::fmt::Display,
>(
deserializer: D,
) -> Result<T, D::Error> {
struct Visitor<T: FromStr<Err = E>, E>(std::marker::PhantomData<T>);
impl<'de, T: FromStr<Err = Err>, Err: std::fmt::Display> serde::de::Visitor<'de>
for Visitor<T, Err>
{
type Value = T;
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(formatter, "a parsable string")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
v.parse().map_err(serde::de::Error::custom)
}
}
deserializer.deserialize_str(Visitor(std::marker::PhantomData))
}
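`deserialize_from_str` bridges serde and `FromStr`: it expects a string value and parses it into any `FromStr` type, which is handy for config values that arrive quoted. Typical use is through `deserialize_with` (the struct and field below are hypothetical, and assume `deserialize_from_str` is in scope):

```rust
#[derive(serde::Deserialize)]
struct ListenConfig {
    // `port = "6167"` in the config becomes a u16 via FromStr.
    #[serde(deserialize_with = "deserialize_from_str")]
    port: u16,
}
```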

1
tests/sytest/sytest-whitelist

@@ -510,4 +510,3 @@ remote user can join room with version 5
remote user can join room with version 6
setting 'm.room.name' respects room powerlevel
setting 'm.room.power_levels' respects room powerlevel
Federation publicRoom Name/topic keys are correct

15
tests/test-config.toml

@@ -1,15 +0,0 @@
[global]
# Server runs in same container as tests do, so localhost is fine
server_name = "localhost"
# With a bit of luck /tmp is a RAM disk, so that the file system does not become the bottleneck while testing
database_path = "/tmp"
# All the other settings are left at their defaults:
port = 6167
max_request_size = 20_000_000
allow_registration = true
trusted_servers = ["matrix.org"]
address = "127.0.0.1"
proxy = "none"