Compare commits


4 Commits

31 changed files (number of changed lines in parentheses):

  1. .gitlab-ci.yml (87)
  2. Cargo.lock (43)
  3. Cargo.toml (4)
  4. DEPLOY.md (25)
  5. Dockerfile (7)
  6. docker/ci-binaries-packaging.Dockerfile (7)
  7. src/client_server/account.rs (30)
  8. src/client_server/capabilities.rs (26)
  9. src/client_server/directory.rs (2)
  10. src/client_server/keys.rs (22)
  11. src/client_server/membership.rs (73)
  12. src/client_server/message.rs (16)
  13. src/client_server/push.rs (10)
  14. src/client_server/redact.rs (4)
  15. src/client_server/report.rs (15)
  16. src/client_server/room.rs (43)
  17. src/client_server/state.rs (6)
  18. src/client_server/sync.rs (17)
  19. src/client_server/voip.rs (6)
  20. src/database.rs (30)
  21. src/database/admin.rs (11)
  22. src/database/globals.rs (57)
  23. src/database/key_backups.rs (8)
  24. src/database/pusher.rs (4)
  25. src/database/rooms.rs (180)
  26. src/database/rooms/edus.rs (35)
  27. src/database/sending.rs (11)
  28. src/database/users.rs (32)
  29. src/pdu.rs (49)
  30. src/ruma_wrapper.rs (10)
  31. src/server_server.rs (182)

87
.gitlab-ci.yml

@@ -142,12 +142,8 @@ build:debug:cargo:x86_64-unknown-linux-musl:
 DOCKER_HOST: tcp://docker:2375/
 DOCKER_TLS_CERTDIR: ""
 DOCKER_DRIVER: overlay2
-PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64"
+PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64"
 DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile"
-cache:
-paths:
-- docker_cache
-key: "$CI_JOB_NAME"
 before_script:
 - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
 # Only log in to Dockerhub if the credentials are given:
@@ -160,51 +156,80 @@ build:debug:cargo:x86_64-unknown-linux-musl:
 - mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64
 - mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6
 - mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7
-- mv ./conduit-aarch64-unknown-linux-musl linux/arm64
+- mkdir -p linux/arm64/ && mv ./conduit-aarch64-unknown-linux-musl linux/arm64/v8
-- 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"'
-# Build and push image:
+# Actually create multiarch image:
 - >
 docker buildx build
 --pull
 --push
---cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache
---cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache
---build-arg CREATED=$CREATED
+--build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
 --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
 --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
 --platform "$PLATFORMS"
---tag "$TAG"
---tag "$TAG-alpine"
---tag "$TAG-commit-$CI_COMMIT_SHORT_SHA"
+--tag "$GL_IMAGE_TAG"
+--tag "$GL_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"
 --file "$DOCKER_FILE" .
+# Only try to push to docker hub, if auth data for dockerhub exists:
+- if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG"; fi
+- if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"; fi
-docker:next:gitlab:
+build:docker:next:
 extends: .docker-shared-settings
 rules:
 - if: '$CI_COMMIT_BRANCH == "next"'
 variables:
-TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
+GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
+DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
-docker:next:dockerhub:
-extends: .docker-shared-settings
-rules:
-- if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB'
-variables:
-TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
-docker:master:gitlab:
+build:docker:master:
 extends: .docker-shared-settings
 rules:
 - if: '$CI_COMMIT_BRANCH == "master"'
 variables:
-TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
+GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
+DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
-docker:master:dockerhub:
-extends: .docker-shared-settings
-rules:
-- if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB'
-variables:
-TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
+## Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image
+#.docker-shared-settings:
+# stage: "build docker image"
+# needs: []
+# interruptible: true
+# image:
+# name: "gcr.io/kaniko-project/executor:debug"
+# entrypoint: [""]
+# tags: ["docker"]
+# variables:
+# # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache
+# KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache"
+# before_script:
+# - "mkdir -p /kaniko/.docker"
+# - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json'
+#
+#
+#build:docker:next:
+# extends: .docker-shared-settings
+# needs:
+# - "build:release:cargo:x86_64-unknown-linux-musl"
+# script:
+# - >
+# /kaniko/executor
+# $KANIKO_CACHE_ARGS
+# --force
+# --context $CI_PROJECT_DIR
+# --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
+# --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
+# --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
+# --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile"
+# --destination "$CI_REGISTRY_IMAGE/conduit:next"
+# --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine"
+# --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA"
+# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
+# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine"
+# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA"
+# rules:
+# - if: '$CI_COMMIT_BRANCH == "next"'
+#
+#
 # --------------------------------------------------------------------- #
 # Run tests #

43
Cargo.lock generated

@ -1516,6 +1516,12 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "paste"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
[[package]] [[package]]
name = "pear" name = "pear"
version = "0.2.3" version = "0.2.3"
@ -1984,7 +1990,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma" name = "ruma"
version = "0.4.0" version = "0.4.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"assign", "assign",
"js_int", "js_int",
@ -2005,7 +2011,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-api" name = "ruma-api"
version = "0.18.5" version = "0.18.5"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"bytes", "bytes",
"http", "http",
@ -2021,7 +2027,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-api-macros" name = "ruma-api-macros"
version = "0.18.5" version = "0.18.5"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2032,7 +2038,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-appservice-api" name = "ruma-appservice-api"
version = "0.4.0" version = "0.4.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"ruma-api", "ruma-api",
"ruma-common", "ruma-common",
@ -2046,7 +2052,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-client-api" name = "ruma-client-api"
version = "0.12.3" version = "0.12.3"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"assign", "assign",
"bytes", "bytes",
@ -2066,7 +2072,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-common" name = "ruma-common"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"indexmap", "indexmap",
"js_int", "js_int",
@ -2081,7 +2087,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events" name = "ruma-events"
version = "0.24.6" version = "0.24.6"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"indoc", "indoc",
"js_int", "js_int",
@ -2097,7 +2103,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events-macros" name = "ruma-events-macros"
version = "0.24.6" version = "0.24.6"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2108,7 +2114,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-federation-api" name = "ruma-federation-api"
version = "0.3.1" version = "0.3.1"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2123,8 +2129,9 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers" name = "ruma-identifiers"
version = "0.20.0" version = "0.20.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"paste",
"percent-encoding", "percent-encoding",
"rand 0.8.4", "rand 0.8.4",
"ruma-identifiers-macros", "ruma-identifiers-macros",
@ -2137,7 +2144,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-macros" name = "ruma-identifiers-macros"
version = "0.20.0" version = "0.20.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"quote", "quote",
"ruma-identifiers-validation", "ruma-identifiers-validation",
@ -2147,7 +2154,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-validation" name = "ruma-identifiers-validation"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"thiserror", "thiserror",
] ]
@ -2155,7 +2162,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identity-service-api" name = "ruma-identity-service-api"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2168,7 +2175,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-push-gateway-api" name = "ruma-push-gateway-api"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2183,7 +2190,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-serde" name = "ruma-serde"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"bytes", "bytes",
"form_urlencoded", "form_urlencoded",
@ -2197,7 +2204,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-serde-macros" name = "ruma-serde-macros"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2208,7 +2215,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-signatures" name = "ruma-signatures"
version = "0.9.0" version = "0.9.0"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"base64 0.13.0", "base64 0.13.0",
"ed25519-dalek", "ed25519-dalek",
@ -2225,7 +2232,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-state-res" name = "ruma-state-res"
version = "0.4.1" version = "0.4.1"
source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a"
dependencies = [ dependencies = [
"itertools 0.10.1", "itertools 0.10.1",
"js_int", "js_int",

4
Cargo.toml

@@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request
 # Used for matrix spec type definitions and helpers
 #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
-ruma = { git = "https://github.com/ruma/ruma", rev = "16f031fabb7871fcd738b0f25391193ee4ca28a9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+ruma = { git = "https://github.com/ruma/ruma", rev = "e7f01ca55a1eff437bad754bf0554cc09f44ec2a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
@@ -40,7 +40,7 @@ serde_json = { version = "1.0.67", features = ["raw_value"] }
 # Used for appservice registration files
 serde_yaml = "0.8.20"
 # Used for pdu definition
-serde = { version = "1.0.130", features = ["rc"] }
+serde = "1.0.130"
 # Used for secure identifiers
 rand = "0.8.4"
 # Used to hash passwords

25
DEPLOY.md

@@ -12,17 +12,20 @@ only offer Linux binaries.
 You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
-| CPU Architecture | Download stable version |
-| ------------------------------------------- | ------------------------------ |
-| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] |
-| armv6 | [Download][armv6-musl-master] |
-| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] |
-| armv8 / aarch64 | [Download][armv8-musl-master] |
+| CPU Architecture | Download link |
+| ------------------------------------------- | ----------------------- |
+| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl] |
+| armv6 | [Download][armv6-musl] |
+| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl] |
+| armv8 / aarch64 | [Download][armv8-musl] |
-[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
-[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
-[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
-[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
+[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
+[armv6-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
+[armv7-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
+[armv8-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
 ```bash
 $ sudo wget -O /usr/local/bin/matrix-conduit <url>

7
Dockerfile

@@ -32,7 +32,7 @@ RUN touch src/main.rs && touch src/lib.rs && cargo build --release
 # ---------------------------------------------------------------------------------------------------------------
 # Stuff below this line actually ends up in the resulting docker image
 # ---------------------------------------------------------------------------------------------------------------
-FROM docker.io/alpine:3.15.0 AS runner
+FROM docker.io/alpine:3.14 AS runner
 # Standard port on which Conduit launches.
 # You still need to map the port when using the docker command or docker-compose.
@@ -46,6 +46,7 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
 # libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
 RUN apk add --no-cache \
 ca-certificates \
+curl \
 libgcc
@@ -53,11 +54,11 @@ RUN apk add --no-cache \
 RUN mkdir -p /srv/conduit/.local/share/conduit
 # Test if Conduit is still alive, uses the same endpoint as Element
-COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
+COPY ./docker/healthcheck.sh /srv/conduit/
 HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
 # Copy over the actual Conduit binary from the builder stage
-COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit
+COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/
 # Improve security: Don't run stuff as root, that does not need to run as root:
 # Add www-data user and group with UID 82, as used by alpine

7
docker/ci-binaries-packaging.Dockerfile

@@ -1,13 +1,14 @@
 # syntax=docker/dockerfile:1
 # ---------------------------------------------------------------------------------------------------------
 # This Dockerfile is intended to be built as part of Conduit's CI pipeline.
-# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs.
+# It does not build Conduit in Docker, but just copies the matching build artifact from the build job.
+# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary.
 #
 # It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
 # Credit's for the original Dockerfile: Weasy666.
 # ---------------------------------------------------------------------------------------------------------
-FROM docker.io/alpine:3.15.0 AS runner
+FROM docker.io/alpine:3.14 AS runner
 # Standard port on which Conduit launches.
 # You still need to map the port when using the docker command or docker-compose.
@@ -46,7 +47,7 @@ LABEL org.opencontainers.image.created=${CREATED} \
 RUN mkdir -p /srv/conduit/.local/share/conduit
 # Test if Conduit is still alive, uses the same endpoint as Element
-COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
+COPY ./docker/healthcheck.sh /srv/conduit/
 HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh

30
src/client_server/account.rs

@ -1,4 +1,8 @@
use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
};
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
@ -7,9 +11,10 @@ use ruma::{
error::ErrorKind, error::ErrorKind,
r0::{ r0::{
account::{ account::{
change_password, deactivate, get_3pids, get_username_availability, register, change_password, deactivate, get_username_availability, register, whoami,
whoami, ThirdPartyIdRemovalStatus, ThirdPartyIdRemovalStatus,
}, },
contact::get_contacts,
uiaa::{AuthFlow, AuthType, UiaaInfo}, uiaa::{AuthFlow, AuthType, UiaaInfo},
}, },
}, },
@ -29,7 +34,7 @@ use ruma::{
EventType, EventType,
}, },
identifiers::RoomName, identifiers::RoomName,
push, RoomAliasId, RoomId, RoomVersionId, UserId, push, RoomAliasId, RoomId, UserId,
}; };
use serde_json::value::to_raw_value; use serde_json::value::to_raw_value;
use tracing::info; use tracing::info;
@ -277,7 +282,7 @@ pub async fn register_route(
let mut content = RoomCreateEventContent::new(conduit_user.clone()); let mut content = RoomCreateEventContent::new(conduit_user.clone());
content.federate = true; content.federate = true;
content.predecessor = None; content.predecessor = None;
content.room_version = RoomVersionId::V6; content.room_version = db.globals.default_room_version();
// 1. The room create event // 1. The room create event
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
@ -306,7 +311,6 @@ pub async fn register_route(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -393,7 +397,8 @@ pub async fn register_route(
)?; )?;
// 6. Events implied by name and topic // 6. Events implied by name and topic
let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name())) let room_name =
Box::<RoomName>::try_from(format!("{} Admin Room", db.globals.server_name()))
.expect("Room name is valid"); .expect("Room name is valid");
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
@ -428,7 +433,7 @@ pub async fn register_route(
)?; )?;
// Room alias // Room alias
let alias: Box<RoomAliasId> = format!("#admins:{}", db.globals.server_name()) let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name())
.try_into() .try_into()
.expect("#admins:server_name is a valid alias name"); .expect("#admins:server_name is a valid alias name");
@ -464,7 +469,6 @@ pub async fn register_route(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -487,7 +491,6 @@ pub async fn register_route(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -704,7 +707,6 @@ pub async fn deactivate_route(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}; };
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
@ -755,9 +757,9 @@ pub async fn deactivate_route(
get("/_matrix/client/r0/account/3pid", data = "<body>") get("/_matrix/client/r0/account/3pid", data = "<body>")
)] )]
pub async fn third_party_route( pub async fn third_party_route(
body: Ruma<get_3pids::Request>, body: Ruma<get_contacts::Request>,
) -> ConduitResult<get_3pids::Response> { ) -> ConduitResult<get_contacts::Response> {
let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
Ok(get_3pids::Response::new(Vec::new()).into()) Ok(get_contacts::Response::new(Vec::new()).into())
} }

26
src/client_server/capabilities.rs

@@ -1,9 +1,6 @@
-use crate::{ConduitResult, Ruma};
-use ruma::{
-api::client::r0::capabilities::{
+use crate::{database::DatabaseGuard, ConduitResult, Ruma};
+use ruma::api::client::r0::capabilities::{
 get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability,
-},
-RoomVersionId,
 };
 use std::collections::BTreeMap;
@@ -17,17 +14,28 @@ use rocket::get;
 feature = "conduit_bin",
 get("/_matrix/client/r0/capabilities", data = "<_body>")
 )]
-#[tracing::instrument(skip(_body))]
+#[tracing::instrument(skip(_body, db))]
 pub async fn get_capabilities_route(
 _body: Ruma<get_capabilities::Request>,
+db: DatabaseGuard,
 ) -> ConduitResult<get_capabilities::Response> {
 let mut available = BTreeMap::new();
-available.insert(RoomVersionId::V5, RoomVersionStability::Stable);
-available.insert(RoomVersionId::V6, RoomVersionStability::Stable);
+if db.globals.allow_unstable_room_versions() {
+for room_version in &db.globals.unstable_room_versions {
+available.insert(room_version.clone(), RoomVersionStability::Stable);
+}
+} else {
+for room_version in &db.globals.unstable_room_versions {
+available.insert(room_version.clone(), RoomVersionStability::Unstable);
+}
+}
+for room_version in &db.globals.stable_room_versions {
+available.insert(room_version.clone(), RoomVersionStability::Stable);
+}
 let mut capabilities = Capabilities::new();
 capabilities.room_versions = RoomVersionsCapability {
-default: RoomVersionId::V6,
+default: db.globals.default_room_version(),
 available,
 };

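The new capabilities handler, and the membership/room handlers further down, no longer hard-code RoomVersionId::V5/V6; they ask db.globals which versions are stable, unstable, allowed, and default. The globals.rs change (57 lines) is not reproduced in this excerpt, so the following is only a minimal sketch: the field and method names (stable_room_versions, unstable_room_versions, allow_unstable_room_versions(), default_room_version(), supported_room_versions()) are taken from the calls visible in the diffs, while the bodies and the config wiring are assumptions, not Conduit's actual implementation.

```rust
use ruma::RoomVersionId;

// Hypothetical sketch of the room-version bookkeeping on the Globals struct,
// mirroring the calls made by get_capabilities_route and join_room_by_id_helper.
pub struct Globals {
    pub stable_room_versions: Vec<RoomVersionId>,   // advertised as stable
    pub unstable_room_versions: Vec<RoomVersionId>, // only offered when allowed
    allow_unstable_room_versions: bool,             // assumed to come from the server config
    default_room_version: RoomVersionId,            // assumed to come from the server config
}

impl Globals {
    pub fn allow_unstable_room_versions(&self) -> bool {
        self.allow_unstable_room_versions
    }

    pub fn default_room_version(&self) -> RoomVersionId {
        self.default_room_version.clone()
    }

    /// Every version this server is willing to use, e.g. for the `ver` field
    /// of the make_join request in membership.rs.
    pub fn supported_room_versions(&self) -> Vec<RoomVersionId> {
        let mut versions = self.stable_room_versions.clone();
        if self.allow_unstable_room_versions {
            versions.extend(self.unstable_room_versions.iter().cloned());
        }
        versions
    }
}
```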
2
src/client_server/directory.rs

@@ -167,7 +167,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
 other_server,
 federation::directory::get_public_rooms_filtered::v1::Request {
 limit,
-since,
+since: since.as_deref(),
 filter: Filter {
 generic_search_term: filter.generic_search_term.as_deref(),
 },

22
src/client_server/keys.rs

@ -316,7 +316,7 @@ pub async fn get_key_changes_route(
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>( pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
sender_user: Option<&UserId>, sender_user: Option<&UserId>,
device_keys_input: &BTreeMap<Box<UserId>, Vec<Box<DeviceId>>>, device_keys_input: &BTreeMap<UserId, Vec<Box<DeviceId>>>,
allowed_signatures: F, allowed_signatures: F,
db: &Database, db: &Database,
) -> Result<get_keys::Response> { ) -> Result<get_keys::Response> {
@ -328,8 +328,6 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
let mut get_over_federation = HashMap::new(); let mut get_over_federation = HashMap::new();
for (user_id, device_ids) in device_keys_input { for (user_id, device_ids) in device_keys_input {
let user_id: &UserId = &**user_id;
if user_id.server_name() != db.globals.server_name() { if user_id.server_name() != db.globals.server_name() {
get_over_federation get_over_federation
.entry(user_id.server_name()) .entry(user_id.server_name())
@ -357,11 +355,11 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
container.insert(device_id, keys); container.insert(device_id, keys);
} }
} }
device_keys.insert(user_id.to_owned(), container); device_keys.insert(user_id.clone(), container);
} else { } else {
for device_id in device_ids { for device_id in device_ids {
let mut container = BTreeMap::new(); let mut container = BTreeMap::new();
if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? { if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), device_id)? {
let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or( let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or(
Error::BadRequest( Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
@ -373,24 +371,24 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
device_display_name: metadata.display_name, device_display_name: metadata.display_name,
}; };
container.insert(device_id.to_owned(), keys); container.insert(device_id.clone(), keys);
} }
device_keys.insert(user_id.to_owned(), container); device_keys.insert(user_id.clone(), container);
} }
} }
if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? { if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
master_keys.insert(user_id.to_owned(), master_key); master_keys.insert(user_id.clone(), master_key);
} }
if let Some(self_signing_key) = db if let Some(self_signing_key) = db
.users .users
.get_self_signing_key(user_id, &allowed_signatures)? .get_self_signing_key(user_id, &allowed_signatures)?
{ {
self_signing_keys.insert(user_id.to_owned(), self_signing_key); self_signing_keys.insert(user_id.clone(), self_signing_key);
} }
if Some(user_id) == sender_user { if Some(user_id) == sender_user {
if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? { if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
user_signing_keys.insert(user_id.to_owned(), user_signing_key); user_signing_keys.insert(user_id.clone(), user_signing_key);
} }
} }
} }
@ -402,7 +400,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
.map(|(server, vec)| async move { .map(|(server, vec)| async move {
let mut device_keys_input_fed = BTreeMap::new(); let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec { for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); device_keys_input_fed.insert(user_id.clone(), keys.clone());
} }
( (
server, server,
@ -442,7 +440,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
} }
pub(crate) async fn claim_keys_helper( pub(crate) async fn claim_keys_helper(
one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>, one_time_keys_input: &BTreeMap<UserId, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
db: &Database, db: &Database,
) -> Result<claim_keys::Response> { ) -> Result<claim_keys::Response> {
let mut one_time_keys = BTreeMap::new(); let mut one_time_keys = BTreeMap::new();

73
src/client_server/membership.rs

@ -31,7 +31,6 @@ use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use std::{ use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
iter,
sync::{Arc, RwLock}, sync::{Arc, RwLock},
time::{Duration, Instant}, time::{Duration, Instant},
}; };
@ -65,7 +64,7 @@ pub async fn join_room_by_id_route(
.filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
.filter_map(|sender| UserId::parse(sender).ok()) .filter_map(|sender| UserId::try_from(sender).ok())
.map(|user| user.server_name().to_owned()) .map(|user| user.server_name().to_owned())
.collect(); .collect();
@ -73,7 +72,7 @@ pub async fn join_room_by_id_route(
let ret = join_room_by_id_helper( let ret = join_room_by_id_helper(
&db, &db,
body.sender_user.as_deref(), body.sender_user.as_ref(),
&body.room_id, &body.room_id,
&servers, &servers,
body.third_party_signed.as_ref(), body.third_party_signed.as_ref(),
@ -100,10 +99,9 @@ pub async fn join_room_by_id_or_alias_route(
db: DatabaseGuard, db: DatabaseGuard,
body: Ruma<join_room_by_id_or_alias::Request<'_>>, body: Ruma<join_room_by_id_or_alias::Request<'_>>,
) -> ConduitResult<join_room_by_id_or_alias::Response> { ) -> ConduitResult<join_room_by_id_or_alias::Response> {
let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = body.body;
let (servers, room_id) = match Box::<RoomId>::try_from(body.room_id_or_alias) { let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) {
Ok(room_id) => { Ok(room_id) => {
let mut servers: HashSet<_> = db let mut servers: HashSet<_> = db
.rooms .rooms
@ -113,7 +111,7 @@ pub async fn join_room_by_id_or_alias_route(
.filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
.filter_map(|sender| UserId::parse(sender).ok()) .filter_map(|sender| UserId::try_from(sender).ok())
.map(|user| user.server_name().to_owned()) .map(|user| user.server_name().to_owned())
.collect(); .collect();
@ -129,7 +127,7 @@ pub async fn join_room_by_id_or_alias_route(
let join_room_response = join_room_by_id_helper( let join_room_response = join_room_by_id_helper(
&db, &db,
Some(sender_user), body.sender_user.as_ref(),
&room_id, &room_id,
&servers, &servers,
body.third_party_signed.as_ref(), body.third_party_signed.as_ref(),
@ -286,7 +284,6 @@ pub async fn ban_user_route(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}), }),
|event| { |event| {
serde_json::from_str(event.content.get()) serde_json::from_str(event.content.get())
@ -534,7 +531,7 @@ async fn join_room_by_id_helper(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.to_owned()) .entry(room_id.clone())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -554,7 +551,7 @@ async fn join_room_by_id_helper(
federation::membership::create_join_event_template::v1::Request { federation::membership::create_join_event_template::v1::Request {
room_id, room_id,
user_id: sender_user, user_id: sender_user,
ver: &[RoomVersionId::V5, RoomVersionId::V6], ver: &db.globals.supported_room_versions(),
}, },
) )
.await; .await;
@ -569,11 +566,7 @@ async fn join_room_by_id_helper(
let (make_join_response, remote_server) = make_join_response_and_server?; let (make_join_response, remote_server) = make_join_response_and_server?;
let room_version = match make_join_response.room_version { let room_version = match make_join_response.room_version {
Some(room_version) Some(room_version) if db.rooms.is_supported_version(&db, &room_version) => room_version,
if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 =>
{
room_version
}
_ => return Err(Error::BadServerResponse("Room version is not supported")), _ => return Err(Error::BadServerResponse("Room version is not supported")),
}; };
@ -605,7 +598,6 @@ async fn join_room_by_id_helper(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
); );
@ -623,12 +615,11 @@ async fn join_room_by_id_helper(
.expect("event is valid, we just created it"); .expect("event is valid, we just created it");
// Generate event id // Generate event id
let event_id = format!( let event_id = EventId::try_from(&*format!(
"${}", "${}",
ruma::signatures::reference_hash(&join_event_stub, &room_version) ruma::signatures::reference_hash(&join_event_stub, &room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
); ))
let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
// Add event_id back // Add event_id back
@ -647,7 +638,7 @@ async fn join_room_by_id_helper(
remote_server, remote_server,
federation::membership::create_join_event::v2::Request { federation::membership::create_join_event::v2::Request {
room_id, room_id,
event_id, event_id: &event_id,
pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()), pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()),
}, },
) )
@ -655,7 +646,7 @@ async fn join_room_by_id_helper(
db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; db.rooms.get_or_create_shortroomid(room_id, &db.globals)?;
let pdu = PduEvent::from_id_val(event_id, join_event.clone()) let pdu = PduEvent::from_id_val(&event_id, join_event.clone())
.map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?;
let mut state = HashMap::new(); let mut state = HashMap::new();
@ -743,7 +734,7 @@ async fn join_room_by_id_helper(
db.rooms.append_pdu( db.rooms.append_pdu(
&pdu, &pdu,
utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"),
iter::once(&*pdu.event_id), &[pdu.event_id.clone()],
db, db,
)?; )?;
@ -759,7 +750,6 @@ async fn join_room_by_id_helper(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}; };
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
@ -781,7 +771,7 @@ async fn join_room_by_id_helper(
db.flush()?; db.flush()?;
Ok(join_room_by_id::Response::new(room_id.to_owned()).into()) Ok(join_room_by_id::Response::new(room_id.clone()).into())
} }
fn validate_and_add_event_id( fn validate_and_add_event_id(
@ -789,12 +779,12 @@ fn validate_and_add_event_id(
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
db: &Database, db: &Database,
) -> Result<(Box<EventId>, CanonicalJsonObject)> { ) -> Result<(EventId, CanonicalJsonObject)> {
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response") Error::BadServerResponse("Invalid PDU in server response")
})?; })?;
let event_id = EventId::parse(format!( let event_id = EventId::try_from(&*format!(
"${}", "${}",
ruma::signatures::reference_hash(&value, room_version) ruma::signatures::reference_hash(&value, room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
@ -861,7 +851,7 @@ pub(crate) async fn invite_helper<'a>(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.to_owned()) .entry(room_id.clone())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -895,9 +885,12 @@ pub(crate) async fn invite_helper<'a>(
None None
}; };
// If there was no create event yet, assume we are creating a version 6 room right now // If there was no create event yet, assume we are creating a room with the default
// version right now
let room_version_id = create_event_content let room_version_id = create_event_content
.map_or(RoomVersionId::V6, |create_event| create_event.room_version); .map_or(db.globals.default_room_version(), |create_event| {
create_event.room_version
});
let room_version = let room_version =
RoomVersion::new(&room_version_id).expect("room version is supported"); RoomVersion::new(&room_version_id).expect("room version is supported");
@ -909,7 +902,6 @@ pub(crate) async fn invite_helper<'a>(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("member event is valid value"); .expect("member event is valid value");
@ -943,9 +935,9 @@ pub(crate) async fn invite_helper<'a>(
} }
let pdu = PduEvent { let pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(), event_id: ruma::event_id!("$thiswillbefilledinlater"),
room_id: room_id.to_owned(), room_id: room_id.clone(),
sender: sender_user.to_owned(), sender: sender_user.clone(),
origin_server_ts: utils::millis_since_unix_epoch() origin_server_ts: utils::millis_since_unix_epoch()
.try_into() .try_into()
.expect("time is valid"), .expect("time is valid"),
@ -1018,12 +1010,11 @@ pub(crate) async fn invite_helper<'a>(
}; };
// Generate event id // Generate event id
let expected_event_id = format!( let expected_event_id = EventId::try_from(&*format!(
"${}", "${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id) ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
); ))
let expected_event_id = <&EventId>::try_from(expected_event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
let response = db let response = db
@ -1033,7 +1024,7 @@ pub(crate) async fn invite_helper<'a>(
user_id.server_name(), user_id.server_name(),
create_invite::v2::Request { create_invite::v2::Request {
room_id, room_id,
event_id: expected_event_id, event_id: &expected_event_id,
room_version: &room_version_id, room_version: &room_version_id,
event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()),
invite_room_state: &invite_room_state, invite_room_state: &invite_room_state,
@ -1044,7 +1035,8 @@ pub(crate) async fn invite_helper<'a>(
let pub_key_map = RwLock::new(BTreeMap::new()); let pub_key_map = RwLock::new(BTreeMap::new());
// We do not add the event_id field to the pdu here because of signature and hashes checks // We do not add the event_id field to the pdu here because of signature and hashes checks
let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event) { let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event, &db)
{
Ok(t) => t, Ok(t) => t,
Err(_) => { Err(_) => {
// Event could not be converted to canonical json // Event could not be converted to canonical json
@ -1105,7 +1097,7 @@ pub(crate) async fn invite_helper<'a>(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.to_owned()) .entry(room_id.clone())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -1121,7 +1113,6 @@ pub(crate) async fn invite_helper<'a>(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(user_id)?, blurhash: db.users.blurhash(user_id)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,

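A pattern that recurs throughout the membership.rs diff above is that the event ID of a federation PDU is never taken from the remote server; it is recomputed as "$" plus the reference hash of the event's canonical JSON for the room version in use. The helper below is a condensed, purely illustrative version of that pattern (the function name is made up; Conduit inlines this at each call site), using the same ruma calls that appear in the diff.

```rust
use std::convert::TryFrom;

use ruma::{serde::CanonicalJsonObject, EventId, RoomVersionId};

// Illustrative only: recompute a PDU's event ID from its reference hash, as done
// inline in join_room_by_id_helper, validate_and_add_event_id and invite_helper
// above. The panics mirror the .expect() calls used in the diff.
fn event_id_from_pdu(value: &CanonicalJsonObject, room_version: &RoomVersionId) -> EventId {
    let hash = ruma::signatures::reference_hash(value, room_version)
        .expect("ruma can calculate reference hashes");
    EventId::try_from(&*format!("${}", hash))
        .expect("ruma's reference hashes are valid event ids")
}
```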
16
src/client_server/message.rs

@@ -5,8 +5,13 @@ use ruma::{
 r0::message::{get_message_events, send_message_event},
 },
 events::EventType,
+EventId,
 };
-use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
+use std::{
+collections::BTreeMap,
+convert::{TryFrom, TryInto},
+sync::Arc,
+};
 #[cfg(feature = "conduit_bin")]
 use rocket::{get, put};
@@ -62,9 +67,10 @@ pub async fn send_message_event_route(
 ));
 }
-let event_id = utils::string_from_bytes(&response)
-.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
-.try_into()
+let event_id = EventId::try_from(
+utils::string_from_bytes(&response)
+.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?,
+)
 .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
 return Ok(send_message_event::Response { event_id }.into());
 }
@@ -98,7 +104,7 @@ pub async fn send_message_event_route(
 db.flush()?;
-Ok(send_message_event::Response::new((*event_id).to_owned()).into())
+Ok(send_message_event::Response::new(event_id).into())
 }
 /// # `GET /_matrix/client/r0/rooms/{roomId}/messages`

10
src/client_server/push.rs

@@ -105,15 +105,15 @@ pub async fn get_pushrule_route(
 /// Creates a single specified push rule for this user.
 #[cfg_attr(
 feature = "conduit_bin",
-put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<body>")
+put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<req>")
 )]
-#[tracing::instrument(skip(db, body))]
+#[tracing::instrument(skip(db, req))]
 pub async fn set_pushrule_route(
 db: DatabaseGuard,
-body: Ruma<set_pushrule::Request<'_>>,
+req: Ruma<set_pushrule::Request<'_>>,
 ) -> ConduitResult<set_pushrule::Response> {
-let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-let body = body.body;
+let sender_user = req.sender_user.as_ref().expect("user is authenticated");
+let body = req.body;
 if body.scope != "global" {
 return Err(Error::BadRequest(

4
src/client_server/redact.rs

@@ -25,7 +25,6 @@ pub async fn redact_event_route(
 body: Ruma<redact_event::Request<'_>>,
 ) -> ConduitResult<redact_event::Response> {
 let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-let body = body.body;
 let mutex_state = Arc::clone(
 db.globals
@@ -46,7 +45,7 @@ pub async fn redact_event_route(
 .expect("event is valid, we just created it"),
 unsigned: None,
 state_key: None,
-redacts: Some(body.event_id.into()),
+redacts: Some(body.event_id.clone()),
 },
 sender_user,
 &body.room_id,
@@ -58,6 +57,5 @@ pub async fn redact_event_route(
 db.flush()?;
-let event_id = (*event_id).to_owned();
 Ok(redact_event::Response { event_id }.into())
 }

15
src/client_server/report.rs

@@ -1,11 +1,8 @@
-use crate::{
-database::{admin::AdminCommand, DatabaseGuard},
-ConduitResult, Error, Ruma,
-};
+use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma};
 use ruma::{
 api::client::{error::ErrorKind, r0::room::report_content},
 events::room::message,
-int,
+Int,
 };
 #[cfg(feature = "conduit_bin")]
@@ -36,7 +33,7 @@ pub async fn report_event_route(
 }
 };
-if body.score > int!(0) || body.score < int!(-100) {
+if body.score > Int::from(0) || body.score < Int::from(-100) {
 return Err(Error::BadRequest(
 ErrorKind::InvalidParam,
 "Invalid score, must be within 0 to -100",
@@ -60,7 +57,8 @@ pub async fn report_event_route(
 Report Score: {}\n\
 Report Reason: {}",
 sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
-),
+)
+.to_owned(),
 format!(
 "<details><summary>Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\
 </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\
@@ -74,7 +72,8 @@ pub async fn report_event_route(
 pdu.sender,
 body.score,
 RawStr::new(&body.reason).html_escape()
-),
+)
+.to_owned(),
 ),
 ));

43
src/client_server/room.rs

@ -22,12 +22,16 @@ use ruma::{
}, },
EventType, EventType,
}, },
int,
serde::{CanonicalJsonObject, JsonObject}, serde::{CanonicalJsonObject, JsonObject},
RoomAliasId, RoomId, RoomVersionId, RoomAliasId, RoomId,
}; };
use serde_json::{json, value::to_raw_value}; use serde_json::{json, value::to_raw_value};
use std::{cmp::max, collections::BTreeMap, convert::TryInto, sync::Arc}; use std::{
cmp::max,
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
};
use tracing::{info, warn}; use tracing::{info, warn};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
@ -84,16 +88,14 @@ pub async fn create_room_route(
)); ));
} }
let alias: Option<Box<RoomAliasId>> = let alias: Option<RoomAliasId> =
body.room_alias_name body.room_alias_name
.as_ref() .as_ref()
.map_or(Ok(None), |localpart| { .map_or(Ok(None), |localpart| {
// TODO: Check for invalid characters and maximum length // TODO: Check for invalid characters and maximum length
let alias = let alias =
RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name())) RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name()))
.map_err(|_| { .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.")
})?;
if db.rooms.id_from_alias(&alias)?.is_some() { if db.rooms.id_from_alias(&alias)?.is_some() {
Err(Error::BadRequest( Err(Error::BadRequest(
@ -107,7 +109,7 @@ pub async fn create_room_route(
let room_version = match body.room_version.clone() { let room_version = match body.room_version.clone() {
Some(room_version) => { Some(room_version) => {
if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 { if db.rooms.is_supported_version(&db, &room_version) {
room_version room_version
} else { } else {
return Err(Error::BadRequest( return Err(Error::BadRequest(
@ -116,7 +118,7 @@ pub async fn create_room_route(
)); ));
} }
} }
None => RoomVersionId::V6, None => db.globals.default_room_version(),
}; };
let content = match &body.creation_content { let content = match &body.creation_content {
@ -162,7 +164,7 @@ pub async fn create_room_route(
.get(), .get(),
); );
if de_result.is_err() { if let Err(_) = de_result {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::BadJson, ErrorKind::BadJson,
"Invalid creation content", "Invalid creation content",
@ -196,7 +198,6 @@ pub async fn create_room_route(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -222,11 +223,11 @@ pub async fn create_room_route(
}); });
let mut users = BTreeMap::new(); let mut users = BTreeMap::new();
users.insert(sender_user.clone(), int!(100)); users.insert(sender_user.clone(), 100.into());
if preset == create_room::RoomPreset::TrustedPrivateChat { if preset == create_room::RoomPreset::TrustedPrivateChat {
for invite_ in &body.invite { for invite_ in &body.invite {
users.insert(invite_.clone(), int!(100)); users.insert(invite_.clone(), 100.into());
} }
} }
@ -268,7 +269,7 @@ pub async fn create_room_route(
PduBuilder { PduBuilder {
event_type: EventType::RoomCanonicalAlias, event_type: EventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent { content: to_raw_value(&RoomCanonicalAliasEventContent {
alias: Some(room_alias_id.to_owned()), alias: Some(room_alias_id.clone()),
alt_aliases: vec![], alt_aliases: vec![],
}) })
.expect("We checked that alias earlier, it must be fine"), .expect("We checked that alias earlier, it must be fine"),
@ -504,7 +505,7 @@ pub async fn upgrade_room_route(
) -> ConduitResult<upgrade_room::Response> { ) -> ConduitResult<upgrade_room::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) { if !db.rooms.is_supported_version(&db, &body.new_version) {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::UnsupportedRoomVersion, ErrorKind::UnsupportedRoomVersion,
"This server does not support that room version.", "This server does not support that room version.",
@ -571,7 +572,7 @@ pub async fn upgrade_room_route(
// Use the m.room.tombstone event as the predecessor // Use the m.room.tombstone event as the predecessor
let predecessor = Some(ruma::events::room::create::PreviousRoom::new( let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
body.room_id.clone(), body.room_id.clone(),
(*tombstone_event_id).to_owned(), tombstone_event_id,
)); ));
// Send a m.room.create event containing a predecessor field and the applicable room_version // Send a m.room.create event containing a predecessor field and the applicable room_version
@ -601,7 +602,7 @@ pub async fn upgrade_room_route(
.get(), .get(),
); );
if de_result.is_err() { if let Err(_) = de_result {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::BadJson, ErrorKind::BadJson,
"Error forming creation event", "Error forming creation event",
@ -635,7 +636,6 @@ pub async fn upgrade_room_route(
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -700,7 +700,10 @@ pub async fn upgrade_room_route(
.map_err(|_| Error::bad_database("Invalid room event in database."))?; .map_err(|_| Error::bad_database("Invalid room event in database."))?;
// Setting events_default and invite to the greater of 50 and users_default + 1 // Setting events_default and invite to the greater of 50 and users_default + 1
let new_level = max(int!(50), power_levels_event_content.users_default + int!(1)); let new_level = max(
50.into(),
power_levels_event_content.users_default + 1.into(),
);
power_levels_event_content.events_default = new_level; power_levels_event_content.events_default = new_level;
power_levels_event_content.invite = new_level; power_levels_event_content.invite = new_level;
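
The room.rs hunks above replace the hard-coded V5/V6 checks with db.rooms.is_supported_version() and fall back to db.globals.default_room_version() when the client requests no version. A minimal stand-alone sketch of that selection logic, using simplified stand-in types rather than Conduit's real Globals or ruma's RoomVersionId (pick_room_version is a hypothetical helper, not a function from this changeset):

#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq)]
enum RoomVersionId { Version3, Version4, Version5, Version6 } // stand-in for ruma::RoomVersionId

struct Globals {
    default_room_version: RoomVersionId,
    supported: Vec<RoomVersionId>,
}

impl Globals {
    fn supported_room_versions(&self) -> Vec<RoomVersionId> { self.supported.clone() }
    fn default_room_version(&self) -> RoomVersionId { self.default_room_version.clone() }
}

// A requested version must be supported; no requested version means the configured default.
fn pick_room_version(
    requested: Option<RoomVersionId>,
    globals: &Globals,
) -> Result<RoomVersionId, &'static str> {
    match requested {
        Some(v) if globals.supported_room_versions().contains(&v) => Ok(v),
        Some(_) => Err("This server does not support that room version."),
        None => Ok(globals.default_room_version()),
    }
}

fn main() {
    let globals = Globals {
        default_room_version: RoomVersionId::Version6,
        supported: vec![RoomVersionId::Version5, RoomVersionId::Version6],
    };
    assert_eq!(pick_room_version(None, &globals), Ok(RoomVersionId::Version6));
    assert!(pick_room_version(Some(RoomVersionId::Version3), &globals).is_err());
}

upgrade_room_route goes through the same gate for body.new_version instead of matching on V5 | V6.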

6
src/client_server/state.rs

@ -52,7 +52,6 @@ pub async fn send_state_event_for_key_route(
db.flush()?; db.flush()?;
let event_id = (*event_id).to_owned();
Ok(send_state_event::Response { event_id }.into()) Ok(send_state_event::Response { event_id }.into())
} }
@ -94,7 +93,6 @@ pub async fn send_state_event_for_empty_key_route(
db.flush()?; db.flush()?;
let event_id = (*event_id).to_owned();
Ok(send_state_event::Response { event_id }.into()) Ok(send_state_event::Response { event_id }.into())
} }
@ -269,7 +267,7 @@ async fn send_state_event_for_key_helper(
event_type: EventType, event_type: EventType,
json: &Raw<AnyStateEventContent>, json: &Raw<AnyStateEventContent>,
state_key: String, state_key: String,
) -> Result<Arc<EventId>> { ) -> Result<EventId> {
let sender_user = sender; let sender_user = sender;
// TODO: Review this check, error if event is unparsable, use event type, allow alias if it // TODO: Review this check, error if event is unparsable, use event type, allow alias if it
@ -305,7 +303,7 @@ async fn send_state_event_for_key_helper(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.to_owned()) .entry(room_id.clone())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;

17
src/client_server/sync.rs

@ -10,7 +10,7 @@ use ruma::{
}; };
use std::{ use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
convert::TryInto, convert::{TryFrom, TryInto},
sync::Arc, sync::Arc,
time::Duration, time::Duration,
}; };
@ -61,9 +61,8 @@ pub async fn sync_events_route(
db: DatabaseGuard, db: DatabaseGuard,
body: Ruma<sync_events::Request<'_>>, body: Ruma<sync_events::Request<'_>>,
) -> Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> { ) -> Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
let sender_user = body.sender_user.expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let body = body.body;
let arc_db = Arc::new(db); let arc_db = Arc::new(db);
@ -133,7 +132,7 @@ pub async fn sync_events_route(
async fn sync_helper_wrapper( async fn sync_helper_wrapper(
db: Arc<DatabaseGuard>, db: Arc<DatabaseGuard>,
sender_user: Box<UserId>, sender_user: UserId,
sender_device: Box<DeviceId>, sender_device: Box<DeviceId>,
since: Option<String>, since: Option<String>,
full_state: bool, full_state: bool,
@ -177,7 +176,7 @@ async fn sync_helper_wrapper(
async fn sync_helper( async fn sync_helper(
db: Arc<DatabaseGuard>, db: Arc<DatabaseGuard>,
sender_user: Box<UserId>, sender_user: UserId,
sender_device: Box<DeviceId>, sender_device: Box<DeviceId>,
since: Option<String>, since: Option<String>,
full_state: bool, full_state: bool,
@ -297,7 +296,7 @@ async fn sync_helper(
})?; })?;
if let Some(state_key) = &pdu.state_key { if let Some(state_key) = &pdu.state_key {
let user_id = UserId::parse(state_key.clone()).map_err(|_| { let user_id = UserId::try_from(state_key.clone()).map_err(|_| {
Error::bad_database("Invalid UserId in member PDU.") Error::bad_database("Invalid UserId in member PDU.")
})?; })?;
@ -425,7 +424,7 @@ async fn sync_helper(
} }
if let Some(state_key) = &state_event.state_key { if let Some(state_key) = &state_event.state_key {
let user_id = UserId::parse(state_key.clone()) let user_id = UserId::try_from(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == sender_user { if user_id == sender_user {
@ -794,7 +793,7 @@ fn share_encrypted_room(
) -> Result<bool> { ) -> Result<bool> {
Ok(db Ok(db
.rooms .rooms
.get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
.filter(|room_id| room_id != ignore_room) .filter(|room_id| room_id != ignore_room)
.filter_map(|other_room_id| { .filter_map(|other_room_id| {

6
src/client_server/voip.rs

@ -26,7 +26,7 @@ pub async fn turn_server_route(
let turn_secret = db.globals.turn_secret(); let turn_secret = db.globals.turn_secret();
let (username, password) = if !turn_secret.is_empty() { let (username, password) = if turn_secret != "" {
let expiry = SecondsSinceUnixEpoch::from_system_time( let expiry = SecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()), SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
) )
@ -49,8 +49,8 @@ pub async fn turn_server_route(
}; };
Ok(get_turn_server_info::Response { Ok(get_turn_server_info::Response {
username, username: username,
password, password: password,
uris: db.globals.turn_uris().to_vec(), uris: db.globals.turn_uris().to_vec(),
ttl: Duration::from_secs(db.globals.turn_ttl()), ttl: Duration::from_secs(db.globals.turn_ttl()),
} }
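
turn_server_route only derives time-limited credentials when a turn_secret is configured, with the expiry computed above as now + turn_ttl. The derivation itself lies outside the shown hunks, so the following is only a sketch of the conventional TURN shared-secret scheme, not necessarily what Conduit does; the hmac, sha-1 and base64 crates used here are assumptions of this example:

use hmac::{Hmac, Mac};
use sha1::Sha1;

// Conventional TURN REST credentials: username = "<unix-expiry>:<user>",
// password = base64(HMAC-SHA1(turn_secret, username)).
fn turn_credentials(turn_secret: &str, user_id: &str, expiry_unix: u64) -> (String, String) {
    let username = format!("{}:{}", expiry_unix, user_id);
    let mut mac = Hmac::<Sha1>::new_from_slice(turn_secret.as_bytes())
        .expect("HMAC-SHA1 accepts keys of any length");
    mac.update(username.as_bytes());
    let password = base64::encode(mac.finalize().into_bytes());
    (username, password)
}

fn main() {
    let (user, pass) = turn_credentials("secret", "@alice:example.org", 1_700_000_000);
    println!("username: {user}\npassword: {pass}");
}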

30
src/database.rs

@ -24,7 +24,7 @@ use rocket::{
request::{FromRequest, Request}, request::{FromRequest, Request},
Shutdown, State, Shutdown, State,
}; };
use ruma::{DeviceId, EventId, RoomId, ServerName, UserId}; use ruma::{DeviceId, EventId, RoomId, RoomVersionId, ServerName, UserId};
use serde::{de::IgnoredAny, Deserialize}; use serde::{de::IgnoredAny, Deserialize};
use std::{ use std::{
collections::{BTreeMap, HashMap, HashSet}, collections::{BTreeMap, HashMap, HashSet},
@ -63,8 +63,12 @@ pub struct Config {
allow_federation: bool, allow_federation: bool,
#[serde(default = "true_fn")] #[serde(default = "true_fn")]
allow_room_creation: bool, allow_room_creation: bool,
#[serde(default = "true_fn")]
allow_unstable_room_versions: bool,
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
pub allow_jaeger: bool, pub allow_jaeger: bool,
#[serde(default = "default_room_version")]
default_room_version: RoomVersionId,
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
pub tracing_flame: bool, pub tracing_flame: bool,
#[serde(default)] #[serde(default)]
@ -137,6 +141,10 @@ fn default_max_concurrent_requests() -> u16 {
100 100
} }
fn default_room_version() -> RoomVersionId {
RoomVersionId::Version6
}
fn default_log() -> String { fn default_log() -> String {
"info,state_res=warn,rocket=off,_=off,sled=off".to_owned() "info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
} }
@ -476,9 +484,10 @@ impl Database {
if db.globals.database_version()? < 6 { if db.globals.database_version()? < 6 {
// Set room member count // Set room member count
for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { for (roomid, _) in db.rooms.roomid_shortstatehash.iter() {
let string = utils::string_from_bytes(&roomid).unwrap(); let room_id =
let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap();
db.rooms.update_joined_count(room_id, &db)?;
db.rooms.update_joined_count(&room_id, &db)?;
} }
db.globals.bump_database_version(6)?; db.globals.bump_database_version(6)?;
@ -488,7 +497,7 @@ impl Database {
if db.globals.database_version()? < 7 { if db.globals.database_version()? < 7 {
// Upgrade state store // Upgrade state store
let mut last_roomstates: HashMap<Box<RoomId>, u64> = HashMap::new(); let mut last_roomstates: HashMap<RoomId, u64> = HashMap::new();
let mut current_sstatehash: Option<u64> = None; let mut current_sstatehash: Option<u64> = None;
let mut current_room = None; let mut current_room = None;
let mut current_state = HashSet::new(); let mut current_state = HashSet::new();
@ -569,7 +578,7 @@ impl Database {
if let Some(current_sstatehash) = current_sstatehash { if let Some(current_sstatehash) = current_sstatehash {
handle_state( handle_state(
current_sstatehash, current_sstatehash,
current_room.as_deref().unwrap(), current_room.as_ref().unwrap(),
current_state, current_state,
&mut last_roomstates, &mut last_roomstates,
)?; )?;
@ -585,9 +594,10 @@ impl Database {
.get(&seventid) .get(&seventid)
.unwrap() .unwrap()
.unwrap(); .unwrap();
let string = utils::string_from_bytes(&event_id).unwrap(); let event_id =
let event_id = <&EventId>::try_from(string.as_str()).unwrap(); EventId::try_from(utils::string_from_bytes(&event_id).unwrap())
let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap(); .unwrap();
let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap();
if Some(&pdu.room_id) != current_room.as_ref() { if Some(&pdu.room_id) != current_room.as_ref() {
current_room = Some(pdu.room_id.clone()); current_room = Some(pdu.room_id.clone());
@ -602,7 +612,7 @@ impl Database {
if let Some(current_sstatehash) = current_sstatehash { if let Some(current_sstatehash) = current_sstatehash {
handle_state( handle_state(
current_sstatehash, current_sstatehash,
current_room.as_deref().unwrap(), current_room.as_ref().unwrap(),
current_state, current_state,
&mut last_roomstates, &mut last_roomstates,
)?; )?;
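
The two Config fields added above only matter when they are omitted from the configuration file, where the serde default functions kick in. A small self-contained demonstration of that behaviour with simplified stand-in types (a String in place of RoomVersionId; the toml crate is only for this demo and is not implied by the changeset):

use serde::Deserialize;

fn true_fn() -> bool { true }
fn default_room_version() -> String { "6".to_owned() } // stand-in for RoomVersionId::Version6

#[derive(Debug, Deserialize)]
struct Config {
    #[serde(default = "true_fn")]
    allow_unstable_room_versions: bool,
    #[serde(default = "default_room_version")]
    default_room_version: String,
}

fn main() {
    // No keys set: both defaults apply.
    let cfg: Config = toml::from_str("").unwrap();
    assert!(cfg.allow_unstable_room_versions);
    assert_eq!(cfg.default_room_version, "6");

    // Explicit values override the defaults.
    let cfg: Config =
        toml::from_str("allow_unstable_room_versions = false\ndefault_room_version = \"5\"")
            .unwrap();
    assert!(!cfg.allow_unstable_room_versions);
    assert_eq!(cfg.default_room_version, "5");
}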

11
src/database/admin.rs

@ -1,4 +1,7 @@
use std::{convert::TryInto, sync::Arc}; use std::{
convert::{TryFrom, TryInto},
sync::Arc,
};
use crate::{pdu::PduBuilder, Database}; use crate::{pdu::PduBuilder, Database};
use rocket::futures::{channel::mpsc, stream::StreamExt}; use rocket::futures::{channel::mpsc, stream::StreamExt};
@ -33,14 +36,14 @@ impl Admin {
let guard = db.read().await; let guard = db.read().await;
let conduit_user = UserId::parse(format!("@conduit:{}", guard.globals.server_name())) let conduit_user =
UserId::try_from(format!("@conduit:{}", guard.globals.server_name()))
.expect("@conduit:server_name is valid"); .expect("@conduit:server_name is valid");
let conduit_room = guard let conduit_room = guard
.rooms .rooms
.id_from_alias( .id_from_alias(
format!("#admins:{}", guard.globals.server_name()) &format!("#admins:{}", guard.globals.server_name())
.as_str()
.try_into() .try_into()
.expect("#admins:server_name is a valid room alias"), .expect("#admins:server_name is a valid room alias"),
) )

57
src/database/globals.rs

@ -4,7 +4,8 @@ use ruma::{
client::r0::sync::sync_events, client::r0::sync::sync_events,
federation::discovery::{ServerSigningKeys, VerifyKey}, federation::discovery::{ServerSigningKeys, VerifyKey},
}, },
DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId, DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName,
ServerSigningKeyId, UserId,
}; };
use std::{ use std::{
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap},
@ -39,14 +40,16 @@ pub struct Globals {
keypair: Arc<ruma::signatures::Ed25519KeyPair>, keypair: Arc<ruma::signatures::Ed25519KeyPair>,
dns_resolver: TokioAsyncResolver, dns_resolver: TokioAsyncResolver,
jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>, jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>,
pub stable_room_versions: Vec<RoomVersionId>,
pub unstable_room_versions: Vec<RoomVersionId>,
pub(super) server_signingkeys: Arc<dyn Tree>, pub(super) server_signingkeys: Arc<dyn Tree>,
pub bad_event_ratelimiter: Arc<RwLock<HashMap<Box<EventId>, RateLimitState>>>, pub bad_event_ratelimiter: Arc<RwLock<HashMap<EventId, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>, pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<HashMap<Box<ServerName>, Arc<Semaphore>>>>, pub servername_ratelimiter: Arc<RwLock<HashMap<Box<ServerName>, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<HashMap<(Box<UserId>, Box<DeviceId>), SyncHandle>>, pub sync_receivers: RwLock<HashMap<(UserId, Box<DeviceId>), SyncHandle>>,
pub roomid_mutex_insert: RwLock<HashMap<Box<RoomId>, Arc<Mutex<()>>>>, pub roomid_mutex_insert: RwLock<HashMap<RoomId, Arc<Mutex<()>>>>,
pub roomid_mutex_state: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>, pub roomid_mutex_state: RwLock<HashMap<RoomId, Arc<TokioMutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>, // this lock will be held longer pub roomid_mutex_federation: RwLock<HashMap<RoomId, Arc<TokioMutex<()>>>>, // this lock will be held longer
pub rotate: RotationHandler, pub rotate: RotationHandler,
} }
@ -132,7 +135,16 @@ impl Globals {
.as_ref() .as_ref()
.map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static());
let s = Self { // Supported and stable room versions
let stable_room_versions = vec![RoomVersionId::Version6];
// Experimental, partially supported room versions
let unstable_room_versions = vec![
RoomVersionId::Version3,
RoomVersionId::Version4,
RoomVersionId::Version5,
];
let mut s = Self {
globals, globals,
config, config,
keypair: Arc::new(keypair), keypair: Arc::new(keypair),
@ -143,6 +155,8 @@ impl Globals {
tls_name_override, tls_name_override,
server_signingkeys, server_signingkeys,
jwt_decoding_key, jwt_decoding_key,
stable_room_versions,
unstable_room_versions,
bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
@ -155,6 +169,14 @@ impl Globals {
fs::create_dir_all(s.get_media_folder())?; fs::create_dir_all(s.get_media_folder())?;
if !s
.supported_room_versions()
.contains(&s.config.default_room_version)
{
error!("Room version in config isn't supported, falling back to Version 6");
s.config.default_room_version = RoomVersionId::Version6;
};
Ok(s) Ok(s)
} }
@ -214,6 +236,14 @@ impl Globals {
self.config.allow_room_creation self.config.allow_room_creation
} }
pub fn allow_unstable_room_versions(&self) -> bool {
self.config.allow_unstable_room_versions
}
pub fn default_room_version(&self) -> RoomVersionId {
self.config.default_room_version.clone()
}
pub fn trusted_servers(&self) -> &[Box<ServerName>] { pub fn trusted_servers(&self) -> &[Box<ServerName>] {
&self.config.trusted_servers &self.config.trusted_servers
} }
@ -246,6 +276,15 @@ impl Globals {
&self.config.turn_secret &self.config.turn_secret
} }
pub fn supported_room_versions(&self) -> Vec<RoomVersionId> {
let mut room_versions: Vec<RoomVersionId> = vec![];
room_versions.extend(self.stable_room_versions.clone());
if self.allow_unstable_room_versions() {
room_versions.extend(self.unstable_room_versions.clone());
};
room_versions
}
/// TODO: the key valid until timestamp is only honored in room version > 4 /// TODO: the key valid until timestamp is only honored in room version > 4
/// Remove the outdated keys and insert the new ones. /// Remove the outdated keys and insert the new ones.
/// ///
@ -254,7 +293,7 @@ impl Globals {
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> { ) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
// Not atomic, but this is not critical // Not atomic, but this is not critical
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
@ -293,7 +332,7 @@ impl Globals {
pub fn signing_keys_for( pub fn signing_keys_for(
&self, &self,
origin: &ServerName, origin: &ServerName,
) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> { ) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
let signingkeys = self let signingkeys = self
.server_signingkeys .server_signingkeys
.get(origin.as_bytes())? .get(origin.as_bytes())?
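
Taken together, the additions to Globals mean the advertised room versions are the stable list plus, only when allow_unstable_room_versions is enabled, the unstable list, and a misconfigured default is reset to Version 6 at startup. A compact stand-alone restatement of that composition, again with simplified stand-ins rather than the real Globals:

#[derive(Clone, Debug, PartialEq)]
enum RoomVersionId { Version3, Version4, Version5, Version6 }

fn supported_room_versions(allow_unstable: bool) -> Vec<RoomVersionId> {
    // stable_room_versions
    let mut versions = vec![RoomVersionId::Version6];
    if allow_unstable {
        // unstable_room_versions
        versions.extend([
            RoomVersionId::Version3,
            RoomVersionId::Version4,
            RoomVersionId::Version5,
        ]);
    }
    versions
}

fn main() {
    assert_eq!(supported_room_versions(false), vec![RoomVersionId::Version6]);
    assert_eq!(supported_room_versions(true).len(), 4);

    // Startup guard: an unsupported configured default falls back to Version 6.
    let configured = RoomVersionId::Version3;
    let default = if supported_room_versions(false).contains(&configured) {
        configured
    } else {
        RoomVersionId::Version6
    };
    assert_eq!(default, RoomVersionId::Version6);
}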

8
src/database/key_backups.rs

@ -6,7 +6,7 @@ use ruma::{
}, },
RoomId, UserId, RoomId, UserId,
}; };
use std::{collections::BTreeMap, sync::Arc}; use std::{collections::BTreeMap, convert::TryFrom, sync::Arc};
use super::abstraction::Tree; use super::abstraction::Tree;
@ -209,13 +209,13 @@ impl KeyBackups {
&self, &self,
user_id: &UserId, user_id: &UserId,
version: &str, version: &str,
) -> Result<BTreeMap<Box<RoomId>, RoomKeyBackup>> { ) -> Result<BTreeMap<RoomId, RoomKeyBackup>> {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
prefix.extend_from_slice(version.as_bytes()); prefix.extend_from_slice(version.as_bytes());
prefix.push(0xff); prefix.push(0xff);
let mut rooms = BTreeMap::<Box<RoomId>, RoomKeyBackup>::new(); let mut rooms = BTreeMap::<RoomId, RoomKeyBackup>::new();
for result in self for result in self
.backupkeyid_backup .backupkeyid_backup
@ -231,7 +231,7 @@ impl KeyBackups {
Error::bad_database("backupkeyid_backup session_id is invalid.") Error::bad_database("backupkeyid_backup session_id is invalid.")
})?; })?;
let room_id = RoomId::parse( let room_id = RoomId::try_from(
utils::string_from_bytes(parts.next().ok_or_else(|| { utils::string_from_bytes(parts.next().ok_or_else(|| {
Error::bad_database("backupkeyid_backup key is invalid.") Error::bad_database("backupkeyid_backup key is invalid.")
})?) })?)

4
src/database/pusher.rs

@ -234,7 +234,7 @@ pub fn get_actions<'a>(
db: &Database, db: &Database,
) -> Result<&'a [Action]> { ) -> Result<&'a [Action]> {
let ctx = PushConditionRoomCtx { let ctx = PushConditionRoomCtx {
room_id: room_id.to_owned(), room_id: room_id.clone(),
member_count: 10_u32.into(), // TODO: get member count efficiently member_count: 10_u32.into(), // TODO: get member count efficiently
user_display_name: db user_display_name: db
.users .users
@ -277,7 +277,7 @@ async fn send_notice(
let mut data_minus_url = pusher.data.clone(); let mut data_minus_url = pusher.data.clone();
// The url must be stripped off according to spec // The url must be stripped off according to spec
data_minus_url.url = None; data_minus_url.url = None;
device.data = data_minus_url; device.data = Some(data_minus_url);
// Tweaks are only added if the format is NOT event_id_only // Tweaks are only added if the format is NOT event_id_only
if !event_id_only { if !event_id_only {

180
src/database/rooms.rs

@ -36,8 +36,6 @@ use std::{
borrow::Cow, borrow::Cow,
collections::{BTreeMap, HashMap, HashSet}, collections::{BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
fmt::Debug,
iter,
mem::size_of, mem::size_of,
sync::{Arc, Mutex, RwLock}, sync::{Arc, Mutex, RwLock},
time::Instant, time::Instant,
@ -109,14 +107,14 @@ pub struct Rooms {
/// RoomId + EventId -> Parent PDU EventId. /// RoomId + EventId -> Parent PDU EventId.
pub(super) referencedevents: Arc<dyn Tree>, pub(super) referencedevents: Arc<dyn Tree>,
pub(super) pdu_cache: Mutex<LruCache<Box<EventId>, Arc<PduEvent>>>, pub(super) pdu_cache: Mutex<LruCache<EventId, Arc<PduEvent>>>,
pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>, pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>, pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,
pub(super) eventidshort_cache: Mutex<LruCache<Box<EventId>, u64>>, pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>,
pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>, pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>,
pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>, pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>,
pub(super) our_real_users_cache: RwLock<HashMap<Box<RoomId>, Arc<HashSet<Box<UserId>>>>>, pub(super) our_real_users_cache: RwLock<HashMap<RoomId, Arc<HashSet<UserId>>>>,
pub(super) appservice_in_room_cache: RwLock<HashMap<Box<RoomId>, HashMap<String, bool>>>, pub(super) appservice_in_room_cache: RwLock<HashMap<RoomId, HashMap<String, bool>>>,
pub(super) stateinfo_cache: Mutex< pub(super) stateinfo_cache: Mutex<
LruCache< LruCache<
u64, u64,
@ -131,6 +129,12 @@ pub struct Rooms {
} }
impl Rooms { impl Rooms {
/// Returns true if a given room version is supported
#[tracing::instrument(skip(self, db))]
pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool {
db.globals.supported_room_versions().contains(room_version)
}
/// Builds a StateMap by iterating over all keys that start /// Builds a StateMap by iterating over all keys that start
/// with state_hash, this gives the full state for the given state_hash. /// with state_hash, this gives the full state for the given state_hash.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
@ -436,7 +440,7 @@ impl Rooms {
None => continue, None => continue,
}; };
let user_id = match UserId::parse(state_key) { let user_id = match UserId::try_from(state_key) {
Ok(id) => id, Ok(id) => id,
Err(_) => continue, Err(_) => continue,
}; };
@ -744,7 +748,7 @@ impl Rooms {
self.eventidshort_cache self.eventidshort_cache
.lock() .lock()
.unwrap() .unwrap()
.insert(event_id.to_owned(), short); .insert(event_id.clone(), short);
Ok(short) Ok(short)
} }
@ -873,10 +877,12 @@ impl Rooms {
.get(&shorteventid.to_be_bytes())? .get(&shorteventid.to_be_bytes())?
.ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?;
let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { let event_id = Arc::new(
EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") Error::bad_database("EventID in shorteventid_eventid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?,
);
self.shorteventid_cache self.shorteventid_cache
.lock() .lock()
@ -1112,7 +1118,7 @@ impl Rooms {
self.pdu_cache self.pdu_cache
.lock() .lock()
.unwrap() .unwrap()
.insert(event_id.to_owned(), Arc::clone(&pdu)); .insert(event_id.clone(), Arc::clone(&pdu));
Ok(Some(pdu)) Ok(Some(pdu))
} else { } else {
Ok(None) Ok(None)
@ -1162,14 +1168,14 @@ impl Rooms {
/// Returns the leaf pdus of a room. /// Returns the leaf pdus of a room.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result<HashSet<Arc<EventId>>> { pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result<HashSet<EventId>> {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.roomid_pduleaves self.roomid_pduleaves
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(_, bytes)| { .map(|(_, bytes)| {
EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") Error::bad_database("EventID in roomid_pduleaves is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))
@ -1178,7 +1184,7 @@ impl Rooms {
} }
#[tracing::instrument(skip(self, room_id, event_ids))] #[tracing::instrument(skip(self, room_id, event_ids))]
pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc<EventId>]) -> Result<()> { pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> {
for prev in event_ids { for prev in event_ids {
let mut key = room_id.as_bytes().to_vec(); let mut key = room_id.as_bytes().to_vec();
key.extend_from_slice(prev.as_bytes()); key.extend_from_slice(prev.as_bytes());
@ -1193,11 +1199,7 @@ impl Rooms {
/// The provided `event_ids` become the new leaves, this allows a room to have multiple /// The provided `event_ids` become the new leaves, this allows a room to have multiple
/// `prev_events`. /// `prev_events`.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn replace_pdu_leaves<'a>( pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> {
&self,
room_id: &RoomId,
event_ids: impl IntoIterator<Item = &'a EventId> + Debug,
) -> Result<()> {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -1261,11 +1263,11 @@ impl Rooms {
/// ///
/// Returns pdu id /// Returns pdu id
#[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))]
pub fn append_pdu<'a>( pub fn append_pdu(
&self, &self,
pdu: &PduEvent, pdu: &PduEvent,
mut pdu_json: CanonicalJsonObject, mut pdu_json: CanonicalJsonObject,
leaves: impl IntoIterator<Item = &'a EventId> + Debug, leaves: &[EventId],
db: &Database, db: &Database,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists");
@ -1424,7 +1426,7 @@ impl Rooms {
} }
// if the state_key fails // if the state_key fails
let target_user_id = UserId::parse(state_key.clone()) let target_user_id = UserId::try_from(state_key.clone())
.expect("This state_key was previously validated"); .expect("This state_key was previously validated");
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get()) let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
@ -1480,9 +1482,8 @@ impl Rooms {
if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name()))
&& self && self
.id_from_alias( .id_from_alias(
<&RoomAliasId>::try_from( &format!("#admins:{}", db.globals.server_name())
format!("#admins:{}", db.globals.server_name()).as_str(), .try_into()
)
.expect("#admins:server_name is a valid room alias"), .expect("#admins:server_name is a valid room alias"),
)? )?
.as_ref() .as_ref()
@ -1533,7 +1534,7 @@ impl Rooms {
} }
"get_auth_chain" => { "get_auth_chain" => {
if args.len() == 1 { if args.len() == 1 {
if let Ok(event_id) = EventId::parse_arc(args[0]) { if let Ok(event_id) = EventId::try_from(args[0]) {
if let Some(event) = db.rooms.get_pdu_json(&event_id)? { if let Some(event) = db.rooms.get_pdu_json(&event_id)? {
let room_id_str = event let room_id_str = event
.get("room_id") .get("room_id")
@ -1544,12 +1545,12 @@ impl Rooms {
) )
})?; })?;
let room_id = <&RoomId>::try_from(room_id_str) let room_id = RoomId::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
let start = Instant::now(); let start = Instant::now();
let count = server_server::get_auth_chain( let count = server_server::get_auth_chain(
room_id, &room_id,
vec![event_id], vec![Arc::new(event_id)],
db, db,
)? )?
.count(); .count();
@ -1572,12 +1573,12 @@ impl Rooms {
let string = body[1..body.len() - 1].join("\n"); let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) { match serde_json::from_str(&string) {
Ok(value) => { Ok(value) => {
let event_id = EventId::parse(format!( let event_id = EventId::try_from(&*format!(
"${}", "${}",
// Anything higher than version3 behaves the same // Anything higher than version3 behaves the same
ruma::signatures::reference_hash( ruma::signatures::reference_hash(
&value, &value,
&RoomVersionId::V6 &RoomVersionId::Version6
) )
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
)) ))
@ -1627,7 +1628,7 @@ impl Rooms {
} }
"get_pdu" => { "get_pdu" => {
if args.len() == 1 { if args.len() == 1 {
if let Ok(event_id) = EventId::parse(args[0]) { if let Ok(event_id) = EventId::try_from(args[0]) {
let mut outlier = false; let mut outlier = false;
let mut pdu_json = let mut pdu_json =
db.rooms.get_non_outlier_pdu_json(&event_id)?; db.rooms.get_non_outlier_pdu_json(&event_id)?;
@ -1953,7 +1954,7 @@ impl Rooms {
room_id: &RoomId, room_id: &RoomId,
db: &Database, db: &Database,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
) -> Result<Arc<EventId>> { ) -> Result<EventId> {
let PduBuilder { let PduBuilder {
event_type, event_type,
content, content,
@ -1988,9 +1989,12 @@ impl Rooms {
None None
}; };
// If there was no create event yet, assume we are creating a version 6 room right now // If there was no create event yet, assume we are creating a room with the default
// version right now
let room_version_id = create_event_content let room_version_id = create_event_content
.map_or(RoomVersionId::V6, |create_event| create_event.room_version); .map_or(db.globals.default_room_version(), |create_event| {
create_event.room_version
});
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
let auth_events = let auth_events =
@ -2019,9 +2023,9 @@ impl Rooms {
} }
let mut pdu = PduEvent { let mut pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(), event_id: ruma::event_id!("$thiswillbefilledinlater"),
room_id: room_id.to_owned(), room_id: room_id.clone(),
sender: sender.to_owned(), sender: sender.clone(),
origin_server_ts: utils::millis_since_unix_epoch() origin_server_ts: utils::millis_since_unix_epoch()
.try_into() .try_into()
.expect("time is valid"), .expect("time is valid"),
@ -2086,7 +2090,7 @@ impl Rooms {
.expect("event is valid, we just created it"); .expect("event is valid, we just created it");
// Generate event id // Generate event id
pdu.event_id = EventId::parse_arc(format!( pdu.event_id = EventId::try_from(&*format!(
"${}", "${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id) ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
@ -2110,7 +2114,7 @@ impl Rooms {
pdu_json, pdu_json,
// Since this PDU references all pdu_leaves we can update the leaves // Since this PDU references all pdu_leaves we can update the leaves
// of the room // of the room
iter::once(&*pdu.event_id), &[pdu.event_id.clone()],
db, db,
)?; )?;
@ -2209,7 +2213,7 @@ impl Rooms {
let mut first_pdu_id = prefix.clone(); let mut first_pdu_id = prefix.clone();
first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes());
let user_id = user_id.to_owned(); let user_id = user_id.clone();
Ok(self Ok(self
.pduid_pdu .pduid_pdu
@ -2246,7 +2250,7 @@ impl Rooms {
let current: &[u8] = &current; let current: &[u8] = &current;
let user_id = user_id.to_owned(); let user_id = user_id.clone();
Ok(self Ok(self
.pduid_pdu .pduid_pdu
@ -2283,7 +2287,7 @@ impl Rooms {
let current: &[u8] = &current; let current: &[u8] = &current;
let user_id = user_id.to_owned(); let user_id = user_id.clone();
Ok(self Ok(self
.pduid_pdu .pduid_pdu
@ -2415,7 +2419,7 @@ impl Rooms {
for room_ids in direct_event.content.0.values_mut() { for room_ids in direct_event.content.0.values_mut() {
if room_ids.iter().any(|r| r == &predecessor.room_id) { if room_ids.iter().any(|r| r == &predecessor.room_id) {
room_ids.push(room_id.to_owned()); room_ids.push(room_id.clone());
room_ids_updated = true; room_ids_updated = true;
} }
} }
@ -2454,11 +2458,7 @@ impl Rooms {
EventType::IgnoredUserList, EventType::IgnoredUserList,
)? )?
.map_or(false, |ignored| { .map_or(false, |ignored| {
ignored ignored.content.ignored_users.contains(sender)
.content
.ignored_users
.iter()
.any(|user| user == sender)
}); });
if is_ignored { if is_ignored {
@ -2544,7 +2544,7 @@ impl Rooms {
self.our_real_users_cache self.our_real_users_cache
.write() .write()
.unwrap() .unwrap()
.insert(room_id.to_owned(), Arc::new(real_users)); .insert(room_id.clone(), Arc::new(real_users));
for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) {
if !joined_servers.remove(&old_joined_server) { if !joined_servers.remove(&old_joined_server) {
@ -2589,7 +2589,7 @@ impl Rooms {
&self, &self,
room_id: &RoomId, room_id: &RoomId,
db: &Database, db: &Database,
) -> Result<Arc<HashSet<Box<UserId>>>> { ) -> Result<Arc<HashSet<UserId>>> {
let maybe = self let maybe = self
.our_real_users_cache .our_real_users_cache
.read() .read()
@ -2657,7 +2657,7 @@ impl Rooms {
self.appservice_in_room_cache self.appservice_in_room_cache
.write() .write()
.unwrap() .unwrap()
.entry(room_id.to_owned()) .entry(room_id.clone())
.or_default() .or_default()
.insert(appservice.0.clone(), in_room); .insert(appservice.0.clone(), in_room);
@ -2701,7 +2701,7 @@ impl Rooms {
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.to_owned()) .entry(room_id.clone())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -2761,7 +2761,7 @@ impl Rooms {
.filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
.filter_map(|sender| UserId::parse(sender).ok()) .filter_map(|sender| UserId::try_from(sender).ok())
.map(|user| user.server_name().to_owned()) .map(|user| user.server_name().to_owned())
.collect(); .collect();
@ -2785,9 +2785,7 @@ impl Rooms {
let (make_leave_response, remote_server) = make_leave_response_and_server?; let (make_leave_response, remote_server) = make_leave_response_and_server?;
let room_version_id = match make_leave_response.room_version { let room_version_id = match make_leave_response.room_version {
Some(version) if version == RoomVersionId::V5 || version == RoomVersionId::V6 => { Some(version) if self.is_supported_version(&db, &version) => version,
version
}
_ => return Err(Error::BadServerResponse("Room version is not supported")), _ => return Err(Error::BadServerResponse("Room version is not supported")),
}; };
@ -2822,7 +2820,7 @@ impl Rooms {
.expect("event is valid, we just created it"); .expect("event is valid, we just created it");
// Generate event id // Generate event id
let event_id = EventId::parse(format!( let event_id = EventId::try_from(&*format!(
"${}", "${}",
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
@ -2907,11 +2905,11 @@ impl Rooms {
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result<Option<Box<RoomId>>> { pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result<Option<RoomId>> {
self.alias_roomid self.alias_roomid
.get(alias.alias().as_bytes())? .get(alias.alias().as_bytes())?
.map(|bytes| { .map(|bytes| {
RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Room ID in alias_roomid is invalid unicode.") Error::bad_database("Room ID in alias_roomid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid."))
@ -2923,7 +2921,7 @@ impl Rooms {
pub fn room_aliases<'a>( pub fn room_aliases<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
) -> impl Iterator<Item = Result<Box<RoomAliasId>>> + 'a { ) -> impl Iterator<Item = Result<RoomAliasId>> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -2952,9 +2950,9 @@ impl Rooms {
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn public_rooms(&self) -> impl Iterator<Item = Result<Box<RoomId>>> + '_ { pub fn public_rooms(&self) -> impl Iterator<Item = Result<RoomId>> + '_ {
self.publicroomids.iter().map(|(bytes, _)| { self.publicroomids.iter().map(|(bytes, _)| {
RoomId::parse( RoomId::try_from(
utils::string_from_bytes(&bytes).map_err(|_| { utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Room ID in publicroomids is invalid unicode.") Error::bad_database("Room ID in publicroomids is invalid unicode.")
})?, })?,
@ -3014,8 +3012,8 @@ impl Rooms {
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn get_shared_rooms<'a>( pub fn get_shared_rooms<'a>(
&'a self, &'a self,
users: Vec<Box<UserId>>, users: Vec<UserId>,
) -> Result<impl Iterator<Item = Result<Box<RoomId>>> + 'a> { ) -> Result<impl Iterator<Item = Result<RoomId>> + 'a> {
let iterators = users.into_iter().map(move |user_id| { let iterators = users.into_iter().map(move |user_id| {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -3042,7 +3040,7 @@ impl Rooms {
Ok(utils::common_elements(iterators, Ord::cmp) Ok(utils::common_elements(iterators, Ord::cmp)
.expect("users is not empty") .expect("users is not empty")
.map(|bytes| { .map(|bytes| {
RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { RoomId::try_from(utils::string_from_bytes(&*bytes).map_err(|_| {
Error::bad_database("Invalid RoomId bytes in userroomid_joined") Error::bad_database("Invalid RoomId bytes in userroomid_joined")
})?) })?)
.map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined."))
@ -3059,7 +3057,7 @@ impl Rooms {
prefix.push(0xff); prefix.push(0xff);
self.roomserverids.scan_prefix(prefix).map(|(key, _)| { self.roomserverids.scan_prefix(prefix).map(|(key, _)| {
ServerName::parse( Box::<ServerName>::try_from(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3087,12 +3085,12 @@ impl Rooms {
pub fn server_rooms<'a>( pub fn server_rooms<'a>(
&'a self, &'a self,
server: &ServerName, server: &ServerName,
) -> impl Iterator<Item = Result<Box<RoomId>>> + 'a { ) -> impl Iterator<Item = Result<RoomId>> + 'a {
let mut prefix = server.as_bytes().to_vec(); let mut prefix = server.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.serverroomids.scan_prefix(prefix).map(|(key, _)| { self.serverroomids.scan_prefix(prefix).map(|(key, _)| {
RoomId::parse( RoomId::try_from(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3109,12 +3107,12 @@ impl Rooms {
pub fn room_members<'a>( pub fn room_members<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
) -> impl Iterator<Item = Result<Box<UserId>>> + 'a { ) -> impl Iterator<Item = Result<UserId>> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| {
UserId::parse( UserId::try_from(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3155,14 +3153,14 @@ impl Rooms {
pub fn room_useroncejoined<'a>( pub fn room_useroncejoined<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
) -> impl Iterator<Item = Result<Box<UserId>>> + 'a { ) -> impl Iterator<Item = Result<UserId>> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.roomuseroncejoinedids self.roomuseroncejoinedids
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(key, _)| { .map(|(key, _)| {
UserId::parse( UserId::try_from(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3181,14 +3179,14 @@ impl Rooms {
pub fn room_members_invited<'a>( pub fn room_members_invited<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
) -> impl Iterator<Item = Result<Box<UserId>>> + 'a { ) -> impl Iterator<Item = Result<UserId>> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.roomuserid_invitecount self.roomuserid_invitecount
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(key, _)| { .map(|(key, _)| {
UserId::parse( UserId::try_from(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3237,11 +3235,11 @@ impl Rooms {
pub fn rooms_joined<'a>( pub fn rooms_joined<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,
) -> impl Iterator<Item = Result<Box<RoomId>>> + 'a { ) -> impl Iterator<Item = Result<RoomId>> + 'a {
self.userroomid_joined self.userroomid_joined
.scan_prefix(user_id.as_bytes().to_vec()) .scan_prefix(user_id.as_bytes().to_vec())
.map(|(key, _)| { .map(|(key, _)| {
RoomId::parse( RoomId::try_from(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3260,14 +3258,14 @@ impl Rooms {
pub fn rooms_invited<'a>( pub fn rooms_invited<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,
) -> impl Iterator<Item = Result<(Box<RoomId>, Vec<Raw<AnyStrippedStateEvent>>)>> + 'a { ) -> impl Iterator<Item = Result<(RoomId, Vec<Raw<AnyStrippedStateEvent>>)>> + 'a {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.userroomid_invitestate self.userroomid_invitestate
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(key, state)| { .map(|(key, state)| {
let room_id = RoomId::parse( let room_id = RoomId::try_from(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3333,14 +3331,14 @@ impl Rooms {
pub fn rooms_left<'a>( pub fn rooms_left<'a>(
&'a self, &'a self,
user_id: &UserId, user_id: &UserId,
) -> impl Iterator<Item = Result<(Box<RoomId>, Vec<Raw<AnySyncStateEvent>>)>> + 'a { ) -> impl Iterator<Item = Result<(RoomId, Vec<Raw<AnySyncStateEvent>>)>> + 'a {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
self.userroomid_leftstate self.userroomid_leftstate
.scan_prefix(prefix) .scan_prefix(prefix)
.map(|(key, state)| { .map(|(key, state)| {
let room_id = RoomId::parse( let room_id = RoomId::try_from(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -3453,4 +3451,24 @@ impl Rooms {
Ok(()) Ok(())
} }
/// Returns the room's version.
#[tracing::instrument(skip(self))]
pub fn get_room_version(&self, room_id: &RoomId) -> Result<RoomVersionId> {
let create_event = self.room_state_get(room_id, &EventType::RoomCreate, "")?;
let create_event_content: Option<RoomCreateEventContent> = create_event
.as_ref()
.map(|create_event| {
serde_json::from_str(create_event.content.get()).map_err(|e| {
warn!("Invalid create event: {}", e);
Error::bad_database("Invalid create event in db.")
})
})
.transpose()?;
let room_version = create_event_content
.map(|create_event| create_event.room_version)
.ok_or_else(|| Error::BadDatabase("Invalid room version"))?;
Ok(room_version)
}
} }
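
The new get_room_version() helper derives a room's version from the room_version field of its m.room.create state event content. A simplified stand-alone version of that extraction using serde_json instead of ruma's RoomCreateEventContent (the error strings mirror the helper; the rest is illustrative only):

use serde_json::Value;

fn room_version_from_create_content(content: &str) -> Result<String, &'static str> {
    let content: Value =
        serde_json::from_str(content).map_err(|_| "Invalid create event in db.")?;
    content
        .get("room_version")
        .and_then(Value::as_str)
        .map(str::to_owned)
        .ok_or("Invalid room version")
}

fn main() {
    let content = r#"{"creator": "@alice:example.org", "room_version": "6"}"#;
    assert_eq!(room_version_from_create_content(content).unwrap(), "6");
}

In the real helper a missing create event surfaces as a database error, while build_and_append_pdu (above) keeps using default_room_version() for rooms that have no create event yet.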

35
src/database/rooms/edus.rs

@ -11,7 +11,7 @@ use ruma::{
}; };
use std::{ use std::{
collections::{HashMap, HashSet}, collections::{HashMap, HashSet},
convert::TryInto, convert::{TryFrom, TryInto},
mem, mem,
sync::Arc, sync::Arc,
}; };
@ -76,13 +76,8 @@ impl RoomEdus {
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
since: u64, since: u64,
) -> impl Iterator< ) -> impl Iterator<Item = Result<(UserId, u64, Raw<ruma::events::AnySyncEphemeralRoomEvent>)>> + 'a
Item = Result<( {
Box<UserId>,
u64,
Raw<ruma::events::AnySyncEphemeralRoomEvent>,
)>,
> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
let prefix2 = prefix.clone(); let prefix2 = prefix.clone();
@ -97,7 +92,7 @@ impl RoomEdus {
let count = let count =
utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::<u64>()]) utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::<u64>()])
.map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?;
let user_id = UserId::parse( let user_id = UserId::try_from(
utils::string_from_bytes(&k[prefix.len() + mem::size_of::<u64>() + 1..]) utils::string_from_bytes(&k[prefix.len() + mem::size_of::<u64>() + 1..])
.map_err(|_| { .map_err(|_| {
Error::bad_database("Invalid readreceiptid userid bytes in db.") Error::bad_database("Invalid readreceiptid userid bytes in db.")
@ -310,13 +305,17 @@ impl RoomEdus {
let mut user_ids = HashSet::new(); let mut user_ids = HashSet::new();
for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { for user_id in self
let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { .typingid_userid
.scan_prefix(prefix)
.map(|(_, user_id)| {
UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| {
Error::bad_database("User ID in typingid_userid is invalid unicode.") Error::bad_database("User ID in typingid_userid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))
})
user_ids.insert(user_id); {
user_ids.insert(user_id?);
} }
Ok(SyncEphemeralRoomEvent { Ok(SyncEphemeralRoomEvent {
@ -450,7 +449,7 @@ impl RoomEdus {
{ {
// Send new presence events to set the user offline // Send new presence events to set the user offline
let count = globals.next_count()?.to_be_bytes(); let count = globals.next_count()?.to_be_bytes();
let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) let user_id = utils::string_from_bytes(&user_id_bytes)
.map_err(|_| { .map_err(|_| {
Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.")
})? })?
@ -476,7 +475,7 @@ impl RoomEdus {
presence: PresenceState::Offline, presence: PresenceState::Offline,
status_msg: None, status_msg: None,
}, },
sender: user_id.to_owned(), sender: user_id.clone(),
}) })
.expect("PresenceEvent can be serialized"), .expect("PresenceEvent can be serialized"),
)?; )?;
@ -499,7 +498,7 @@ impl RoomEdus {
since: u64, since: u64,
_rooms: &super::Rooms, _rooms: &super::Rooms,
_globals: &super::super::globals::Globals, _globals: &super::super::globals::Globals,
) -> Result<HashMap<Box<UserId>, PresenceEvent>> { ) -> Result<HashMap<UserId, PresenceEvent>> {
//self.presence_maintain(rooms, globals)?; //self.presence_maintain(rooms, globals)?;
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
@ -514,7 +513,7 @@ impl RoomEdus {
.iter_from(&*first_possible_edu, false) .iter_from(&*first_possible_edu, false)
.take_while(|(key, _)| key.starts_with(&prefix)) .take_while(|(key, _)| key.starts_with(&prefix))
{ {
let user_id = UserId::parse( let user_id = UserId::try_from(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()

11
src/database/sending.rs

@ -1,6 +1,6 @@
use std::{ use std::{
collections::{BTreeMap, HashMap, HashSet}, collections::{BTreeMap, HashMap, HashSet},
convert::TryInto, convert::{TryFrom, TryInto},
fmt::Debug, fmt::Debug,
sync::Arc, sync::Arc,
time::{Duration, Instant}, time::{Duration, Instant},
@ -397,7 +397,7 @@ impl Sending {
// Because synapse resyncs, we can just insert dummy data // Because synapse resyncs, we can just insert dummy data
let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { let edu = Edu::DeviceListUpdate(DeviceListUpdateContent {
user_id, user_id,
device_id: device_id!("dummy").to_owned(), device_id: device_id!("dummy"),
device_display_name: Some("Dummy".to_owned()), device_display_name: Some("Dummy".to_owned()),
stream_id: uint!(1), stream_id: uint!(1),
prev_id: Vec::new(), prev_id: Vec::new(),
@ -583,7 +583,8 @@ impl Sending {
} }
} }
let userid = UserId::parse(utils::string_from_bytes(user).map_err(|_| { let userid =
UserId::try_from(utils::string_from_bytes(user).map_err(|_| {
( (
kind.clone(), kind.clone(),
Error::bad_database("Invalid push user string in db."), Error::bad_database("Invalid push user string in db."),
@ -731,7 +732,7 @@ impl Sending {
})?; })?;
( (
OutgoingKind::Appservice(ServerName::parse(server).map_err(|_| { OutgoingKind::Appservice(Box::<ServerName>::try_from(server).map_err(|_| {
Error::bad_database("Invalid server string in server_currenttransaction") Error::bad_database("Invalid server string in server_currenttransaction")
})?), })?),
if value.is_empty() { if value.is_empty() {
@ -770,7 +771,7 @@ impl Sending {
})?; })?;
( (
OutgoingKind::Normal(ServerName::parse(server).map_err(|_| { OutgoingKind::Normal(Box::<ServerName>::try_from(server).map_err(|_| {
Error::bad_database("Invalid server string in server_currenttransaction") Error::bad_database("Invalid server string in server_currenttransaction")
})?), })?),
if value.is_empty() { if value.is_empty() {

32
src/database/users.rs

@ -8,7 +8,7 @@ use ruma::{
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt,
UserId, UserId,
}; };
use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc}; use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc};
use tracing::warn; use tracing::warn;
use super::abstraction::Tree; use super::abstraction::Tree;
@ -62,11 +62,12 @@ impl Users {
rooms: &super::rooms::Rooms, rooms: &super::rooms::Rooms,
globals: &super::globals::Globals, globals: &super::globals::Globals,
) -> Result<bool> { ) -> Result<bool> {
let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) let admin_room_alias_id =
RoomAliasId::try_from(format!("#admins:{}", globals.server_name()))
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap();
rooms.is_joined(user_id, &admin_room_id) Ok(rooms.is_joined(user_id, &admin_room_id)?)
} }
/// Create a new user account on this homeserver. /// Create a new user account on this homeserver.
@ -84,7 +85,7 @@ impl Users {
/// Find out which user an access token belongs to. /// Find out which user an access token belongs to.
#[tracing::instrument(skip(self, token))] #[tracing::instrument(skip(self, token))]
pub fn find_from_token(&self, token: &str) -> Result<Option<(Box<UserId>, String)>> { pub fn find_from_token(&self, token: &str) -> Result<Option<(UserId, String)>> {
self.token_userdeviceid self.token_userdeviceid
.get(token.as_bytes())? .get(token.as_bytes())?
.map_or(Ok(None), |bytes| { .map_or(Ok(None), |bytes| {
@ -97,7 +98,7 @@ impl Users {
})?; })?;
Ok(Some(( Ok(Some((
UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { UserId::try_from(utils::string_from_bytes(user_bytes).map_err(|_| {
Error::bad_database("User ID in token_userdeviceid is invalid unicode.") Error::bad_database("User ID in token_userdeviceid is invalid unicode.")
})?) })?)
.map_err(|_| { .map_err(|_| {
@ -112,9 +113,9 @@ impl Users {
/// Returns an iterator over all users on this homeserver. /// Returns an iterator over all users on this homeserver.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn iter(&self) -> impl Iterator<Item = Result<Box<UserId>>> + '_ { pub fn iter(&self) -> impl Iterator<Item = Result<UserId>> + '_ {
self.userid_password.iter().map(|(bytes, _)| { self.userid_password.iter().map(|(bytes, _)| {
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in userid_password is invalid unicode.") Error::bad_database("User ID in userid_password is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) .map_err(|_| Error::bad_database("User ID in userid_password is invalid."))
@ -180,21 +181,20 @@ impl Users {
/// Get the avatar_url of a user. /// Get the avatar_url of a user.
#[tracing::instrument(skip(self, user_id))] #[tracing::instrument(skip(self, user_id))]
pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<Box<MxcUri>>> { pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<MxcUri>> {
self.userid_avatarurl self.userid_avatarurl
.get(user_id.as_bytes())? .get(user_id.as_bytes())?
.map(|bytes| { .map(|bytes| {
let s = utils::string_from_bytes(&bytes) let s = utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
s.try_into() MxcUri::try_from(s).map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
}) })
.transpose() .transpose()
} }
/// Sets a new avatar_url or removes it if avatar_url is None. /// Sets a new avatar_url or removes it if avatar_url is None.
#[tracing::instrument(skip(self, user_id, avatar_url))] #[tracing::instrument(skip(self, user_id, avatar_url))]
pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<Box<MxcUri>>) -> Result<()> { pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<MxcUri>) -> Result<()> {
if let Some(avatar_url) = avatar_url { if let Some(avatar_url) = avatar_url {
self.userid_avatarurl self.userid_avatarurl
.insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?;
@ -409,7 +409,7 @@ impl Users {
device_id: &DeviceId, device_id: &DeviceId,
key_algorithm: &DeviceKeyAlgorithm, key_algorithm: &DeviceKeyAlgorithm,
globals: &super::globals::Globals, globals: &super::globals::Globals,
) -> Result<Option<(Box<DeviceKeyId>, OneTimeKey)>> { ) -> Result<Option<(DeviceKeyId, OneTimeKey)>> {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
prefix.extend_from_slice(device_id.as_bytes()); prefix.extend_from_slice(device_id.as_bytes());
@ -459,7 +459,7 @@ impl Users {
.scan_prefix(userdeviceid) .scan_prefix(userdeviceid)
.map(|(bytes, _)| { .map(|(bytes, _)| {
Ok::<_, Error>( Ok::<_, Error>(
serde_json::from_slice::<Box<DeviceKeyId>>( serde_json::from_slice::<DeviceKeyId>(
&*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| {
Error::bad_database("OneTimeKey ID in db is invalid.") Error::bad_database("OneTimeKey ID in db is invalid.")
})?, })?,
@ -632,7 +632,7 @@ impl Users {
.ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))?
.as_object_mut() .as_object_mut()
.ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))?
.entry(sender_id.to_owned()) .entry(sender_id.clone())
.or_insert_with(|| serde_json::Map::new().into()); .or_insert_with(|| serde_json::Map::new().into());
signatures signatures
@ -657,7 +657,7 @@ impl Users {
user_or_room_id: &str, user_or_room_id: &str,
from: u64, from: u64,
to: Option<u64>, to: Option<u64>,
) -> impl Iterator<Item = Result<Box<UserId>>> + 'a { ) -> impl Iterator<Item = Result<UserId>> + 'a {
let mut prefix = user_or_room_id.as_bytes().to_vec(); let mut prefix = user_or_room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -683,7 +683,7 @@ impl Users {
} }
}) })
.map(|(_, bytes)| { .map(|(_, bytes)| {
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid."))
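
Several getters in this file (avatar_url, find_from_token, iter) share one shape: read optional bytes from a tree, convert them, and flip Option<Result<..>> back into Result<Option<..>>. A sketch of that shape, using a plain HashMap as a hypothetical stand-in for the Tree abstraction and a made-up display_name getter:

use std::collections::HashMap;

// Hypothetical stand-in for the key-value Tree used by Users.
struct Tree(HashMap<Vec<u8>, Vec<u8>>);

impl Tree {
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.0.get(key).cloned()
    }
}

// Same shape as avatar_url(): absent key => Ok(None); present but corrupt => Err;
// present and valid => Ok(Some(value)). `transpose` turns Option<Result<_>> into
// Result<Option<_>>.
fn display_name(tree: &Tree, user_id: &str) -> Result<Option<String>, String> {
    tree.get(user_id.as_bytes())
        .map(|bytes| {
            String::from_utf8(bytes).map_err(|_| "Displayname in db is invalid.".to_owned())
        })
        .transpose()
}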

49
src/pdu.rs

@ -1,4 +1,4 @@
use crate::Error; use crate::{Database, Error};
use ruma::{ use ruma::{
events::{ events::{
room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyInitialStateEvent, room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyInitialStateEvent,
@ -6,14 +6,14 @@ use ruma::{
EventType, StateEvent, EventType, StateEvent,
}, },
serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw},
state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId, state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, UInt, UserId,
}; };
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::{ use serde_json::{
json, json,
value::{to_raw_value, RawValue as RawJsonValue}, value::{to_raw_value, RawValue as RawJsonValue},
}; };
use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, sync::Arc}; use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom};
use tracing::warn; use tracing::warn;
/// Content hashes of a PDU. /// Content hashes of a PDU.
@ -25,20 +25,20 @@ pub struct EventHash {
#[derive(Clone, Deserialize, Serialize, Debug)] #[derive(Clone, Deserialize, Serialize, Debug)]
pub struct PduEvent { pub struct PduEvent {
pub event_id: Arc<EventId>, pub event_id: EventId,
pub room_id: Box<RoomId>, pub room_id: RoomId,
pub sender: Box<UserId>, pub sender: UserId,
pub origin_server_ts: UInt, pub origin_server_ts: UInt,
#[serde(rename = "type")] #[serde(rename = "type")]
pub kind: EventType, pub kind: EventType,
pub content: Box<RawJsonValue>, pub content: Box<RawJsonValue>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub state_key: Option<String>, pub state_key: Option<String>,
pub prev_events: Vec<Arc<EventId>>, pub prev_events: Vec<EventId>,
pub depth: UInt, pub depth: UInt,
pub auth_events: Vec<Arc<EventId>>, pub auth_events: Vec<EventId>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub redacts: Option<Arc<EventId>>, pub redacts: Option<EventId>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub unsigned: Option<Box<RawJsonValue>>, pub unsigned: Option<Box<RawJsonValue>>,
pub hashes: EventHash, pub hashes: EventHash,
@ -266,9 +266,7 @@ impl PduEvent {
} }
impl state_res::Event for PduEvent { impl state_res::Event for PduEvent {
type Id = Arc<EventId>; fn event_id(&self) -> &EventId {
fn event_id(&self) -> &Self::Id {
&self.event_id &self.event_id
} }
@ -296,15 +294,15 @@ impl state_res::Event for PduEvent {
self.state_key.as_deref() self.state_key.as_deref()
} }
fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_> { fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + '_> {
Box::new(self.prev_events.iter()) Box::new(self.prev_events.iter())
} }
fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_> { fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + '_> {
Box::new(self.auth_events.iter()) Box::new(self.auth_events.iter())
} }
fn redacts(&self) -> Option<&Self::Id> { fn redacts(&self) -> Option<&EventId> {
self.redacts.as_ref() self.redacts.as_ref()
} }
} }
@ -333,19 +331,26 @@ impl Ord for PduEvent {
/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap<String, CanonicalJsonValue>`. /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap<String, CanonicalJsonValue>`.
pub(crate) fn gen_event_id_canonical_json( pub(crate) fn gen_event_id_canonical_json(
pdu: &RawJsonValue, pdu: &RawJsonValue,
) -> crate::Result<(Box<EventId>, CanonicalJsonObject)> { db: &Database,
let value = serde_json::from_str(pdu.get()).map_err(|e| { ) -> crate::Result<(EventId, CanonicalJsonObject)> {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
warn!("Error parsing incoming event {:?}: {:?}", pdu, e); warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response") Error::BadServerResponse("Invalid PDU in server response")
})?; })?;
let event_id = format!( let room_id = value
.get("room_id")
.and_then(|id| RoomId::try_from(id.as_str()?).ok())
.expect("Invalid room id in event");
let room_version_id = db.rooms.get_room_version(&room_id);
let event_id = EventId::try_from(&*format!(
"${}", "${}",
// Anything higher than version3 behaves the same // Anything higher than version3 behaves the same
ruma::signatures::reference_hash(&value, &RoomVersionId::V6) ruma::signatures::reference_hash(&value, &room_version_id?)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
) ))
.try_into()
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
Ok((event_id, value)) Ok((event_id, value))
@ -359,7 +364,7 @@ pub struct PduBuilder {
pub content: Box<RawJsonValue>, pub content: Box<RawJsonValue>,
pub unsigned: Option<BTreeMap<String, serde_json::Value>>, pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
pub state_key: Option<String>, pub state_key: Option<String>,
pub redacts: Option<Arc<EventId>>, pub redacts: Option<EventId>,
} }
/// Direct conversion prevents loss of the empty `state_key` that ruma requires. /// Direct conversion prevents loss of the empty `state_key` that ruma requires.
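
The state_res::Event impl above differs between the two sides mainly in whether event IDs are exposed through an associated Id type (Arc<EventId> on one side) or as plain &EventId. A simplified, hypothetical trait showing why the associated type matters when an implementor wants to share IDs via Arc; the types here are stand-ins, not the real state_res API.

use std::sync::Arc;

// Simplified stand-in for state_res::Event; the real trait has more methods.
trait Event {
    type Id: Clone;
    fn event_id(&self) -> &Self::Id;
    fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_>;
}

struct Pdu {
    event_id: Arc<String>,          // Arc<EventId> in the real code
    prev_events: Vec<Arc<String>>,
}

impl Event for Pdu {
    // Choosing Arc<String> lets callers clone IDs cheaply instead of
    // reallocating the underlying string for every graph edge.
    type Id = Arc<String>;
    fn event_id(&self) -> &Self::Id {
        &self.event_id
    }
    fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_> {
        Box::new(self.prev_events.iter())
    }
}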

10
src/ruma_wrapper.rs

@ -20,6 +20,7 @@ use {
}, },
ruma::api::{AuthScheme, IncomingRequest}, ruma::api::{AuthScheme, IncomingRequest},
std::collections::BTreeMap, std::collections::BTreeMap,
std::convert::TryFrom,
std::io::Cursor, std::io::Cursor,
tracing::{debug, warn}, tracing::{debug, warn},
}; };
@ -28,7 +29,7 @@ use {
/// first. /// first.
pub struct Ruma<T: Outgoing> { pub struct Ruma<T: Outgoing> {
pub body: T::Incoming, pub body: T::Incoming,
pub sender_user: Option<Box<UserId>>, pub sender_user: Option<UserId>,
pub sender_device: Option<Box<DeviceId>>, pub sender_device: Option<Box<DeviceId>>,
pub sender_servername: Option<Box<ServerName>>, pub sender_servername: Option<Box<ServerName>>,
// This is None when body is not a valid string // This is None when body is not a valid string
@ -85,7 +86,7 @@ where
registration registration
.get("as_token") .get("as_token")
.and_then(|as_token| as_token.as_str()) .and_then(|as_token| as_token.as_str())
.map_or(false, |as_token| token == Some(as_token)) .map_or(false, |as_token| token.as_deref() == Some(as_token))
}) { }) {
match metadata.authentication { match metadata.authentication {
AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
@ -102,7 +103,8 @@ where
.unwrap() .unwrap()
}, },
|string| { |string| {
UserId::parse(string.expect("parsing to string always works")).unwrap() UserId::try_from(string.expect("parsing to string always works"))
.unwrap()
}, },
); );
@ -169,7 +171,7 @@ where
} }
}; };
let origin = match ServerName::parse(origin_str) { let origin = match Box::<ServerName>::try_from(origin_str) {
Ok(s) => s, Ok(s) => s,
_ => { _ => {
warn!( warn!(
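
The appservice check above compares an optional token against a &str taken from the registration. A tiny sketch of why as_deref helps there, with hypothetical values:

fn main() {
    let token: Option<String> = Some("secret".to_owned());
    let as_token: &str = "secret";

    // Option<String> and Option<&str> are different types, so comparing them
    // directly does not type-check; as_deref() borrows the contents as &str first.
    assert!(token.as_deref() == Some(as_token));
}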

182
src/server_server.rs

@ -64,7 +64,6 @@ use std::{
future::Future, future::Future,
mem, mem,
net::{IpAddr, SocketAddr}, net::{IpAddr, SocketAddr},
ops::Deref,
pin::Pin, pin::Pin,
sync::{Arc, RwLock, RwLockWriteGuard}, sync::{Arc, RwLock, RwLockWriteGuard},
time::{Duration, Instant, SystemTime}, time::{Duration, Instant, SystemTime},
@ -397,7 +396,10 @@ async fn find_actual_destination(
} }
if let Some(port) = force_port { if let Some(port) = force_port {
FedDest::Named(delegated_hostname, format!(":{}", port)) FedDest::Named(
delegated_hostname,
format!(":{}", port.to_string()),
)
} else { } else {
add_port_to_hostname(&delegated_hostname) add_port_to_hostname(&delegated_hostname)
} }
@ -430,7 +432,10 @@ async fn find_actual_destination(
} }
if let Some(port) = force_port { if let Some(port) = force_port {
FedDest::Named(hostname.clone(), format!(":{}", port)) FedDest::Named(
hostname.clone(),
format!(":{}", port.to_string()),
)
} else { } else {
add_port_to_hostname(&hostname) add_port_to_hostname(&hostname)
} }
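
One side of this hunk formats the port with format!(":{}", port.to_string()); since integers already implement Display, the extra to_string() only allocates an intermediate String. A two-line illustration with a hypothetical port value:

fn main() {
    let port: u16 = 8448;
    // Both produce ":8448"; the first avoids the intermediate String allocation.
    assert_eq!(format!(":{}", port), format!(":{}", port.to_string()));
}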
@ -545,10 +550,11 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json<String> {
return Json("Federation is disabled.".to_owned()); return Json("Federation is disabled.".to_owned());
} }
let mut verify_keys: BTreeMap<Box<ServerSigningKeyId>, VerifyKey> = BTreeMap::new(); let mut verify_keys = BTreeMap::new();
verify_keys.insert( verify_keys.insert(
format!("ed25519:{}", db.globals.keypair().version()) ServerSigningKeyId::try_from(
.try_into() format!("ed25519:{}", db.globals.keypair().version()).as_str(),
)
.expect("found invalid server signing keys in DB"), .expect("found invalid server signing keys in DB"),
VerifyKey { VerifyKey {
key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD),
@ -719,7 +725,7 @@ pub async fn send_transaction_message_route(
for pdu in &body.pdus { for pdu in &body.pdus {
// We do not add the event_id field to the pdu here because of signature and hashes checks // We do not add the event_id field to the pdu here because of signature and hashes checks
let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) {
Ok(t) => t, Ok(t) => t,
Err(_) => { Err(_) => {
// Event could not be converted to canonical json // Event could not be converted to canonical json
@ -730,7 +736,7 @@ pub async fn send_transaction_message_route(
// 0. Check the server is in the room // 0. Check the server is in the room
let room_id = match value let room_id = match value
.get("room_id") .get("room_id")
.and_then(|id| RoomId::parse(id.as_str()?).ok()) .and_then(|id| RoomId::try_from(id.as_str()?).ok())
{ {
Some(id) => id, Some(id) => id,
None => { None => {
@ -995,9 +1001,14 @@ pub(crate) async fn handle_incoming_pdu<'a>(
} }
// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events
let mut graph: HashMap<Arc<EventId>, _> = HashMap::new(); let mut graph = HashMap::new();
let mut eventid_info = HashMap::new(); let mut eventid_info = HashMap::new();
let mut todo_outlier_stack: Vec<Arc<EventId>> = incoming_pdu.prev_events.clone(); let mut todo_outlier_stack: Vec<_> = incoming_pdu
.prev_events
.iter()
.cloned()
.map(Arc::new)
.collect();
let mut amount = 0; let mut amount = 0;
@ -1016,7 +1027,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
if amount > 100 { if amount > 100 {
// Max limit reached // Max limit reached
warn!("Max prev event limit reached!"); warn!("Max prev event limit reached!");
graph.insert(prev_event_id.clone(), HashSet::new()); graph.insert((*prev_event_id).clone(), HashSet::new());
continue; continue;
} }
@ -1027,27 +1038,27 @@ pub(crate) async fn handle_incoming_pdu<'a>(
amount += 1; amount += 1;
for prev_prev in &pdu.prev_events { for prev_prev in &pdu.prev_events {
if !graph.contains_key(prev_prev) { if !graph.contains_key(prev_prev) {
todo_outlier_stack.push(dbg!(prev_prev.clone())); todo_outlier_stack.push(dbg!(Arc::new(prev_prev.clone())));
} }
} }
graph.insert( graph.insert(
prev_event_id.clone(), (*prev_event_id).clone(),
pdu.prev_events.iter().cloned().collect(), pdu.prev_events.iter().cloned().collect(),
); );
} else { } else {
// Time based check failed // Time based check failed
graph.insert(prev_event_id.clone(), HashSet::new()); graph.insert((*prev_event_id).clone(), HashSet::new());
} }
eventid_info.insert(prev_event_id.clone(), (pdu, json)); eventid_info.insert(prev_event_id.clone(), (pdu, json));
} else { } else {
// Get json failed // Get json failed
graph.insert(prev_event_id.clone(), HashSet::new()); graph.insert((*prev_event_id).clone(), HashSet::new());
} }
} else { } else {
// Fetch and handle failed // Fetch and handle failed
graph.insert(prev_event_id.clone(), HashSet::new()); graph.insert((*prev_event_id).clone(), HashSet::new());
} }
} }
@ -1063,6 +1074,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
.get(event_id) .get(event_id)
.map_or_else(|| uint!(0), |info| info.0.origin_server_ts), .map_or_else(|| uint!(0), |info| info.0.origin_server_ts),
), ),
ruma::event_id!("$notimportant"),
)) ))
}) })
.map_err(|_| "Error sorting prev events".to_owned())?; .map_err(|_| "Error sorting prev events".to_owned())?;
@ -1072,7 +1084,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
if errors >= 5 { if errors >= 5 {
break; break;
} }
if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { if let Some((pdu, json)) = eventid_info.remove(&prev_id) {
if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts {
continue; continue;
} }
@ -1188,7 +1200,8 @@ fn handle_outlier_pdu<'a>(
&incoming_pdu &incoming_pdu
.auth_events .auth_events
.iter() .iter()
.map(|x| Arc::from(&**x)) .cloned()
.map(Arc::new)
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
create_event, create_event,
room_id, room_id,
@ -1318,7 +1331,7 @@ async fn upgrade_outlier_to_timeline_pdu(
let mut state_at_incoming_event = None; let mut state_at_incoming_event = None;
if incoming_pdu.prev_events.len() == 1 { if incoming_pdu.prev_events.len() == 1 {
let prev_event = &*incoming_pdu.prev_events[0]; let prev_event = &incoming_pdu.prev_events[0];
let prev_event_sstatehash = db let prev_event_sstatehash = db
.rooms .rooms
.pdu_shortstatehash(prev_event) .pdu_shortstatehash(prev_event)
@ -1340,7 +1353,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals) .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
state.insert(shortstatekey, Arc::from(prev_event)); state.insert(shortstatekey, Arc::new(prev_event.clone()));
// Now it's the state after the pdu // Now it's the state after the pdu
} }
@ -1384,7 +1397,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.rooms .rooms
.get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone()));
// Now it's the state after the pdu // Now it's the state after the pdu
} }
@ -1397,13 +1410,14 @@ async fn upgrade_outlier_to_timeline_pdu(
.get_statekey_from_short(k) .get_statekey_from_short(k)
.map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?;
state.insert(k, id.clone()); state.insert(k, (*id).clone());
starting_events.push(id); starting_events.push(id);
} }
auth_chain_sets.push( auth_chain_sets.push(
get_auth_chain(room_id, starting_events, db) get_auth_chain(room_id, starting_events, db)
.map_err(|_| "Failed to load auth chain.".to_owned())? .map_err(|_| "Failed to load auth chain.".to_owned())?
.map(|event_id| (*event_id).clone())
.collect(), .collect(),
); );
@ -1430,7 +1444,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.rooms .rooms
.get_or_create_shortstatekey(&event_type, &state_key, &db.globals) .get_or_create_shortstatekey(&event_type, &state_key, &db.globals)
.map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?;
Ok((shortstatekey, event_id)) Ok((shortstatekey, Arc::new(event_id)))
}) })
.collect::<Result<_, String>>()?, .collect::<Result<_, String>>()?,
), ),
@ -1465,7 +1479,8 @@ async fn upgrade_outlier_to_timeline_pdu(
origin, origin,
&res.pdu_ids &res.pdu_ids
.iter() .iter()
.map(|x| Arc::from(&**x)) .cloned()
.map(Arc::new)
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
create_event, create_event,
room_id, room_id,
@ -1473,7 +1488,7 @@ async fn upgrade_outlier_to_timeline_pdu(
) )
.await; .await;
let mut state: BTreeMap<_, Arc<EventId>> = BTreeMap::new(); let mut state = BTreeMap::new();
for (pdu, _) in state_vec { for (pdu, _) in state_vec {
let state_key = pdu let state_key = pdu
.state_key .state_key
@ -1487,7 +1502,7 @@ async fn upgrade_outlier_to_timeline_pdu(
match state.entry(shortstatekey) { match state.entry(shortstatekey) {
btree_map::Entry::Vacant(v) => { btree_map::Entry::Vacant(v) => {
v.insert(Arc::from(&*pdu.event_id)); v.insert(Arc::new(pdu.event_id.clone()));
} }
btree_map::Entry::Occupied(_) => return Err( btree_map::Entry::Occupied(_) => return Err(
"State event's type and state_key combination exists multiple times." "State event's type and state_key combination exists multiple times."
@ -1562,7 +1577,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.to_owned()) .entry(room_id.clone())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -1632,7 +1647,7 @@ async fn upgrade_outlier_to_timeline_pdu(
db, db,
&incoming_pdu, &incoming_pdu,
val, val,
extremities.iter().map(Deref::deref), extremities,
state_ids_compressed, state_ids_compressed,
soft_fail, soft_fail,
&state_lock, &state_lock,
@ -1700,7 +1715,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.rooms .rooms
.get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals) .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
leaf_state.insert(shortstatekey, Arc::from(&*leaf_pdu.event_id)); leaf_state.insert(shortstatekey, Arc::new(leaf_pdu.event_id.clone()));
// Now it's the state after the pdu // Now it's the state after the pdu
} }
@ -1715,7 +1730,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals) .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); state_after.insert(shortstatekey, Arc::new(incoming_pdu.event_id.clone()));
} }
fork_states.push(state_after); fork_states.push(state_after);
@ -1747,6 +1762,7 @@ async fn upgrade_outlier_to_timeline_pdu(
db, db,
) )
.map_err(|_| "Failed to load auth chain.".to_owned())? .map_err(|_| "Failed to load auth chain.".to_owned())?
.map(|event_id| (*event_id).clone())
.collect(), .collect(),
); );
} }
@ -1755,7 +1771,11 @@ async fn upgrade_outlier_to_timeline_pdu(
.into_iter() .into_iter()
.map(|map| { .map(|map| {
map.into_iter() map.into_iter()
.map(|(k, id)| db.rooms.get_statekey_from_short(k).map(|k| (k, id))) .map(|(k, id)| {
db.rooms
.get_statekey_from_short(k)
.map(|k| (k, (*id).clone()))
})
.collect::<Result<StateMap<_>>>() .collect::<Result<StateMap<_>>>()
}) })
.collect::<Result<_>>() .collect::<Result<_>>()
@ -1812,7 +1832,7 @@ async fn upgrade_outlier_to_timeline_pdu(
db, db,
&incoming_pdu, &incoming_pdu,
val, val,
extremities.iter().map(Deref::deref), extremities,
state_ids_compressed, state_ids_compressed,
soft_fail, soft_fail,
&state_lock, &state_lock,
@ -1854,8 +1874,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
let mut pdus = vec![]; let mut pdus = vec![];
for id in events { for id in events {
if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(id) {
{
// Exponential backoff // Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
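
The rate limiter here retries a failing event with a quadratically growing delay, capped at one day. A self-contained sketch of that calculation; the 5-minute base and the cap come from the hunk, while the helper name is made up.

use std::time::Duration;

// Delay grows with the square of the retry count and is capped at 24 hours,
// matching the bad_event_ratelimiter logic above.
fn backoff_for(tries: u32) -> Duration {
    let mut min_elapsed = Duration::from_secs(5 * 60) * tries * tries;
    if min_elapsed > Duration::from_secs(60 * 60 * 24) {
        min_elapsed = Duration::from_secs(60 * 60 * 24);
    }
    min_elapsed
}

fn main() {
    assert_eq!(backoff_for(1), Duration::from_secs(300));
    assert_eq!(backoff_for(2), Duration::from_secs(1200));
    assert_eq!(backoff_for(20), Duration::from_secs(60 * 60 * 24)); // capped
}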
@ -1892,10 +1911,10 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
Ok(res) => { Ok(res) => {
warn!("Got {} over federation", id); warn!("Got {} over federation", id);
let (calculated_event_id, value) = let (calculated_event_id, value) =
match crate::pdu::gen_event_id_canonical_json(&res.pdu) { match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) {
Ok(t) => t, Ok(t) => t,
Err(_) => { Err(_) => {
back_off((**id).to_owned()); back_off((**id).clone());
continue; continue;
} }
}; };
@ -1920,14 +1939,14 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
Ok((pdu, json)) => (pdu, Some(json)), Ok((pdu, json)) => (pdu, Some(json)),
Err(e) => { Err(e) => {
warn!("Authentication of event {} failed: {:?}", id, e); warn!("Authentication of event {} failed: {:?}", id, e);
back_off((**id).to_owned()); back_off((**id).clone());
continue; continue;
} }
} }
} }
Err(_) => { Err(_) => {
warn!("Failed to fetch event: {}", id); warn!("Failed to fetch event: {}", id);
back_off((**id).to_owned()); back_off((**id).clone());
continue; continue;
} }
} }
@ -2105,11 +2124,11 @@ pub(crate) async fn fetch_signing_keys(
/// Append the incoming event setting the state snapshot to the state from the /// Append the incoming event setting the state snapshot to the state from the
/// server that sent the event. /// server that sent the event.
#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))] #[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))]
fn append_incoming_pdu<'a>( fn append_incoming_pdu(
db: &Database, db: &Database,
pdu: &PduEvent, pdu: &PduEvent,
pdu_json: CanonicalJsonObject, pdu_json: CanonicalJsonObject,
new_room_leaves: impl IntoIterator<Item = &'a EventId> + Clone + Debug, new_room_leaves: HashSet<EventId>,
state_ids_compressed: HashSet<CompressedStateEvent>, state_ids_compressed: HashSet<CompressedStateEvent>,
soft_fail: bool, soft_fail: bool,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
@ -2126,11 +2145,19 @@ fn append_incoming_pdu<'a>(
if soft_fail { if soft_fail {
db.rooms db.rooms
.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; db.rooms.replace_pdu_leaves(
&pdu.room_id,
&new_room_leaves.into_iter().collect::<Vec<_>>(),
)?;
return Ok(None); return Ok(None);
} }
let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; let pdu_id = db.rooms.append_pdu(
pdu,
pdu_json,
&new_room_leaves.into_iter().collect::<Vec<_>>(),
db,
)?;
for appservice in db.appservice.all()? { for appservice in db.appservice.all()? {
if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? { if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? {
@ -2271,13 +2298,13 @@ fn get_auth_chain_inner(
event_id: &EventId, event_id: &EventId,
db: &Database, db: &Database,
) -> Result<HashSet<u64>> { ) -> Result<HashSet<u64>> {
let mut todo = vec![Arc::from(event_id)]; let mut todo = vec![event_id.clone()];
let mut found = HashSet::new(); let mut found = HashSet::new();
while let Some(event_id) = todo.pop() { while let Some(event_id) = todo.pop() {
match db.rooms.get_pdu(&event_id) { match db.rooms.get_pdu(&event_id) {
Ok(Some(pdu)) => { Ok(Some(pdu)) => {
if pdu.room_id != room_id { if &pdu.room_id != room_id {
return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db"));
} }
for auth_event in &pdu.auth_events { for auth_event in &pdu.auth_events {
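
get_auth_chain_inner walks the auth-event graph with a simple worklist, recording every reachable event exactly once. A hypothetical in-memory version of the same traversal, with a HashMap standing in for the database-backed PDU store:

use std::collections::{HashMap, HashSet};

// Hypothetical in-memory PDU: only the IDs of its auth events matter here.
struct Pdu {
    auth_events: Vec<String>,
}

// Same worklist shape as get_auth_chain_inner: pop an ID, look up its PDU,
// push any auth events not seen before, and collect everything reached.
fn auth_chain(start: &str, pdus: &HashMap<String, Pdu>) -> HashSet<String> {
    let mut todo = vec![start.to_owned()];
    let mut found = HashSet::new();

    while let Some(event_id) = todo.pop() {
        if let Some(pdu) = pdus.get(&event_id) {
            for auth_event in &pdu.auth_events {
                // insert() returns false for duplicates, so each event is
                // visited at most once even if the graph has shared ancestors.
                if found.insert(auth_event.clone()) {
                    todo.push(auth_event.clone());
                }
            }
        }
    }
    found
}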
@ -2336,10 +2363,10 @@ pub fn get_event_route(
.and_then(|val| val.as_str()) .and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?; .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let room_id = <&RoomId>::try_from(room_id_str) let room_id = RoomId::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
if !db.rooms.server_in_room(sender_servername, room_id)? { if !db.rooms.server_in_room(sender_servername, &room_id)? {
return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found."));
} }
@ -2390,7 +2417,7 @@ pub fn get_missing_events_route(
.and_then(|val| val.as_str()) .and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?; .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let event_room_id = <&RoomId>::try_from(room_id_str) let event_room_id = RoomId::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
if event_room_id != body.room_id { if event_room_id != body.room_id {
@ -2409,7 +2436,7 @@ pub fn get_missing_events_route(
continue; continue;
} }
queued_events.extend_from_slice( queued_events.extend_from_slice(
&serde_json::from_value::<Vec<Box<EventId>>>( &serde_json::from_value::<Vec<EventId>>(
serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| {
Error::bad_database("Event in db has no prev_events field.") Error::bad_database("Event in db has no prev_events field.")
})?) })?)
@ -2458,14 +2485,14 @@ pub fn get_event_authorization_route(
.and_then(|val| val.as_str()) .and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?; .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
let room_id = <&RoomId>::try_from(room_id_str) let room_id = RoomId::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
if !db.rooms.server_in_room(sender_servername, room_id)? { if !db.rooms.server_in_room(sender_servername, &room_id)? {
return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found."));
} }
let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?; let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::new(body.event_id.clone())], &db)?;
Ok(get_event_authorization::v1::Response { Ok(get_event_authorization::v1::Response {
auth_chain: auth_chain_ids auth_chain: auth_chain_ids
@ -2523,7 +2550,7 @@ pub fn get_room_state_route(
}) })
.collect(); .collect();
let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?;
Ok(get_room_state::v1::Response { Ok(get_room_state::v1::Response {
auth_chain: auth_chain_ids auth_chain: auth_chain_ids
@ -2579,13 +2606,13 @@ pub fn get_room_state_ids_route(
.rooms .rooms
.state_full_ids(shortstatehash)? .state_full_ids(shortstatehash)?
.into_iter() .into_iter()
.map(|(_, id)| (*id).to_owned()) .map(|(_, id)| (*id).clone())
.collect(); .collect();
let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?;
Ok(get_room_state_ids::v1::Response { Ok(get_room_state_ids::v1::Response {
auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), auth_chain_ids: auth_chain_ids.map(|id| (*id).clone()).collect(),
pdu_ids, pdu_ids,
} }
.into()) .into())
@ -2643,9 +2670,12 @@ pub fn create_join_event_template_route(
None None
}; };
// If there was no create event yet, assume we are creating a version 6 room right now // If there was no create event yet, assume we are creating a room with the default version
let room_version_id = // right now
create_event_content.map_or(RoomVersionId::V6, |create_event| create_event.room_version); let room_version_id = create_event_content
.map_or(db.globals.default_room_version(), |create_event| {
create_event.room_version
});
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
if !body.ver.contains(&room_version_id) { if !body.ver.contains(&room_version_id) {
@ -2665,7 +2695,6 @@ pub fn create_join_event_template_route(
membership: MembershipState::Join, membership: MembershipState::Join,
third_party_invite: None, third_party_invite: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("member event is valid value"); .expect("member event is valid value");
@ -2699,7 +2728,7 @@ pub fn create_join_event_template_route(
} }
let pdu = PduEvent { let pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(), event_id: ruma::event_id!("$thiswillbefilledinlater"),
room_id: body.room_id.clone(), room_id: body.room_id.clone(),
sender: body.user_id.clone(), sender: body.user_id.clone(),
origin_server_ts: utils::millis_since_unix_epoch() origin_server_ts: utils::millis_since_unix_epoch()
@ -2786,7 +2815,7 @@ async fn create_join_event(
// let mut auth_cache = EventMap::new(); // let mut auth_cache = EventMap::new();
// We do not add the event_id field to the pdu here because of signature and hashes checks // We do not add the event_id field to the pdu here because of signature and hashes checks
let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) {
Ok(t) => t, Ok(t) => t,
Err(_) => { Err(_) => {
// Event could not be converted to canonical json // Event could not be converted to canonical json
@ -2811,7 +2840,7 @@ async fn create_join_event(
.roomid_mutex_federation .roomid_mutex_federation
.write() .write()
.unwrap() .unwrap()
.entry(room_id.to_owned()) .entry(room_id.clone())
.or_default(), .or_default(),
); );
let mutex_lock = mutex.lock().await; let mutex_lock = mutex.lock().await;
@ -2910,7 +2939,7 @@ pub async fn create_invite_route(
return Err(Error::bad_config("Federation is disabled.")); return Err(Error::bad_config("Federation is disabled."));
} }
if body.room_version != RoomVersionId::V5 && body.room_version != RoomVersionId::V6 { if !db.rooms.is_supported_version(&db, &body.room_version) {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::IncompatibleRoomVersion { ErrorKind::IncompatibleRoomVersion {
room_version: body.room_version.clone(), room_version: body.room_version.clone(),
@ -2931,7 +2960,7 @@ pub async fn create_invite_route(
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?;
// Generate event id // Generate event id
let event_id = EventId::parse(format!( let event_id = EventId::try_from(&*format!(
"${}", "${}",
ruma::signatures::reference_hash(&signed_event, &body.room_version) ruma::signatures::reference_hash(&signed_event, &body.room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
@ -2944,7 +2973,7 @@ pub async fn create_invite_route(
CanonicalJsonValue::String(event_id.into()), CanonicalJsonValue::String(event_id.into()),
); );
let sender: Box<_> = serde_json::from_value( let sender = serde_json::from_value(
signed_event signed_event
.get("sender") .get("sender")
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
@ -2956,7 +2985,7 @@ pub async fn create_invite_route(
) )
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?;
let invited_user: Box<_> = serde_json::from_value( let invited_user = serde_json::from_value(
signed_event signed_event
.get("state_key") .get("state_key")
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
@ -3207,7 +3236,7 @@ pub(crate) async fn fetch_required_signing_keys(
let fetch_res = fetch_signing_keys( let fetch_res = fetch_signing_keys(
db, db,
signature_server.as_str().try_into().map_err(|_| { &Box::<ServerName>::try_from(&**signature_server).map_err(|_| {
Error::BadServerResponse("Invalid servername in signatures of server response pdu.") Error::BadServerResponse("Invalid servername in signatures of server response pdu.")
})?, })?,
signature_ids, signature_ids,
@ -3235,7 +3264,7 @@ pub(crate) async fn fetch_required_signing_keys(
// the PDUs and either cache the key or add it to the list that needs to be retrieved. // the PDUs and either cache the key or add it to the list that needs to be retrieved.
fn get_server_keys_from_cache( fn get_server_keys_from_cache(
pdu: &RawJsonValue, pdu: &RawJsonValue,
servers: &mut BTreeMap<Box<ServerName>, BTreeMap<Box<ServerSigningKeyId>, QueryCriteria>>, servers: &mut BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, QueryCriteria>>,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, String>>>,
db: &Database, db: &Database,
@ -3245,12 +3274,11 @@ fn get_server_keys_from_cache(
Error::BadServerResponse("Invalid PDU in server response") Error::BadServerResponse("Invalid PDU in server response")
})?; })?;
let event_id = format!( let event_id = EventId::try_from(&*format!(
"${}", "${}",
ruma::signatures::reference_hash(&value, room_version) ruma::signatures::reference_hash(&value, room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
); ))
let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
if let Some((time, tries)) = db if let Some((time, tries)) = db
@ -3258,7 +3286,7 @@ fn get_server_keys_from_cache(
.bad_event_ratelimiter .bad_event_ratelimiter
.read() .read()
.unwrap() .unwrap()
.get(event_id) .get(&event_id)
{ {
// Exponential backoff // Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
@ -3292,7 +3320,7 @@ fn get_server_keys_from_cache(
let contains_all_ids = let contains_all_ids =
|keys: &BTreeMap<String, String>| signature_ids.iter().all(|id| keys.contains_key(id)); |keys: &BTreeMap<String, String>| signature_ids.iter().all(|id| keys.contains_key(id));
let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { let origin = &Box::<ServerName>::try_from(&**signature_server).map_err(|_| {
Error::BadServerResponse("Invalid servername in signatures of server response pdu.") Error::BadServerResponse("Invalid servername in signatures of server response pdu.")
})?; })?;
@ -3311,7 +3339,7 @@ fn get_server_keys_from_cache(
if !contains_all_ids(&result) { if !contains_all_ids(&result) {
trace!("Signing key not loaded for {}", origin); trace!("Signing key not loaded for {}", origin);
servers.insert(origin.to_owned(), BTreeMap::new()); servers.insert(origin.clone(), BTreeMap::new());
} }
pub_key_map.insert(origin.to_string(), result); pub_key_map.insert(origin.to_string(), result);
@ -3326,7 +3354,7 @@ pub(crate) async fn fetch_join_signing_keys(
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
db: &Database, db: &Database,
) -> Result<()> { ) -> Result<()> {
let mut servers: BTreeMap<Box<ServerName>, BTreeMap<Box<ServerSigningKeyId>, QueryCriteria>> = let mut servers: BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, QueryCriteria>> =
BTreeMap::new(); BTreeMap::new();
{ {
@ -3360,6 +3388,10 @@ pub(crate) async fn fetch_join_signing_keys(
server, server,
get_remote_server_keys_batch::v2::Request { get_remote_server_keys_batch::v2::Request {
server_keys: servers.clone(), server_keys: servers.clone(),
minimum_valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(60),
)
.expect("time is valid"),
}, },
) )
.await .await
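
The batched key request asks the notary server for keys still valid one minute from now. A sketch of that timestamp using only std, with the ruma MilliSecondsSinceUnixEpoch wrapper replaced by a plain u64 of milliseconds:

use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Milliseconds since the Unix epoch for "now + 60s", the instant the hunk
// above passes as minimum_valid_until_ts (ruma wraps it in a newtype).
fn minimum_valid_until_ms() -> u64 {
    (SystemTime::now() + Duration::from_secs(60))
        .duration_since(UNIX_EPOCH)
        .expect("time is valid")
        .as_millis() as u64
}

fn main() {
    println!("minimum_valid_until_ts = {}", minimum_valid_until_ms());
}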
