Compare commits


51 Commits

Author SHA1 Message Date
Timo Kösters 9b57c89df6 Merge branch 'more-event-id-arcs' into 'next' 4 years ago
Jonas Platte 34d3f74f36 Use Arc for EventIds in PDUs 4 years ago
Timo Kösters 11a21fc136 Merge branch 'up-ruma' into 'next' 4 years ago
Jonas Platte 0183d003d0 Revert rename of Ruma<_> parameters 4 years ago
Jonas Platte f712455047 Reduce EventId copying 4 years ago
Jonas Platte 58ea081762 Use int! macro instead of Int::from 4 years ago
Jonas Platte bffddbd487 Simplify identifier parsing code 4 years ago
Jonas Platte 41fef1da64 Remove unnecessary .to_string() calls 4 years ago
Jonas Platte 892a0525f2 Upgrade Ruma 4 years ago
Jonas Platte 1fc616320a Use struct init shorthand 4 years ago
Timo Kösters 14a178d783 Merge branch 'update-docker-base-image' into 'next' 4 years ago
Jonas Zohren 339a26f56c Update docker images 4 years ago
Timo Kösters ca724b6340 Merge branch '226-fix-docker-ci-issues' into 'next' 4 years ago
Jonas Zohren 9bfc7b34b6 Fixes for !225 4 years ago
Timo Kösters afa5d449c6 Merge branch 'better-multiarch-building-the-second-attempt' into 'next' 4 years ago
Jonas Zohren 2fff720df3 CI: New Multiarch builds and Docker images + cargo clippy/test output now integrated into GitLab 4 years ago
Timo Kösters da00f611e9 Merge branch 'feature/turn-server-settings' into 'next' 4 years ago
Moritz Bitsch 9fccbb014a Implement TURN server authentication with hmac 4 years ago
Moritz Bitsch 109892b4b7 Implement turn server settings 4 years ago
Jonas Platte 24a835647c Merge branch 'nyaaori/fix-room-creation' into 'next' 4 years ago
Nyaaori c4bce1d0c7 Cleanup room.rs; replace unwraps with map_err 4 years ago
Jonas Platte 9b63708685 Merge branch 'nyaaori/fix-join-panic' into 'next' 4 years ago
Nyaaori 86177faae7 Fix join panic bug 4 years ago
Timo Kösters 2a749c1e99 Merge branch 'nyaaori/implement-report' into 'next' 4 years ago
Timo Kösters 5ae753b2e7 Merge branch 'amorgan-next-patch-81816' into 'next' 4 years ago
Andrew Morgan 743bdbe961 Add 'Federation publicRoom Name/topic keys are correct' test to sytest whitelist 4 years ago
Nyaaori d5d25fb064 Preserve all m.room.create entries when performing room upgrades 4 years ago
Nyaaori 8087a26a35 Make createRoom follow spec for m.room.create, allowing creation of spaces 4 years ago
Nyaaori bbe16f8467 Update Ruma 4 years ago
Nyaaori 50f931a2fd Cleanup and fix validation in report.rs, lower max report length, better html 4 years ago
Nyaaori 1541b93f45 Make reports look nicer and reduce spam potential, increase max report length to 1000 characters 4 years ago
Nyaaori ccf501a420 Initial implementation of /report, fixing #13 4 years ago
Timo Kösters 6f70beb78c Merge branch 'nyaaori/fix-bad-json' into 'next' 4 years ago
Timo Kösters 259fc580ca Merge branch 'nyaaori/otk-sync-fix' into 'next' 4 years ago
Nyaaori 55d78b1914 Bump Ruma version to fix M_BAD_JSON on login 4 years ago
Nyaaori 484a044b50 Remove device_one_time_keys_count from is_empty() sync checks, fixing sync issue as reported by Nekron 4 years ago
Nyaaori 2264a5f945 Merge branch 'next' of https://gitlab.com/famedly/conduit into next 4 years ago
Timo Kösters e88929e154 Merge branch 'nyaaori/fix-otk-reporting' into 'next' 4 years ago
Nyaaori d996d1b0e6 Always send device_one_time_keys_count, fixing #178 4 years ago
Timo Kösters dc8bc4a880 Merge branch 'up-ruma' into 'next' 4 years ago
Timo Kösters 87694f4369 Merge branch 'moa31-next-patch-76037' into 'next' 4 years ago
Timo Kösters 335027e739 Merge branch 'nyaaori/admin-check' into 'next' 4 years ago
Timo Kösters 0ccda5f1c7 Merge branch 'nyaaori/allow_encryption' into 'next' 4 years ago
Nyaaori 9082a531c9 Make allow_encryption work again, fixing #115 4 years ago
Jonas Platte 91afa1e0e0 Make Result alias usable with any error type 4 years ago
Jonas Platte f2ef5677e0 Reduce turbofish usage 4 years ago
Jonas Platte 1c4d9af586 Enable more lints and apply their suggestions 4 years ago
Jonas Platte 09895a20c8 Upgrade Ruma 4 years ago
Nyaaori 636db8cfaa Make allow_encryption work again, fixing #115 4 years ago
Nyaaori 6bc8fb2ae7 Implement admin check and add config option for allowing room creation 4 years ago
Luc-pascal Ceccaldi 5b23d3d06e Change listen address when running inside a Container to prevent Bad Gateway error 4 years ago
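Several of the commits above ('Use Arc for EventIds in PDUs', 'Reduce EventId copying') share event IDs behind an `Arc` instead of cloning the ID string for every reference a PDU holds; the serde "rc" feature enabled in Cargo.toml below supports serializing such shared values. A minimal sketch of the idea using only the standard library — the `EventId` alias and `Pdu` struct here are illustrative stand-ins, not the actual ruma/Conduit types:

```rust
use std::sync::Arc;

// Illustrative stand-in for an owned Matrix event ID; the real type lives in ruma-identifiers.
type EventId = str;

// A PDU that references other events. Holding Arc<EventId> means that
// prev_events (and auth_events, state keys, ...) can share one allocation
// per ID instead of each PDU owning its own copy of the string.
struct Pdu {
    event_id: Arc<EventId>,
    prev_events: Vec<Arc<EventId>>,
}

fn main() {
    let id: Arc<EventId> = Arc::from("$some_event_id:example.org");

    // Cloning an Arc only bumps a reference count; no string data is copied.
    let pdu = Pdu {
        event_id: Arc::clone(&id),
        prev_events: vec![Arc::clone(&id)],
    };

    assert_eq!(&*pdu.event_id, "$some_event_id:example.org");
    assert_eq!(Arc::strong_count(&id), 3);
}
```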
Files changed (52), with the number of changed lines per file:

1. .dockerignore — 2
2. .gitlab-ci.yml — 337
3. Cargo.lock — 92
4. Cargo.toml — 10
5. DEPLOY.md — 55
6. Dockerfile — 132
7. conduit-example.toml — 3
8. docker/README.md — 107
9. docker/ci-binaries-packaging.Dockerfile — 77
10. docker/healthcheck.sh — 6
11. rust-toolchain — 2
12. src/client_server/account.rs — 111
13. src/client_server/capabilities.rs — 6
14. src/client_server/config.rs — 12
15. src/client_server/context.rs — 16
16. src/client_server/device.rs — 10
17. src/client_server/directory.rs — 256
18. src/client_server/keys.rs — 30
19. src/client_server/membership.rs — 212
20. src/client_server/message.rs — 47
21. src/client_server/mod.rs — 2
22. src/client_server/profile.rs — 20
23. src/client_server/push.rs — 44
24. src/client_server/redact.rs — 9
25. src/client_server/report.rs — 84
26. src/client_server/room.rs — 275
27. src/client_server/search.rs — 6
28. src/client_server/session.rs — 12
29. src/client_server/state.rs — 36
30. src/client_server/sync.rs — 89
31. src/client_server/tag.rs — 23
32. src/client_server/voip.rs — 55
33. src/database.rs — 44
34. src/database/abstraction.rs — 4
35. src/database/admin.rs — 26
36. src/database/globals.rs — 45
37. src/database/key_backups.rs — 21
38. src/database/proxy.rs — 6
39. src/database/pusher.rs — 38
40. src/database/rooms.rs — 582
41. src/database/rooms/edus.rs — 63
42. src/database/sending.rs — 61
43. src/database/uiaa.rs — 29
44. src/database/users.rs — 54
45. src/error.rs — 2
46. src/lib.rs — 6
47. src/main.rs — 8
48. src/pdu.rs — 132
49. src/ruma_wrapper.rs — 12
50. src/server_server.rs — 370
51. src/utils.rs — 2
52. tests/sytest/sytest-whitelist — 1

.dockerignore (2 changed lines)

@@ -14,6 +14,8 @@ docker-compose*
 # Git folder
 .git
 .gitea
+.gitlab
+.github
 # Dot files
 .env

.gitlab-ci.yml (337 changed lines)

@ -9,7 +9,6 @@ variables:
FF_USE_FASTZIP: 1 FF_USE_FASTZIP: 1
CACHE_COMPRESSION_LEVEL: fastest CACHE_COMPRESSION_LEVEL: fastest
# --------------------------------------------------------------------- # # --------------------------------------------------------------------- #
# Cargo: Compiling for different architectures # # Cargo: Compiling for different architectures #
# --------------------------------------------------------------------- # # --------------------------------------------------------------------- #
@ -20,7 +19,7 @@ variables:
rules: rules:
- if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "master"'
- if: '$CI_COMMIT_BRANCH == "next"' - if: '$CI_COMMIT_BRANCH == "next"'
- if: '$CI_COMMIT_TAG' - if: "$CI_COMMIT_TAG"
interruptible: true interruptible: true
image: "rust:latest" image: "rust:latest"
tags: ["docker"] tags: ["docker"]
@ -28,258 +27,184 @@ variables:
paths: paths:
- cargohome - cargohome
- target/ - target/
key: "build_cache-$TARGET-release" key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--release"
variables: variables:
CARGO_PROFILE_RELEASE_LTO=true CARGO_PROFILE_RELEASE_LTO: "true"
CARGO_PROFILE_RELEASE_CODEGEN_UNITS=1 CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1"
before_script: before_script:
- 'echo "Building for target $TARGET"' - 'echo "Building for target $TARGET"'
- 'mkdir -p cargohome && CARGOHOME="cargohome"' - 'mkdir -p cargohome && CARGOHOME="cargohome"'
- "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - "rustc --version && cargo --version && rustup show" # Print version info for debugging
- 'apt-get update -yqq'
- 'echo "Installing packages: $NEEDED_PACKAGES"'
- "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES"
- "rustup target add $TARGET" - "rustup target add $TARGET"
script: script:
- time cargo build --target $TARGET --release - time cargo build --target $TARGET --release
- 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"'
artifacts: artifacts:
expire_in: never expire_in: never
build:release:cargo:x86_64-unknown-linux-musl-with-debug:
build:release:cargo:x86_64-unknown-linux-gnu:
extends: .build-cargo-shared-settings extends: .build-cargo-shared-settings
image: messense/rust-musl-cross:x86_64-musl
variables: variables:
TARGET: "x86_64-unknown-linux-gnu" CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling
TARGET: "x86_64-unknown-linux-musl"
after_script:
- "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug"
artifacts: artifacts:
name: "conduit-x86_64-unknown-linux-gnu" name: "conduit-x86_64-unknown-linux-musl-with-debug"
paths: paths:
- "conduit-x86_64-unknown-linux-gnu" - "conduit-x86_64-unknown-linux-musl-with-debug"
expose_as: "Conduit for x86_64-unknown-linux-gnu" expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug"
build:release:cargo:armv7-unknown-linux-gnueabihf: build:release:cargo:x86_64-unknown-linux-musl:
extends: .build-cargo-shared-settings extends: .build-cargo-shared-settings
image: messense/rust-musl-cross:x86_64-musl
variables: variables:
TARGET: "armv7-unknown-linux-gnueabihf" TARGET: "x86_64-unknown-linux-musl"
NEEDED_PACKAGES: "build-essential gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross"
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc
CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc
CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++
artifacts: artifacts:
name: "conduit-armv7-unknown-linux-gnueabihf" name: "conduit-x86_64-unknown-linux-musl"
paths: paths:
- "conduit-armv7-unknown-linux-gnueabihf" - "conduit-x86_64-unknown-linux-musl"
expose_as: "Conduit for armv7-unknown-linux-gnueabihf" expose_as: "Conduit for x86_64-unknown-linux-musl"
build:release:cargo:aarch64-unknown-linux-gnu: build:release:cargo:arm-unknown-linux-musleabihf:
extends: .build-cargo-shared-settings extends: .build-cargo-shared-settings
image: messense/rust-musl-cross:arm-musleabihf
variables: variables:
TARGET: "aarch64-unknown-linux-gnu" TARGET: "arm-unknown-linux-musleabihf"
NEEDED_PACKAGES: "build-essential gcc-10-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross"
CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc
CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc
CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++
TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-10"
TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-10"
artifacts: artifacts:
name: "conduit-aarch64-unknown-linux-gnu" name: "conduit-arm-unknown-linux-musleabihf"
paths: paths:
- "conduit-aarch64-unknown-linux-gnu" - "conduit-arm-unknown-linux-musleabihf"
expose_as: "Conduit for aarch64-unknown-linux-gnu" expose_as: "Conduit for arm-unknown-linux-musleabihf"
build:release:cargo:x86_64-unknown-linux-musl: build:release:cargo:armv7-unknown-linux-musleabihf:
extends: .build-cargo-shared-settings extends: .build-cargo-shared-settings
image: "rust:alpine" image: messense/rust-musl-cross:armv7-musleabihf
variables: variables:
TARGET: "x86_64-unknown-linux-musl" TARGET: "armv7-unknown-linux-musleabihf"
before_script:
- 'echo "Building for target $TARGET"'
- 'mkdir -p cargohome && CARGOHOME="cargohome"'
- "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
- "rustup target add $TARGET"
- "apk add libc-dev"
artifacts: artifacts:
name: "conduit-x86_64-unknown-linux-musl" name: "conduit-armv7-unknown-linux-musleabihf"
paths: paths:
- "conduit-x86_64-unknown-linux-musl" - "conduit-armv7-unknown-linux-musleabihf"
expose_as: "Conduit for x86_64-unknown-linux-musl" expose_as: "Conduit for armv7-unknown-linux-musleabihf"
build:release:cargo:aarch64-unknown-linux-musl:
extends: .build-cargo-shared-settings
image: messense/rust-musl-cross:aarch64-musl
variables:
TARGET: "aarch64-unknown-linux-musl"
artifacts:
name: "conduit-aarch64-unknown-linux-musl"
paths:
- "conduit-aarch64-unknown-linux-musl"
expose_as: "Conduit for aarch64-unknown-linux-musl"
.cargo-debug-shared-settings: .cargo-debug-shared-settings:
extends: ".build-cargo-shared-settings" extends: ".build-cargo-shared-settings"
rules: rules:
- if: '$CI_COMMIT_BRANCH' - if: '$CI_COMMIT_BRANCH != "master"'
- if: '$CI_COMMIT_TAG'
cache: cache:
key: "build_cache-$TARGET-debug" key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug"
script: script:
- "time cargo build --target $TARGET" - "time cargo build --target $TARGET"
- 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"'
artifacts: artifacts:
expire_in: 4 weeks expire_in: 4 weeks
build:debug:cargo:x86_64-unknown-linux-gnu:
extends: ".cargo-debug-shared-settings"
variables:
TARGET: "x86_64-unknown-linux-gnu"
artifacts:
name: "conduit-debug-x86_64-unknown-linux-gnu"
paths:
- "conduit-debug-x86_64-unknown-linux-gnu"
expose_as: "Conduit DEBUG for x86_64-unknown-linux-gnu"
build:debug:cargo:x86_64-unknown-linux-musl: build:debug:cargo:x86_64-unknown-linux-musl:
extends: ".cargo-debug-shared-settings" extends: ".cargo-debug-shared-settings"
image: "rust:alpine" image: messense/rust-musl-cross:x86_64-musl
variables: variables:
TARGET: "x86_64-unknown-linux-musl" TARGET: "x86_64-unknown-linux-musl"
before_script:
- 'echo "Building for target $TARGET"'
- 'mkdir -p cargohome && CARGOHOME="cargohome"'
- "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
- "rustup target add $TARGET"
- "apk add libc-dev"
artifacts: artifacts:
name: "conduit-debug-x86_64-unknown-linux-musl" name: "conduit-debug-x86_64-unknown-linux-musl"
paths: paths:
- "conduit-debug-x86_64-unknown-linux-musl" - "conduit-debug-x86_64-unknown-linux-musl"
expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl" expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl"
# --------------------------------------------------------------------- #
# Cargo: Compiling deb packages for different architectures #
# --------------------------------------------------------------------- #
.build-cargo-deb-shared-settings:
stage: "build"
needs: [ ]
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
- if: '$CI_COMMIT_BRANCH == "next"'
- if: '$CI_COMMIT_TAG'
interruptible: true
image: "rust:latest"
tags: ["docker"]
cache:
paths:
- cargohome
- target/
key: "build_cache-deb-$TARGET"
before_script:
- 'echo "Building debian package for target $TARGET"'
- 'mkdir -p cargohome && CARGOHOME="cargohome"'
- "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging
- 'apt-get update -yqq'
- 'echo "Installing packages: $NEEDED_PACKAGES"'
- "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES"
- "rustup target add $TARGET"
- "cargo install cargo-deb"
script:
- time cargo deb --target $TARGET
- 'mv target/$TARGET/debian/*.deb "conduit-$TARGET.deb"'
build:cargo-deb:x86_64-unknown-linux-gnu:
extends: .build-cargo-deb-shared-settings
variables:
TARGET: "x86_64-unknown-linux-gnu"
NEEDED_PACKAGES: ""
artifacts:
name: "conduit-x86_64-unknown-linux-gnu.deb"
paths:
- "conduit-x86_64-unknown-linux-gnu.deb"
expose_as: "Debian Package x86_64"
# --------------------------------------------------------------------- # # --------------------------------------------------------------------- #
# Create and publish docker image # # Create and publish docker image #
# --------------------------------------------------------------------- # # --------------------------------------------------------------------- #
# Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image
.docker-shared-settings: .docker-shared-settings:
stage: "build docker image" stage: "build docker image"
needs: [] image: jdrouet/docker-with-buildx:stable
interruptible: true
image:
name: "gcr.io/kaniko-project/executor:debug"
entrypoint: [""]
tags: ["docker"] tags: ["docker"]
variables: services:
# Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache - docker:dind
KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache"
before_script:
- "mkdir -p /kaniko/.docker"
- 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json'
build:docker:next:
extends: .docker-shared-settings
needs: needs:
- "build:release:cargo:x86_64-unknown-linux-musl" - "build:release:cargo:x86_64-unknown-linux-musl"
- "build:release:cargo:arm-unknown-linux-musleabihf"
- "build:release:cargo:armv7-unknown-linux-musleabihf"
- "build:release:cargo:aarch64-unknown-linux-musl"
variables:
DOCKER_HOST: tcp://docker:2375/
DOCKER_TLS_CERTDIR: ""
DOCKER_DRIVER: overlay2
PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64"
DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile"
cache:
paths:
- docker_cache
key: "$CI_JOB_NAME"
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
# Only log in to Dockerhub if the credentials are given:
- if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
script: script:
# Prepare buildx to build multiarch stuff:
- docker context create 'ci-context'
- docker buildx create --name 'multiarch-builder' --use 'ci-context'
# Copy binaries to their docker arch path
- mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64
- mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6
- mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7
- mv ./conduit-aarch64-unknown-linux-musl linux/arm64
- 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"'
# Build and push image:
- > - >
/kaniko/executor docker buildx build
$KANIKO_CACHE_ARGS --pull
--force --push
--context $CI_PROJECT_DIR --cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache
--build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache
--build-arg CREATED=$CREATED
--build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
--build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
--dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" --platform "$PLATFORMS"
--destination "$CI_REGISTRY_IMAGE/conduit:next" --tag "$TAG"
--destination "$CI_REGISTRY_IMAGE/conduit:next-alpine" --tag "$TAG-alpine"
--destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" --tag "$TAG-commit-$CI_COMMIT_SHORT_SHA"
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" --file "$DOCKER_FILE" .
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine"
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" docker:next:gitlab:
extends: .docker-shared-settings
rules: rules:
- if: '$CI_COMMIT_BRANCH == "next"' - if: '$CI_COMMIT_BRANCH == "next"'
variables:
TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
docker:next:dockerhub:
extends: .docker-shared-settings
rules:
- if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB'
variables:
TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
build:docker:master: docker:master:gitlab:
extends: .docker-shared-settings extends: .docker-shared-settings
needs:
- "build:release:cargo:x86_64-unknown-linux-musl"
script:
- >
/kaniko/executor
$KANIKO_CACHE_ARGS
--context $CI_PROJECT_DIR
--build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
--build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
--build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
--dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile"
--destination "$CI_REGISTRY_IMAGE/conduit:latest"
--destination "$CI_REGISTRY_IMAGE/conduit:latest-alpine"
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest-alpine"
rules: rules:
- if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "master"'
variables:
TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
docker:master:dockerhub:
build:docker:tags:
extends: .docker-shared-settings extends: .docker-shared-settings
needs:
- "build:release:cargo:x86_64-unknown-linux-musl"
script:
- >
/kaniko/executor
$KANIKO_CACHE_ARGS
--context $CI_PROJECT_DIR
--build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
--build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
--build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
--dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile"
--destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG"
--destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG-alpine"
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG"
--destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG-alpine"
rules: rules:
- if: '$CI_COMMIT_TAG' - if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB'
variables:
TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
# --------------------------------------------------------------------- # # --------------------------------------------------------------------- #
# Run tests # # Run tests #
@ -287,9 +212,9 @@ build:docker:tags:
test:cargo: test:cargo:
stage: "test" stage: "test"
needs: [ ] needs: []
image: "rust:latest" image: "rust:latest"
tags: [ "docker" ] tags: ["docker"]
variables: variables:
CARGO_HOME: "cargohome" CARGO_HOME: "cargohome"
cache: cache:
@ -301,13 +226,20 @@ test:cargo:
before_script: before_script:
- mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps"
- apt-get update -yqq - apt-get update -yqq
- apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget
- rustup component add clippy rustfmt - rustup component add clippy rustfmt
- wget "https://faulty-storage.de/gitlab-report"
- chmod +x ./gitlab-report
script: script:
- rustc --version && cargo --version # Print version info for debugging - rustc --version && cargo --version # Print version info for debugging
- cargo fmt --all -- --check - cargo fmt --all -- --check
- cargo test --workspace --verbose --locked - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | ./gitlab-report -p test > $CI_PROJECT_DIR/report.xml"
- cargo clippy - "cargo clippy --color always --verbose --message-format=json | ./gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
artifacts:
when: always
reports:
junit: report.xml
codequality: gl-code-quality-report.json
test:sytest: test:sytest:
stage: "test" stage: "test"
@ -316,8 +248,8 @@ test:sytest:
- "build:debug:cargo:x86_64-unknown-linux-musl" - "build:debug:cargo:x86_64-unknown-linux-musl"
image: image:
name: "valkum/sytest-conduit:latest" name: "valkum/sytest-conduit:latest"
entrypoint: [ "" ] entrypoint: [""]
tags: [ "docker" ] tags: ["docker"]
variables: variables:
PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz"
before_script: before_script:
@ -330,7 +262,7 @@ test:sytest:
script: script:
- "SYTEST_EXIT_CODE=0" - "SYTEST_EXIT_CODE=0"
- "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1" - "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1"
- "perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml \"Sytest\" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap" - 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap'
- "exit $SYTEST_EXIT_CODE" - "exit $SYTEST_EXIT_CODE"
artifacts: artifacts:
when: always when: always
@ -340,7 +272,6 @@ test:sytest:
reports: reports:
junit: "$CI_PROJECT_DIR/sytest.xml" junit: "$CI_PROJECT_DIR/sytest.xml"
# --------------------------------------------------------------------- # # --------------------------------------------------------------------- #
# Store binaries as package so they have download urls # # Store binaries as package so they have download urls #
# --------------------------------------------------------------------- # # --------------------------------------------------------------------- #
@ -348,25 +279,31 @@ test:sytest:
publish:package: publish:package:
stage: "upload artifacts" stage: "upload artifacts"
needs: needs:
- "build:release:cargo:x86_64-unknown-linux-gnu"
- "build:release:cargo:armv7-unknown-linux-gnueabihf"
- "build:release:cargo:aarch64-unknown-linux-gnu"
- "build:release:cargo:x86_64-unknown-linux-musl" - "build:release:cargo:x86_64-unknown-linux-musl"
- "build:cargo-deb:x86_64-unknown-linux-gnu" - "build:release:cargo:arm-unknown-linux-musleabihf"
- "build:release:cargo:armv7-unknown-linux-musleabihf"
- "build:release:cargo:aarch64-unknown-linux-musl"
# - "build:cargo-deb:x86_64-unknown-linux-gnu"
rules: rules:
- if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "master"'
- if: '$CI_COMMIT_BRANCH == "next"' - if: '$CI_COMMIT_BRANCH == "next"'
- if: '$CI_COMMIT_TAG' - if: "$CI_COMMIT_TAG"
image: curlimages/curl:latest image: curlimages/curl:latest
tags: ["docker"] tags: ["docker"]
variables: variables:
GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
script: script:
- 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${BASE_URL}/conduit-armv7-unknown-linux-gnueabihf"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"'
# Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
workflow:
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
when: never
- if: "$CI_COMMIT_BRANCH"

Cargo.lock (92 changed lines, generated)

@ -245,6 +245,7 @@ dependencies = [
"crossbeam", "crossbeam",
"directories", "directories",
"heed", "heed",
"hmac",
"http", "http",
"image", "image",
"jsonwebtoken", "jsonwebtoken",
@ -266,6 +267,7 @@ dependencies = [
"serde", "serde",
"serde_json", "serde_json",
"serde_yaml", "serde_yaml",
"sha-1",
"sled", "sled",
"thiserror", "thiserror",
"thread_local", "thread_local",
@ -428,6 +430,16 @@ dependencies = [
"lazy_static", "lazy_static",
] ]
[[package]]
name = "crypto-mac"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
dependencies = [
"generic-array",
"subtle",
]
[[package]] [[package]]
name = "curve25519-dalek" name = "curve25519-dalek"
version = "3.2.0" version = "3.2.0"
@ -897,6 +909,16 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "hmac"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
dependencies = [
"crypto-mac",
"digest",
]
[[package]] [[package]]
name = "hostname" name = "hostname"
version = "0.3.1" version = "0.3.1"
@ -1494,12 +1516,6 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "paste"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
[[package]] [[package]]
name = "pear" name = "pear"
version = "0.2.3" version = "0.2.3"
@ -1968,7 +1984,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma" name = "ruma"
version = "0.4.0" version = "0.4.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"assign", "assign",
"js_int", "js_int",
@ -1988,8 +2004,8 @@ dependencies = [
[[package]] [[package]]
name = "ruma-api" name = "ruma-api"
version = "0.18.3" version = "0.18.5"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"bytes", "bytes",
"http", "http",
@ -2004,8 +2020,8 @@ dependencies = [
[[package]] [[package]]
name = "ruma-api-macros" name = "ruma-api-macros"
version = "0.18.3" version = "0.18.5"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2016,7 +2032,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-appservice-api" name = "ruma-appservice-api"
version = "0.4.0" version = "0.4.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"ruma-api", "ruma-api",
"ruma-common", "ruma-common",
@ -2029,8 +2045,8 @@ dependencies = [
[[package]] [[package]]
name = "ruma-client-api" name = "ruma-client-api"
version = "0.12.2" version = "0.12.3"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"assign", "assign",
"bytes", "bytes",
@ -2050,7 +2066,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-common" name = "ruma-common"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"indexmap", "indexmap",
"js_int", "js_int",
@ -2064,8 +2080,8 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events" name = "ruma-events"
version = "0.24.5" version = "0.24.6"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"indoc", "indoc",
"js_int", "js_int",
@ -2080,8 +2096,8 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events-macros" name = "ruma-events-macros"
version = "0.24.5" version = "0.24.6"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2092,7 +2108,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-federation-api" name = "ruma-federation-api"
version = "0.3.1" version = "0.3.1"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2107,9 +2123,9 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers" name = "ruma-identifiers"
version = "0.20.0" version = "0.20.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"paste", "percent-encoding",
"rand 0.8.4", "rand 0.8.4",
"ruma-identifiers-macros", "ruma-identifiers-macros",
"ruma-identifiers-validation", "ruma-identifiers-validation",
@ -2121,7 +2137,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-macros" name = "ruma-identifiers-macros"
version = "0.20.0" version = "0.20.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"quote", "quote",
"ruma-identifiers-validation", "ruma-identifiers-validation",
@ -2131,12 +2147,15 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-validation" name = "ruma-identifiers-validation"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [
"thiserror",
]
[[package]] [[package]]
name = "ruma-identity-service-api" name = "ruma-identity-service-api"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2149,7 +2168,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-push-gateway-api" name = "ruma-push-gateway-api"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2164,7 +2183,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-serde" name = "ruma-serde"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"bytes", "bytes",
"form_urlencoded", "form_urlencoded",
@ -2178,7 +2197,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-serde-macros" name = "ruma-serde-macros"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2189,7 +2208,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-signatures" name = "ruma-signatures"
version = "0.9.0" version = "0.9.0"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"base64 0.13.0", "base64 0.13.0",
"ed25519-dalek", "ed25519-dalek",
@ -2206,7 +2225,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-state-res" name = "ruma-state-res"
version = "0.4.1" version = "0.4.1"
source = "git+https://github.com/ruma/ruma?rev=a6a1224652912a957b09f136ec5da2686be6e0e2#a6a1224652912a957b09f136ec5da2686be6e0e2" source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9"
dependencies = [ dependencies = [
"itertools 0.10.1", "itertools 0.10.1",
"js_int", "js_int",
@ -2418,6 +2437,19 @@ dependencies = [
"yaml-rust", "yaml-rust",
] ]
[[package]]
name = "sha-1"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6"
dependencies = [
"block-buffer",
"cfg-if 1.0.0",
"cpufeatures",
"digest",
"opaque-debug",
]
[[package]] [[package]]
name = "sha1" name = "sha1"
version = "0.6.0" version = "0.6.0"

Cargo.toml (10 changed lines)

@@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request
 # Used for matrix spec type definitions and helpers
 #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
-ruma = { git = "https://github.com/ruma/ruma", rev = "a6a1224652912a957b09f136ec5da2686be6e0e2", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+ruma = { git = "https://github.com/ruma/ruma", rev = "16f031fabb7871fcd738b0f25391193ee4ca28a9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
@@ -40,7 +40,7 @@ serde_json = { version = "1.0.67", features = ["raw_value"] }
 # Used for appservice registration files
 serde_yaml = "0.8.20"
 # Used for pdu definition
-serde = "1.0.130"
+serde = { version = "1.0.130", features = ["rc"] }
 # Used for secure identifiers
 rand = "0.8.4"
 # Used to hash passwords
@@ -79,6 +79,9 @@ num_cpus = "1.13.0"
 threadpool = "1.8.1"
 heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
 thread_local = "1.1.3"
+# used for TURN server authentication
+hmac = "0.11.0"
+sha-1 = "0.9.8"
 [features]
 default = ["conduit_bin", "backend_sqlite"]
@@ -120,13 +123,12 @@ maintainer-scripts = "debian/"
 systemd-units = { unit-name = "matrix-conduit" }
 [profile.dev]
-lto = 'thin'
+lto = 'off'
 incremental = true
 [profile.release]
 lto = 'thin'
 incremental = true
 codegen-units=32
 # If you want to make flamegraphs, enable debug info:
 # debug = true
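The `hmac` and `sha-1` additions above back the 'Implement TURN server authentication with hmac' commit: given a shared secret from the server config, the homeserver can hand out short-lived TURN credentials where the username carries an expiry timestamp and the password is base64(HMAC-SHA1(secret, username)), the scheme coturn calls `use-auth-secret`. A rough sketch of that derivation against these crate versions — the function name, variable names, and the direct use of the `base64` crate (also present in Cargo.lock) are illustrative assumptions, not Conduit's actual code:

```rust
use hmac::{Hmac, Mac, NewMac};
use sha_1::Sha1; // the `sha-1` 0.9 package; adjust the import path if your version exposes it as `sha1`

type HmacSha1 = Hmac<Sha1>;

/// Derive short-lived TURN credentials from a shared secret:
/// username is an expiry timestamp (optionally combined with a user id),
/// password is base64(HMAC-SHA1(secret, username)).
fn turn_credentials(shared_secret: &str, expires_unix: u64) -> (String, String) {
    let username = expires_unix.to_string();

    // HMAC keys may have any length, so this cannot fail here.
    let mut mac = HmacSha1::new_from_slice(shared_secret.as_bytes())
        .expect("HMAC accepts keys of any length");
    mac.update(username.as_bytes());
    let password = base64::encode(mac.finalize().into_bytes());

    (username, password)
}

fn main() {
    let (user, pass) = turn_credentials("turn-shared-secret", 1_700_000_000);
    println!("username: {}, credential: {}", user, pass);
}
```

The TURN server is configured with the same secret and recomputes the HMAC to verify the credential, so no per-user state is shared between the homeserver and the TURN server.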

DEPLOY.md (55 changed lines)

@ -2,25 +2,27 @@
## Getting help ## Getting help
If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us
in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
## Installing Conduit ## Installing Conduit
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore
only offer Linux binaries.
| CPU Architecture | GNU (Ubuntu, Debian, ArchLinux, ...) | MUSL (Alpine, ... ) |
| -------------------- | ------------------------------------- | ----------------------- |
| x84_64 / amd64 | [Download][x84_64-gnu] | [Download][x84_64-musl] |
| armv7 (Raspberry Pi) | [Download][armv7-gnu] | - |
| armv8 / aarch64 | [Download][armv8-gnu] | - |
[x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:release:cargo:x86_64-unknown-linux-gnu
[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
[armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:release:cargo:armv7-unknown-linux-gnueabihf | CPU Architecture | Download stable version |
| ------------------------------------------- | ------------------------------ |
| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] |
| armv6 | [Download][armv6-musl-master] |
| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] |
| armv8 / aarch64 | [Download][armv8-musl-master] |
[armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:release:cargo:aarch64-unknown-linux-gnu [x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
```bash ```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url> $ sudo wget -O /usr/local/bin/matrix-conduit <url>
@ -32,15 +34,15 @@ Alternatively, you may compile the binary yourself using
```bash ```bash
$ cargo build --release $ cargo build --release
``` ```
Note that this currently requires Rust 1.50. Note that this currently requires Rust 1.50.
If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md).
## Adding a Conduit user ## Adding a Conduit user
While Conduit can run as any user it is usually better to use dedicated users for different services. While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows
This also allows you to make sure that the file permissions are correctly set up. you to make sure that the file permissions are correctly set up.
In Debian you can use this command to create a Conduit user: In Debian you can use this command to create a Conduit user:
@ -50,9 +52,8 @@ sudo adduser --system conduit --no-create-home
## Setting up a systemd service ## Setting up a systemd service
Now we'll set up a systemd service for Conduit, so it's easy to start/stop Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
Conduit and set it to autostart when your server reboots. Simply paste the server reboots. Simply paste the default systemd service you can find below into
default systemd service you can find below into
`/etc/systemd/system/conduit.service`. `/etc/systemd/system/conduit.service`.
```systemd ```systemd
@ -77,10 +78,10 @@ Finally, run
$ sudo systemctl daemon-reload $ sudo systemctl daemon-reload
``` ```
## Creating the Conduit configuration file ## Creating the Conduit configuration file
Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment to read it. You need to change at least the server name.** Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
to read it. You need to change at least the server name.**
```toml ```toml
[global] [global]
@ -128,8 +129,8 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re
## Setting the correct file permissions ## Setting the correct file permissions
As we are using a Conduit specific user we need to allow it to read the config. As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on
To do that you can run this command on Debian: Debian:
```bash ```bash
sudo chown -R conduit:nogroup /etc/matrix-conduit sudo chown -R conduit:nogroup /etc/matrix-conduit
@ -142,7 +143,6 @@ sudo mkdir -p /var/lib/matrix-conduit/conduit_db
sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db
``` ```
## Setting up the Reverse Proxy ## Setting up the Reverse Proxy
This depends on whether you use Apache, Nginx or another web server. This depends on whether you use Apache, Nginx or another web server.
@ -171,11 +171,9 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
$ sudo systemctl reload apache2 $ sudo systemctl reload apache2
``` ```
### Nginx ### Nginx
If you use Nginx and not Apache, add the following server section inside the If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`
http section of `/etc/nginx/nginx.conf`
```nginx ```nginx
server { server {
@ -198,13 +196,13 @@ server {
include /etc/letsencrypt/options-ssl-nginx.conf; include /etc/letsencrypt/options-ssl-nginx.conf;
} }
``` ```
**You need to make some edits again.** When you are done, run **You need to make some edits again.** When you are done, run
```bash ```bash
$ sudo systemctl reload nginx $ sudo systemctl reload nginx
``` ```
## SSL Certificate ## SSL Certificate
The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this:
@ -213,7 +211,6 @@ The easiest way to get an SSL certificate, if you don't have one already, is to
$ sudo certbot -d your.server.name $ sudo certbot -d your.server.name
``` ```
## You're done! ## You're done!
Now you can start Conduit with: Now you can start Conduit with:

Dockerfile (132 changed lines)

@ -1,75 +1,65 @@
# Using multistage build: # syntax=docker/dockerfile:1
# https://docs.docker.com/develop/develop-images/multistage-build/ FROM docker.io/rust:1.53-alpine AS builder
# https://whitfin.io/speeding-up-rust-docker-builds/ WORKDIR /usr/src/conduit
# Install required packages to build Conduit and it's dependencies
RUN apk add musl-dev
# == Build dependencies without our own code separately for caching ==
#
# Need a fake main.rs since Cargo refuses to build anything otherwise.
#
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
# request that would allow just dependencies to be compiled, presumably
# regardless of whether source files are available.
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
COPY Cargo.toml Cargo.lock ./
RUN cargo build --release && rm -r src
# Copy over actual Conduit sources
COPY src src
# main.rs and lib.rs need their timestamp updated for this to work correctly since
# otherwise the build with the fake main.rs from above is newer than the
# source files (COPY preserves timestamps).
#
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
RUN touch src/main.rs && touch src/lib.rs && cargo build --release
########################## BUILD IMAGE ##########################
# Alpine build image to build Conduit's statically compiled binary
FROM alpine:3.14 as builder
# Install packages needed for building all crates
RUN apk add --no-cache \
cargo \
openssl-dev
# Specifies if the local project is build or if Conduit gets build
# from the official git repository. Defaults to the git repo.
ARG LOCAL=false
# Specifies which revision/commit is build. Defaults to HEAD
ARG GIT_REF=origin/master
# Copy project files from current folder
COPY . .
# Build it from the copied local files or from the official git repository
RUN if [[ $LOCAL == "true" ]]; then \
mv ./docker/healthcheck.sh . ; \
echo "Building from local source..." ; \
cargo install --path . ; \
else \
echo "Building revision '${GIT_REF}' from online source..." ; \
cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF} ; \
echo "Loadings healthcheck script from online source..." ; \
wget "https://gitlab.com/famedly/conduit/-/raw/${GIT_REF#origin/}/docker/healthcheck.sh" ; \
fi
########################## RUNTIME IMAGE ##########################
# Create new stage with a minimal image for the actual
# runtime image/container
FROM alpine:3.14
ARG CREATED
ARG VERSION
ARG GIT_REF=origin/master
ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md # ---------------------------------------------------------------------------------------------------------------
# including a custom label specifying the build command # Stuff below this line actually ends up in the resulting docker image
LABEL org.opencontainers.image.created=${CREATED} \ # ---------------------------------------------------------------------------------------------------------------
org.opencontainers.image.authors="Conduit Contributors" \ FROM docker.io/alpine:3.15.0 AS runner
org.opencontainers.image.title="Conduit" \
org.opencontainers.image.version=${VERSION} \ # Standard port on which Conduit launches.
org.opencontainers.image.vendor="Conduit Contributors" \ # You still need to map the port when using the docker command or docker-compose.
org.opencontainers.image.description="A Matrix homeserver written in Rust" \
org.opencontainers.image.url="https://conduit.rs/" \
org.opencontainers.image.revision=${GIT_REF} \
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
org.opencontainers.image.licenses="Apache-2.0" \
org.opencontainers.image.documentation="" \
org.opencontainers.image.ref.name="" \
org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
maintainer="Weasy666"
# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167 EXPOSE 6167
# Copy config files from context and the binary from # Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs.
# the "builder" stage to the current stage into folder ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
# /srv/conduit and create data folder for database
# Conduit needs:
# ca-certificates: for https
# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
RUN apk add --no-cache \
ca-certificates \
libgcc
# Created directory for the database and media files
RUN mkdir -p /srv/conduit/.local/share/conduit RUN mkdir -p /srv/conduit/.local/share/conduit
COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/
COPY --from=builder ./healthcheck.sh /srv/conduit/
# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
# Copy over the actual Conduit binary from the builder stage
COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit
# Improve security: Don't run stuff as root, that does not need to run as root:
# Add www-data user and group with UID 82, as used by alpine # Add www-data user and group with UID 82, as used by alpine
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install # https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
RUN set -x ; \ RUN set -x ; \
@ -79,19 +69,13 @@ RUN set -x ; \
# Change ownership of Conduit files to www-data user and group # Change ownership of Conduit files to www-data user and group
RUN chown -cR www-data:www-data /srv/conduit RUN chown -cR www-data:www-data /srv/conduit
RUN chmod +x /srv/conduit/healthcheck.sh
# Install packages needed to run Conduit # Change user to www-data
RUN apk add --no-cache \
ca-certificates \
curl \
libgcc
# Test if Conduit is still alive, uses the same endpoint as Element
HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh
# Set user to www-data
USER www-data USER www-data
# Set container home directory # Set container home directory
WORKDIR /srv/conduit WORKDIR /srv/conduit
# Run Conduit
# Run Conduit and print backtraces on panics
ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/srv/conduit/conduit" ] ENTRYPOINT [ "/srv/conduit/conduit" ]

conduit-example.toml (3 changed lines)

@@ -39,7 +39,8 @@ trusted_servers = ["matrix.org"]
 #log = "info,state_res=warn,rocket=off,_=off,sled=off"
 #workers = 4 # default: cpu core count * 2
-address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy, if you are running conduit from docker, set this to 0.0.0.0
+address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
+#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
 proxy = "none" # more examples can be found at src/database/proxy.rs:6

docker/README.md (107 changed lines)

@@ -2,53 +2,41 @@
> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate.

## Docker

### Build & Dockerfile

The Dockerfile provided by Conduit has two stages, each of which creates an image.

-1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository.
-2. **Runtime:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions.
-
-The Dockerfile includes a few build arguments that should be supplied when building it.
-
-``` Dockerfile
-ARG LOCAL=false
-ARG CREATED
-ARG VERSION
-ARG GIT_REF=origin/master
-```
-
-- **CREATED:** Date and time as string (date-time as defined by RFC 3339). Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.created`. Supply by it like this `$(date -u +'%Y-%m-%dT%H:%M:%SZ')`
-- **VERSION:** The SemVer version of Conduit, which is in the image. Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.version`. If you have a `Cargo.toml` in your build context, you can get it with `$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)`
-- **LOCAL:** *(Optional)* A boolean value, specifies if the local build context should be used, or if the official repository will be cloned. If not supplied with the build command, it will default to `false`.
-- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. If not supplied with the build command, it will default to `origin/master`.
+1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository.
+2. **Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions.

To build the image you can use the following command

-``` bash
-docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
+```bash
+docker build --tag matrixconduit/matrix-conduit:latest .
```

which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
-**Note:** it ommits the two optional `build-arg`s.

### Run

After building the image you can simply run it with

-``` bash
+```bash
docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest
```

or you can skip the build step and pull the image from one of the following registries:

| Registry        | Image                                                             | Size                  |
-| --------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- |
-| Docker Hub      | [matrixconduit/matrix-conduit:latest](https://hub.docker.com/r/matrixconduit/matrix-conduit)                         | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest)  |
-| GitLab Registry | [registry.gitlab.com/famedly/conduit/conduit:latest](https://gitlab.com/famedly/conduit/container_registry/2134341)  | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest)  |
+| --------------- | ----------------------------------------------------------------- | --------------------- |
+| Docker Hub      | [matrixconduit/matrix-conduit:latest][dh]                         | ![Image Size][shield] |
+| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl]   | ![Image Size][shield] |
+
+[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
+[gl]: https://gitlab.com/famedly/conduit/container_registry/
+[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest

The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml).
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
@@ -56,29 +44,26 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.

## Docker-compose

If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying
Conduit can be found [here](../DEPLOY.md).

### Build

To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with:

-``` bash
-CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up
+```bash
+docker-compose up
```

-This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. For possible `build-args`, please take a look at the above `Build & Dockerfile` section.
+This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag.

### Run

If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with:

-``` bash
+```bash
docker-compose up -d
```
@@ -101,32 +86,36 @@ So...step by step:
3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
5. Create the files needed by the `well-known` service.

   - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping)

     ```nginx
     server {
         server_name <SUBDOMAIN>.<DOMAIN>;
         listen 80 default_server;

         location /.well-known/matrix/ {
             root /var/www;
             default_type application/json;
             add_header Access-Control-Allow-Origin *;
         }
     }
     ```

   - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping)

     ```json
     {
       "m.homeserver": {
         "base_url": "https://<SUBDOMAIN>.<DOMAIN>"
       }
     }
     ```

   - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping)

     ```json
     {
       "m.server": "<SUBDOMAIN>.<DOMAIN>:443"
     }
     ```

6. Run `docker-compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediatly after starting Conduit, because the first created user is the admin.
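The README notes that Conduit can also be configured entirely through environment variables by passing an empty `CONDUIT_CONFIG`. A hedged sketch of such an invocation follows; the specific `CONDUIT_*` variable names below are illustrative assumptions rather than values taken from this diff:

```bash
# Run without a conduit.toml; every setting comes from the environment.
# Variable names follow the CONDUIT_ prefix convention and are examples only.
docker run -d -p 8448:6167 \
    -e CONDUIT_CONFIG="" \
    -e CONDUIT_SERVER_NAME="example.com" \
    -e CONDUIT_ADDRESS="0.0.0.0" \
    -e CONDUIT_PORT="6167" \
    -v db:/srv/conduit/.local/share/conduit \
    matrixconduit/matrix-conduit:latest
```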

docker/ci-binaries-packaging.Dockerfile (77 changed lines)

@@ -1,51 +1,62 @@
+# syntax=docker/dockerfile:1
# ---------------------------------------------------------------------------------------------------------
# This Dockerfile is intended to be built as part of Conduit's CI pipeline.
-# It does not build Conduit in Docker, but just copies the matching build artifact from the build job.
-# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary.
+# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs.
#
# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
# Credit's for the original Dockerfile: Weasy666.
# ---------------------------------------------------------------------------------------------------------
-FROM alpine:3.14
+FROM docker.io/alpine:3.15.0 AS runner
-# Install packages needed to run Conduit
+# Standard port on which Conduit launches.
+# You still need to map the port when using the docker command or docker-compose.
+EXPOSE 6167
+# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs.
+ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
+# Conduit needs:
+# ca-certificates: for https
+# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big.
RUN apk add --no-cache \
    ca-certificates \
-    curl \
    libgcc
ARG CREATED
ARG VERSION
ARG GIT_REF
-ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
# including a custom label specifying the build command
LABEL org.opencontainers.image.created=${CREATED} \
    org.opencontainers.image.authors="Conduit Contributors" \
    org.opencontainers.image.title="Conduit" \
    org.opencontainers.image.version=${VERSION} \
    org.opencontainers.image.vendor="Conduit Contributors" \
    org.opencontainers.image.description="A Matrix homeserver written in Rust" \
    org.opencontainers.image.url="https://conduit.rs/" \
    org.opencontainers.image.revision=${GIT_REF} \
    org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
    org.opencontainers.image.licenses="Apache-2.0" \
-    org.opencontainers.image.documentation="" \
+    org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
    org.opencontainers.image.ref.name=""
-# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose.
-EXPOSE 6167
-# create data folder for database
+# Created directory for the database and media files
RUN mkdir -p /srv/conduit/.local/share/conduit
-# Copy the Conduit binary into the image at the latest possible moment to maximise caching:
-COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit
-COPY ./docker/healthcheck.sh /srv/conduit/
+# Test if Conduit is still alive, uses the same endpoint as Element
+COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
+HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
+# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64")
+# copy the matching binary into this docker image
+ARG TARGETPLATFORM
+COPY ./$TARGETPLATFORM /srv/conduit/conduit
+# Improve security: Don't run stuff as root, that does not need to run as root:
# Add www-data user and group with UID 82, as used by alpine
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
RUN set -x ; \
@@ -57,13 +68,11 @@ RUN set -x ; \
RUN chown -cR www-data:www-data /srv/conduit
RUN chmod +x /srv/conduit/healthcheck.sh
+# Change user to www-data
-# Test if Conduit is still alive, uses the same endpoint as Element
-HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh
-# Set user to www-data
USER www-data
# Set container home directory
WORKDIR /srv/conduit
-# Run Conduit
+# Run Conduit and print backtraces on panics
+ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/srv/conduit/conduit" ]
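`TARGETPLATFORM` is filled in automatically by BuildKit/buildx for each platform being built, so the `COPY ./$TARGETPLATFORM` line picks up whichever prebuilt binary matches. A sketch of how CI could drive this Dockerfile, assuming the build jobs have already placed one binary per platform at paths like `./linux/amd64` in the build context (the exact CI layout and image tag are not part of this diff):

```bash
# Multi-arch build; buildx sets TARGETPLATFORM for every platform listed below
docker buildx build \
    --platform "linux/amd64,linux/arm64/v8,linux/arm/v7" \
    --file docker/ci-binaries-packaging.Dockerfile \
    --build-arg CREATED="$(date -u +'%Y-%m-%dT%H:%M:%SZ')" \
    --build-arg VERSION="$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
    --tag registry.gitlab.com/famedly/conduit/matrix-conduit:latest \
    --push .
```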

docker/healthcheck.sh (6 changed lines)

@@ -7,7 +7,7 @@ fi
# The actual health check.
# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
-# TODO: Change this to a single curl call. Do we have a config value that we can check for that?
-curl --fail -s "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
-    curl -k --fail -s "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
+# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
+wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
+    wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
    exit 1
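Switching from `curl` to `wget` lets the runtime image rely on the `wget` applet that busybox already ships in Alpine, which is presumably why the separate `curl` package disappears from the `apk add` lines in the Dockerfiles above. To exercise the same endpoint by hand (port and container name are placeholders):

```bash
# Probe the endpoint the HEALTHCHECK script uses (adjust the port to your mapping)
wget --no-verbose --tries=1 --spider "http://localhost:6167/_matrix/client/versions" \
    && echo "Conduit answered"

# Or read the health state Docker derived from the script
docker inspect --format '{{.State.Health.Status}}' <container-name>
```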

rust-toolchain (2 changed lines)

@@ -1 +1 @@
-1.52
+1.53

src/client_server/account.rs (111 changed lines)

@ -1,8 +1,4 @@
use std::{ use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
};
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
@ -11,23 +7,31 @@ use ruma::{
error::ErrorKind, error::ErrorKind,
r0::{ r0::{
account::{ account::{
change_password, deactivate, get_username_availability, register, whoami, change_password, deactivate, get_3pids, get_username_availability, register,
ThirdPartyIdRemovalStatus, whoami, ThirdPartyIdRemovalStatus,
}, },
contact::get_contacts, uiaa::{AuthFlow, AuthType, UiaaInfo},
uiaa::{AuthFlow, UiaaInfo},
}, },
}, },
events::{ events::{
room::{ room::{
canonical_alias, guest_access, history_visibility, join_rules, member, message, name, canonical_alias::RoomCanonicalAliasEventContent,
topic, create::RoomCreateEventContent,
guest_access::{GuestAccess, RoomGuestAccessEventContent},
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
join_rules::{JoinRule, RoomJoinRulesEventContent},
member::{MembershipState, RoomMemberEventContent},
message::RoomMessageEventContent,
name::RoomNameEventContent,
power_levels::RoomPowerLevelsEventContent,
topic::RoomTopicEventContent,
}, },
EventType, EventType,
}, },
identifiers::RoomName, identifiers::RoomName,
push, RoomAliasId, RoomId, RoomVersionId, UserId, push, RoomAliasId, RoomId, RoomVersionId, UserId,
}; };
use serde_json::value::to_raw_value;
use tracing::info; use tracing::info;
use register::RegistrationKind; use register::RegistrationKind;
@ -147,7 +151,7 @@ pub async fn register_route(
// UIAA // UIAA
let mut uiaainfo = UiaaInfo { let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow { flows: vec![AuthFlow {
stages: vec!["m.login.dummy".to_owned()], stages: vec![AuthType::Dummy],
}], }],
completed: Vec::new(), completed: Vec::new(),
params: Default::default(), params: Default::default(),
@ -270,16 +274,16 @@ pub async fn register_route(
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
let mut content = ruma::events::room::create::CreateEventContent::new(conduit_user.clone()); let mut content = RoomCreateEventContent::new(conduit_user.clone());
content.federate = true; content.federate = true;
content.predecessor = None; content.predecessor = None;
content.room_version = RoomVersionId::Version6; content.room_version = RoomVersionId::V6;
// 1. The room create event // 1. The room create event
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomCreate, event_type: EventType::RoomCreate,
content: serde_json::to_value(content).expect("event is valid, we just created it"), content: to_raw_value(&content).expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
@ -294,14 +298,15 @@ pub async fn register_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(member::MemberEventContent { content: to_raw_value(&RoomMemberEventContent {
membership: member::MembershipState::Join, membership: MembershipState::Join,
displayname: None, displayname: None,
avatar_url: None, avatar_url: None,
is_direct: None, is_direct: None,
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -322,12 +327,10 @@ pub async fn register_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomPowerLevels, event_type: EventType::RoomPowerLevels,
content: serde_json::to_value( content: to_raw_value(&RoomPowerLevelsEventContent {
ruma::events::room::power_levels::PowerLevelsEventContent { users,
users, ..Default::default()
..Default::default() })
},
)
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
@ -343,10 +346,8 @@ pub async fn register_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomJoinRules, event_type: EventType::RoomJoinRules,
content: serde_json::to_value(join_rules::JoinRulesEventContent::new( content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
join_rules::JoinRule::Invite, .expect("event is valid, we just created it"),
))
.expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
@ -361,11 +362,9 @@ pub async fn register_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomHistoryVisibility, event_type: EventType::RoomHistoryVisibility,
content: serde_json::to_value( content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
history_visibility::HistoryVisibilityEventContent::new( HistoryVisibility::Shared,
history_visibility::HistoryVisibility::Shared, ))
),
)
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
@ -381,10 +380,8 @@ pub async fn register_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomGuestAccess, event_type: EventType::RoomGuestAccess,
content: serde_json::to_value(guest_access::GuestAccessEventContent::new( content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
guest_access::GuestAccess::Forbidden, .expect("event is valid, we just created it"),
))
.expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
@ -396,13 +393,12 @@ pub async fn register_route(
)?; )?;
// 6. Events implied by name and topic // 6. Events implied by name and topic
let room_name = let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name()))
Box::<RoomName>::try_from(format!("{} Admin Room", db.globals.server_name())) .expect("Room name is valid");
.expect("Room name is valid");
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomName, event_type: EventType::RoomName,
content: serde_json::to_value(name::NameEventContent::new(Some(room_name))) content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
@ -417,7 +413,7 @@ pub async fn register_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomTopic, event_type: EventType::RoomTopic,
content: serde_json::to_value(topic::TopicEventContent { content: to_raw_value(&RoomTopicEventContent {
topic: format!("Manage {}", db.globals.server_name()), topic: format!("Manage {}", db.globals.server_name()),
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
@ -432,14 +428,14 @@ pub async fn register_route(
)?; )?;
// Room alias // Room alias
let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name()) let alias: Box<RoomAliasId> = format!("#admins:{}", db.globals.server_name())
.try_into() .try_into()
.expect("#admins:server_name is a valid alias name"); .expect("#admins:server_name is a valid alias name");
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomCanonicalAlias, event_type: EventType::RoomCanonicalAlias,
content: serde_json::to_value(canonical_alias::CanonicalAliasEventContent { content: to_raw_value(&RoomCanonicalAliasEventContent {
alias: Some(alias.clone()), alias: Some(alias.clone()),
alt_aliases: Vec::new(), alt_aliases: Vec::new(),
}) })
@ -460,14 +456,15 @@ pub async fn register_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(member::MemberEventContent { content: to_raw_value(&RoomMemberEventContent {
membership: member::MembershipState::Invite, membership: MembershipState::Invite,
displayname: None, displayname: None,
avatar_url: None, avatar_url: None,
is_direct: None, is_direct: None,
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -482,14 +479,15 @@ pub async fn register_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(member::MemberEventContent { content: to_raw_value(&RoomMemberEventContent {
membership: member::MembershipState::Join, membership: MembershipState::Join,
displayname: Some(displayname), displayname: Some(displayname),
avatar_url: None, avatar_url: None,
is_direct: None, is_direct: None,
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -506,7 +504,7 @@ pub async fn register_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMessage, event_type: EventType::RoomMessage,
content: serde_json::to_value(message::MessageEventContent::text_html( content: to_raw_value(&RoomMessageEventContent::text_html(
"## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(),
"<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>\n".to_owned(), "<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>\n".to_owned(),
)) ))
@ -562,7 +560,7 @@ pub async fn change_password_route(
let mut uiaainfo = UiaaInfo { let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow { flows: vec![AuthFlow {
stages: vec!["m.login.password".to_owned()], stages: vec![AuthType::Password],
}], }],
completed: Vec::new(), completed: Vec::new(),
params: Default::default(), params: Default::default(),
@ -654,7 +652,7 @@ pub async fn deactivate_route(
let mut uiaainfo = UiaaInfo { let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow { flows: vec![AuthFlow {
stages: vec!["m.login.password".to_owned()], stages: vec![AuthType::Password],
}], }],
completed: Vec::new(), completed: Vec::new(),
params: Default::default(), params: Default::default(),
@ -698,14 +696,15 @@ pub async fn deactivate_route(
for room_id in all_rooms { for room_id in all_rooms {
let room_id = room_id?; let room_id = room_id?;
let event = member::MemberEventContent { let event = RoomMemberEventContent {
membership: member::MembershipState::Leave, membership: MembershipState::Leave,
displayname: None, displayname: None,
avatar_url: None, avatar_url: None,
is_direct: None, is_direct: None,
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}; };
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
@ -721,7 +720,7 @@ pub async fn deactivate_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(event).expect("event is valid, we just created it"), content: to_raw_value(&event).expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
@ -756,9 +755,9 @@ pub async fn deactivate_route(
get("/_matrix/client/r0/account/3pid", data = "<body>") get("/_matrix/client/r0/account/3pid", data = "<body>")
)] )]
pub async fn third_party_route( pub async fn third_party_route(
body: Ruma<get_contacts::Request>, body: Ruma<get_3pids::Request>,
) -> ConduitResult<get_contacts::Response> { ) -> ConduitResult<get_3pids::Response> {
let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
Ok(get_contacts::Response::new(Vec::new()).into()) Ok(get_3pids::Response::new(Vec::new()).into())
} }

src/client_server/capabilities.rs (6 changed lines)

@@ -22,12 +22,12 @@ pub async fn get_capabilities_route(
    _body: Ruma<get_capabilities::Request>,
) -> ConduitResult<get_capabilities::Response> {
    let mut available = BTreeMap::new();
-    available.insert(RoomVersionId::Version5, RoomVersionStability::Stable);
-    available.insert(RoomVersionId::Version6, RoomVersionStability::Stable);
+    available.insert(RoomVersionId::V5, RoomVersionStability::Stable);
+    available.insert(RoomVersionId::V6, RoomVersionStability::Stable);
    let mut capabilities = Capabilities::new();
    capabilities.room_versions = RoomVersionsCapability {
-        default: RoomVersionId::Version6,
+        default: RoomVersionId::V6,
        available,
    };
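The `Version5`/`Version6` to `V5`/`V6` renames come from the Ruma upgrade; behaviourally the route still advertises room versions 5 and 6 as stable, with 6 as the default. For illustration, the endpoint can be inspected like this against a running server (host and token are placeholders, and the response shape is abridged):

```bash
curl -s -H "Authorization: Bearer <access_token>" \
    "http://localhost:6167/_matrix/client/r0/capabilities"
# Expected shape (abridged):
# {"capabilities":{"m.room_versions":{"default":"6","available":{"5":"stable","6":"stable"}}}}
```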

src/client_server/config.rs (12 changed lines)

@@ -30,7 +30,7 @@ pub async fn set_global_account_data_route(
) -> ConduitResult<set_global_account_data::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-    let data = serde_json::from_str::<serde_json::Value>(body.data.get())
+    let data: serde_json::Value = serde_json::from_str(body.data.get())
        .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
    let event_type = body.event_type.to_string();
@@ -68,7 +68,7 @@ pub async fn set_room_account_data_route(
) -> ConduitResult<set_room_account_data::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-    let data = serde_json::from_str::<serde_json::Value>(body.data.get())
+    let data: serde_json::Value = serde_json::from_str(body.data.get())
        .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
    let event_type = body.event_type.to_string();
@@ -103,9 +103,9 @@ pub async fn get_global_account_data_route(
) -> ConduitResult<get_global_account_data::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-    let event = db
+    let event: Box<RawJsonValue> = db
        .account_data
-        .get::<Box<RawJsonValue>>(None, sender_user, body.event_type.clone().into())?
+        .get(None, sender_user, body.event_type.clone().into())?
        .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
    let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
@@ -132,9 +132,9 @@ pub async fn get_room_account_data_route(
) -> ConduitResult<get_room_account_data::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-    let event = db
+    let event: Box<RawJsonValue> = db
        .account_data
-        .get::<Box<RawJsonValue>>(
+        .get(
            Some(&body.room_id),
            sender_user,
            body.event_type.clone().into(),

src/client_server/context.rs (16 changed lines)

@@ -48,7 +48,7 @@ pub async fn get_context_route(
        ))?
        .to_room_event();
-    let events_before = db
+    let events_before: Vec<_> = db
        .rooms
        .pdus_until(sender_user, &body.room_id, base_token)?
        .take(
@@ -58,19 +58,19 @@ pub async fn get_context_route(
                / 2,
        )
        .filter_map(|r| r.ok()) // Remove buggy events
-        .collect::<Vec<_>>();
+        .collect();
    let start_token = events_before
        .last()
        .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
        .map(|count| count.to_string());
-    let events_before = events_before
+    let events_before: Vec<_> = events_before
        .into_iter()
        .map(|(_, pdu)| pdu.to_room_event())
-        .collect::<Vec<_>>();
+        .collect();
-    let events_after = db
+    let events_after: Vec<_> = db
        .rooms
        .pdus_after(sender_user, &body.room_id, base_token)?
        .take(
@@ -80,17 +80,17 @@ pub async fn get_context_route(
                / 2,
        )
        .filter_map(|r| r.ok()) // Remove buggy events
-        .collect::<Vec<_>>();
+        .collect();
    let end_token = events_after
        .last()
        .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
        .map(|count| count.to_string());
-    let events_after = events_after
+    let events_after: Vec<_> = events_after
        .into_iter()
        .map(|(_, pdu)| pdu.to_room_event())
-        .collect::<Vec<_>>();
+        .collect();
    let mut resp = get_context::Response::new();
    resp.start = start_token;

src/client_server/device.rs (10 changed lines)

@@ -3,7 +3,7 @@ use ruma::api::client::{
    error::ErrorKind,
    r0::{
        device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
-        uiaa::{AuthFlow, UiaaInfo},
+        uiaa::{AuthFlow, AuthType, UiaaInfo},
    },
};
@@ -25,11 +25,11 @@ pub async fn get_devices_route(
) -> ConduitResult<get_devices::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-    let devices = db
+    let devices: Vec<device::Device> = db
        .users
        .all_devices_metadata(sender_user)
        .filter_map(|r| r.ok()) // Filter out buggy devices
-        .collect::<Vec<device::Device>>();
+        .collect();
    Ok(get_devices::Response { devices }.into())
}
@@ -109,7 +109,7 @@ pub async fn delete_device_route(
    // UIAA
    let mut uiaainfo = UiaaInfo {
        flows: vec![AuthFlow {
-            stages: vec!["m.login.password".to_owned()],
+            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
@@ -172,7 +172,7 @@ pub async fn delete_devices_route(
    // UIAA
    let mut uiaainfo = UiaaInfo {
        flows: vec![AuthFlow {
-            stages: vec!["m.login.password".to_owned()],
+            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
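Replacing the `"m.login.password"` string literal with `AuthType::Password` is a type-level tightening from the Ruma upgrade; the UIAA challenge on the wire stays the same. As an illustration, deleting a device without completing UIAA should still return a 401 carrying that flow (host, device ID and token are placeholders):

```bash
curl -s -X DELETE \
    -H "Authorization: Bearer <access_token>" \
    "http://localhost:6167/_matrix/client/r0/devices/<device_id>"
# => 401 with a body like:
# {"flows":[{"stages":["m.login.password"]}],"params":{},"session":"<session_id>"}
```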

src/client_server/directory.rs (256 changed lines)

@ -17,10 +17,16 @@ use ruma::{
}, },
directory::{Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk, RoomNetwork}, directory::{Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk, RoomNetwork},
events::{ events::{
room::{avatar, canonical_alias, guest_access, history_visibility, name, topic}, room::{
avatar::RoomAvatarEventContent,
canonical_alias::RoomCanonicalAliasEventContent,
guest_access::{GuestAccess, RoomGuestAccessEventContent},
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
name::RoomNameEventContent,
topic::RoomTopicEventContent,
},
EventType, EventType,
}, },
serde::Raw,
ServerName, UInt, ServerName, UInt,
}; };
use tracing::{info, warn}; use tracing::{info, warn};
@ -161,7 +167,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
other_server, other_server,
federation::directory::get_public_rooms_filtered::v1::Request { federation::directory::get_public_rooms_filtered::v1::Request {
limit, limit,
since: since.as_deref(), since,
filter: Filter { filter: Filter {
generic_search_term: filter.generic_search_term.as_deref(), generic_search_term: filter.generic_search_term.as_deref(),
}, },
@ -217,167 +223,139 @@ pub(crate) async fn get_public_rooms_filtered_helper(
} }
} }
let mut all_rooms = let mut all_rooms: Vec<_> = db
db.rooms .rooms
.public_rooms() .public_rooms()
.map(|room_id| { .map(|room_id| {
let room_id = room_id?; let room_id = room_id?;
let chunk = PublicRoomsChunk { let chunk = PublicRoomsChunk {
aliases: Vec::new(), aliases: Vec::new(),
canonical_alias: db canonical_alias: db
.rooms .rooms
.room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")?
.map_or(Ok::<_, Error>(None), |s| { .map_or(Ok(None), |s| {
Ok(serde_json::from_value::< serde_json::from_str(s.content.get())
Raw<canonical_alias::CanonicalAliasEventContent>, .map(|c: RoomCanonicalAliasEventContent| c.alias)
>(s.content.clone())
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| { .map_err(|_| {
Error::bad_database("Invalid canonical alias event in database.") Error::bad_database("Invalid canonical alias event in database.")
})? })
.alias) })?,
})?, name: db
name: db .rooms
.rooms .room_state_get(&room_id, &EventType::RoomName, "")?
.room_state_get(&room_id, &EventType::RoomName, "")? .map_or(Ok(None), |s| {
.map_or(Ok::<_, Error>(None), |s| { serde_json::from_str(s.content.get())
Ok(serde_json::from_value::<Raw<name::NameEventContent>>( .map(|c: RoomNameEventContent| c.name)
s.content.clone(),
)
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| { .map_err(|_| {
Error::bad_database("Invalid room name event in database.") Error::bad_database("Invalid room name event in database.")
})? })
.name) })?,
})?, num_joined_members: db
num_joined_members: db .rooms
.rooms .room_joined_count(&room_id)?
.room_joined_count(&room_id)? .unwrap_or_else(|| {
.unwrap_or_else(|| { warn!("Room {} has no member count", room_id);
warn!("Room {} has no member count", room_id); 0
0 })
}) .try_into()
.try_into() .expect("user count should not be that big"),
.expect("user count should not be that big"), topic: db
topic: db .rooms
.rooms .room_state_get(&room_id, &EventType::RoomTopic, "")?
.room_state_get(&room_id, &EventType::RoomTopic, "")? .map_or(Ok(None), |s| {
.map_or(Ok::<_, Error>(None), |s| { serde_json::from_str(s.content.get())
Ok(Some( .map(|c: RoomTopicEventContent| Some(c.topic))
serde_json::from_value::<Raw<topic::TopicEventContent>>( .map_err(|_| {
s.content.clone(), Error::bad_database("Invalid room topic event in database.")
) })
.expect("from_value::<Raw<..>> can never fail") })?,
.deserialize() world_readable: db
.map_err(|_| { .rooms
Error::bad_database("Invalid room topic event in database.") .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")?
})? .map_or(Ok(false), |s| {
.topic, serde_json::from_str(s.content.get())
)) .map(|c: RoomHistoryVisibilityEventContent| {
})?, c.history_visibility == HistoryVisibility::WorldReadable
world_readable: db })
.rooms
.room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")?
.map_or(Ok::<_, Error>(false), |s| {
Ok(serde_json::from_value::<
Raw<history_visibility::HistoryVisibilityEventContent>,
>(s.content.clone())
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| { .map_err(|_| {
Error::bad_database( Error::bad_database(
"Invalid room history visibility event in database.", "Invalid room history visibility event in database.",
) )
})? })
.history_visibility })?,
== history_visibility::HistoryVisibility::WorldReadable) guest_can_join: db
})?, .rooms
guest_can_join: db .room_state_get(&room_id, &EventType::RoomGuestAccess, "")?
.rooms .map_or(Ok(false), |s| {
.room_state_get(&room_id, &EventType::RoomGuestAccess, "")? serde_json::from_str(s.content.get())
.map_or(Ok::<_, Error>(false), |s| { .map(|c: RoomGuestAccessEventContent| {
Ok( c.guest_access == GuestAccess::CanJoin
serde_json::from_value::<Raw<guest_access::GuestAccessEventContent>>( })
s.content.clone(),
)
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| { .map_err(|_| {
Error::bad_database("Invalid room guest access event in database.") Error::bad_database("Invalid room guest access event in database.")
})? })
.guest_access })?,
== guest_access::GuestAccess::CanJoin, avatar_url: db
) .rooms
})?, .room_state_get(&room_id, &EventType::RoomAvatar, "")?
avatar_url: db .map(|s| {
.rooms serde_json::from_str(s.content.get())
.room_state_get(&room_id, &EventType::RoomAvatar, "")? .map(|c: RoomAvatarEventContent| c.url)
.map(|s| { .map_err(|_| {
Ok::<_, Error>( Error::bad_database("Invalid room avatar event in database.")
serde_json::from_value::<Raw<avatar::AvatarEventContent>>( })
s.content.clone(), })
) .transpose()?
.expect("from_value::<Raw<..>> can never fail") // url is now an Option<String> so we must flatten
.deserialize() .flatten(),
.map_err(|_| { room_id,
Error::bad_database("Invalid room avatar event in database.") };
})? Ok(chunk)
.url, })
) .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms
}) .filter(|chunk| {
.transpose()? if let Some(query) = filter
// url is now an Option<String> so we must flatten .generic_search_term
.flatten(), .as_ref()
room_id, .map(|q| q.to_lowercase())
}; {
Ok(chunk) if let Some(name) = &chunk.name {
}) if name.as_str().to_lowercase().contains(&query) {
.filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms return true;
.filter(|chunk| {
if let Some(query) = filter
.generic_search_term
.as_ref()
.map(|q| q.to_lowercase())
{
if let Some(name) = &chunk.name {
if name.as_str().to_lowercase().contains(&query) {
return true;
}
} }
}
if let Some(topic) = &chunk.topic { if let Some(topic) = &chunk.topic {
if topic.to_lowercase().contains(&query) { if topic.to_lowercase().contains(&query) {
return true; return true;
}
} }
}
if let Some(canonical_alias) = &chunk.canonical_alias { if let Some(canonical_alias) = &chunk.canonical_alias {
if canonical_alias.as_str().to_lowercase().contains(&query) { if canonical_alias.as_str().to_lowercase().contains(&query) {
return true; return true;
}
} }
false
} else {
// No search term
true
} }
})
// We need to collect all, so we can sort by member count false
.collect::<Vec<_>>(); } else {
// No search term
true
}
})
// We need to collect all, so we can sort by member count
.collect();
all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
let total_room_count_estimate = (all_rooms.len() as u32).into(); let total_room_count_estimate = (all_rooms.len() as u32).into();
let chunk = all_rooms let chunk: Vec<_> = all_rooms
.into_iter() .into_iter()
.skip(num_since as usize) .skip(num_since as usize)
.take(limit as usize) .take(limit as usize)
.collect::<Vec<_>>(); .collect();
let prev_batch = if num_since == 0 { let prev_batch = if num_since == 0 {
None None

30
src/client_server/keys.rs

@ -10,7 +10,7 @@ use ruma::{
claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
upload_signing_keys, upload_signing_keys,
}, },
uiaa::{AuthFlow, UiaaInfo}, uiaa::{AuthFlow, AuthType, UiaaInfo},
}, },
}, },
federation, federation,
@ -148,7 +148,7 @@ pub async fn upload_signing_keys_route(
// UIAA // UIAA
let mut uiaainfo = UiaaInfo { let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow { flows: vec![AuthFlow {
stages: vec!["m.login.password".to_owned()], stages: vec![AuthType::Password],
}], }],
completed: Vec::new(), completed: Vec::new(),
params: Default::default(), params: Default::default(),
@ -316,7 +316,7 @@ pub async fn get_key_changes_route(
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>( pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
sender_user: Option<&UserId>, sender_user: Option<&UserId>,
device_keys_input: &BTreeMap<UserId, Vec<Box<DeviceId>>>, device_keys_input: &BTreeMap<Box<UserId>, Vec<Box<DeviceId>>>,
allowed_signatures: F, allowed_signatures: F,
db: &Database, db: &Database,
) -> Result<get_keys::Response> { ) -> Result<get_keys::Response> {
@ -328,6 +328,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
let mut get_over_federation = HashMap::new(); let mut get_over_federation = HashMap::new();
for (user_id, device_ids) in device_keys_input { for (user_id, device_ids) in device_keys_input {
let user_id: &UserId = &**user_id;
if user_id.server_name() != db.globals.server_name() { if user_id.server_name() != db.globals.server_name() {
get_over_federation get_over_federation
.entry(user_id.server_name()) .entry(user_id.server_name())
@ -355,11 +357,11 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
container.insert(device_id, keys); container.insert(device_id, keys);
} }
} }
device_keys.insert(user_id.clone(), container); device_keys.insert(user_id.to_owned(), container);
} else { } else {
for device_id in device_ids { for device_id in device_ids {
let mut container = BTreeMap::new(); let mut container = BTreeMap::new();
if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), device_id)? { if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? {
let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or( let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or(
Error::BadRequest( Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
@ -371,36 +373,36 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
device_display_name: metadata.display_name, device_display_name: metadata.display_name,
}; };
container.insert(device_id.clone(), keys); container.insert(device_id.to_owned(), keys);
} }
device_keys.insert(user_id.clone(), container); device_keys.insert(user_id.to_owned(), container);
} }
} }
if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? { if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
master_keys.insert(user_id.clone(), master_key); master_keys.insert(user_id.to_owned(), master_key);
} }
if let Some(self_signing_key) = db if let Some(self_signing_key) = db
.users .users
.get_self_signing_key(user_id, &allowed_signatures)? .get_self_signing_key(user_id, &allowed_signatures)?
{ {
self_signing_keys.insert(user_id.clone(), self_signing_key); self_signing_keys.insert(user_id.to_owned(), self_signing_key);
} }
if Some(user_id) == sender_user { if Some(user_id) == sender_user {
if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? { if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
user_signing_keys.insert(user_id.clone(), user_signing_key); user_signing_keys.insert(user_id.to_owned(), user_signing_key);
} }
} }
} }
let mut failures = BTreeMap::new(); let mut failures = BTreeMap::new();
let mut futures = get_over_federation let mut futures: FuturesUnordered<_> = get_over_federation
.into_iter() .into_iter()
.map(|(server, vec)| async move { .map(|(server, vec)| async move {
let mut device_keys_input_fed = BTreeMap::new(); let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec { for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.clone(), keys.clone()); device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
} }
( (
server, server,
@ -415,7 +417,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
.await, .await,
) )
}) })
.collect::<FuturesUnordered<_>>(); .collect();
while let Some((server, response)) = futures.next().await { while let Some((server, response)) = futures.next().await {
match response { match response {
@ -440,7 +442,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
} }
pub(crate) async fn claim_keys_helper( pub(crate) async fn claim_keys_helper(
one_time_keys_input: &BTreeMap<UserId, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>, one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
db: &Database, db: &Database,
) -> Result<claim_keys::Response> { ) -> Result<claim_keys::Response> {
let mut one_time_keys = BTreeMap::new(); let mut one_time_keys = BTreeMap::new();

src/client_server/membership.rs (212 changed lines)

@ -1,10 +1,9 @@
use crate::{ use crate::{
client_server, client_server,
database::DatabaseGuard, database::DatabaseGuard,
pdu::{PduBuilder, PduEvent}, pdu::{EventHash, PduBuilder, PduEvent},
server_server, utils, ConduitResult, Database, Error, Result, Ruma, server_server, utils, ConduitResult, Database, Error, Result, Ruma,
}; };
use member::{MemberEventContent, MembershipState};
use ruma::{ use ruma::{
api::{ api::{
client::{ client::{
@ -18,17 +17,21 @@ use ruma::{
federation::{self, membership::create_invite}, federation::{self, membership::create_invite},
}, },
events::{ events::{
pdu::Pdu, room::{
room::{create::CreateEventContent, member}, create::RoomCreateEventContent,
member::{MembershipState, RoomMemberEventContent},
},
EventType, EventType,
}, },
serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue, Raw}, serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue},
state_res::{self, RoomVersion}, state_res::{self, RoomVersion},
uint, EventId, RoomId, RoomVersionId, ServerName, UserId, uint, EventId, RoomId, RoomVersionId, ServerName, UserId,
}; };
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use std::{ use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
iter,
sync::{Arc, RwLock}, sync::{Arc, RwLock},
time::{Duration, Instant}, time::{Duration, Instant},
}; };
@ -54,25 +57,23 @@ pub async fn join_room_by_id_route(
) -> ConduitResult<join_room_by_id::Response> { ) -> ConduitResult<join_room_by_id::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut servers = db let mut servers: HashSet<_> = db
.rooms .rooms
.invite_state(sender_user, &body.room_id)? .invite_state(sender_user, &body.room_id)?
.unwrap_or_default() .unwrap_or_default()
.iter() .iter()
.filter_map(|event| { .filter_map(|event| serde_json::from_str(event.json().get()).ok())
serde_json::from_str::<serde_json::Value>(&event.json().to_string()).ok() .filter_map(|event: serde_json::Value| event.get("sender").cloned())
})
.filter_map(|event| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
.filter_map(|sender| UserId::try_from(sender).ok()) .filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()) .map(|user| user.server_name().to_owned())
.collect::<HashSet<_>>(); .collect();
servers.insert(body.room_id.server_name().to_owned()); servers.insert(body.room_id.server_name().to_owned());
let ret = join_room_by_id_helper( let ret = join_room_by_id_helper(
&db, &db,
body.sender_user.as_ref(), body.sender_user.as_deref(),
&body.room_id, &body.room_id,
&servers, &servers,
body.third_party_signed.as_ref(), body.third_party_signed.as_ref(),
@ -99,23 +100,22 @@ pub async fn join_room_by_id_or_alias_route(
db: DatabaseGuard, db: DatabaseGuard,
body: Ruma<join_room_by_id_or_alias::Request<'_>>, body: Ruma<join_room_by_id_or_alias::Request<'_>>,
) -> ConduitResult<join_room_by_id_or_alias::Response> { ) -> ConduitResult<join_room_by_id_or_alias::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_deref().expect("user is authenticated");
let body = body.body;
let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) { let (servers, room_id) = match Box::<RoomId>::try_from(body.room_id_or_alias) {
Ok(room_id) => { Ok(room_id) => {
let mut servers = db let mut servers: HashSet<_> = db
.rooms .rooms
.invite_state(sender_user, &room_id)? .invite_state(sender_user, &room_id)?
.unwrap_or_default() .unwrap_or_default()
.iter() .iter()
.filter_map(|event| { .filter_map(|event| serde_json::from_str(event.json().get()).ok())
serde_json::from_str::<serde_json::Value>(&event.json().to_string()).ok() .filter_map(|event: serde_json::Value| event.get("sender").cloned())
})
.filter_map(|event| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned())) .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
.filter_map(|sender| UserId::try_from(sender).ok()) .filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()) .map(|user| user.server_name().to_owned())
.collect::<HashSet<_>>(); .collect();
servers.insert(room_id.server_name().to_owned()); servers.insert(room_id.server_name().to_owned());
(servers, room_id) (servers, room_id)
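For reference, the filter_map chain above can be exercised with plain serde_json; a rough sketch follows in which the stripped-state JSON is made up and splitting on ':' stands in for UserId::parse plus server_name().

use std::collections::HashSet;

fn main() {
    let stripped_events = vec![
        r#"{"type":"m.room.member","sender":"@alice:example.org"}"#,
        r#"{"type":"m.room.name","sender":"@bob:other.example"}"#,
        "not even json",
    ];

    // Every fallible step simply drops the event, mirroring the
    // filter_map(...).ok() style used in the route.
    let servers: HashSet<String> = stripped_events
        .into_iter()
        .filter_map(|event| serde_json::from_str::<serde_json::Value>(event).ok())
        .filter_map(|event| event.get("sender").cloned())
        .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
        .filter_map(|user| user.split(':').nth(1).map(|s| s.to_owned()))
        .collect();

    println!("{:?}", servers); // {"example.org", "other.example"} in some order
}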
@ -129,7 +129,7 @@ pub async fn join_room_by_id_or_alias_route(
let join_room_response = join_room_by_id_helper( let join_room_response = join_room_by_id_helper(
&db, &db,
body.sender_user.as_ref(), Some(sender_user),
&room_id, &room_id,
&servers, &servers,
body.third_party_signed.as_ref(), body.third_party_signed.as_ref(),
@ -204,7 +204,7 @@ pub async fn kick_user_route(
) -> ConduitResult<kick_user::Response> { ) -> ConduitResult<kick_user::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut event = serde_json::from_value::<Raw<ruma::events::room::member::MemberEventContent>>( let mut event: RoomMemberEventContent = serde_json::from_str(
db.rooms db.rooms
.room_state_get( .room_state_get(
&body.room_id, &body.room_id,
@ -216,13 +216,11 @@ pub async fn kick_user_route(
"Cannot kick member that's not in the room.", "Cannot kick member that's not in the room.",
))? ))?
.content .content
.clone(), .get(),
) )
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid member event in database."))?; .map_err(|_| Error::bad_database("Invalid member event in database."))?;
event.membership = ruma::events::room::member::MembershipState::Leave; event.membership = MembershipState::Leave;
// TODO: reason // TODO: reason
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
@ -238,7 +236,7 @@ pub async fn kick_user_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(event).expect("event is valid, we just created it"), content: to_raw_value(&event).expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
@ -280,24 +278,23 @@ pub async fn ban_user_route(
&body.user_id.to_string(), &body.user_id.to_string(),
)? )?
.map_or( .map_or(
Ok::<_, Error>(member::MemberEventContent { Ok(RoomMemberEventContent {
membership: member::MembershipState::Ban, membership: MembershipState::Ban,
displayname: db.users.displayname(&body.user_id)?, displayname: db.users.displayname(&body.user_id)?,
avatar_url: db.users.avatar_url(&body.user_id)?, avatar_url: db.users.avatar_url(&body.user_id)?,
is_direct: None, is_direct: None,
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}), }),
|event| { |event| {
let mut event = serde_json::from_value::<Raw<member::MemberEventContent>>( serde_json::from_str(event.content.get())
event.content.clone(), .map(|event: RoomMemberEventContent| RoomMemberEventContent {
) membership: MembershipState::Ban,
.expect("Raw::from_value always works") ..event
.deserialize() })
.map_err(|_| Error::bad_database("Invalid member event in database."))?; .map_err(|_| Error::bad_database("Invalid member event in database."))
event.membership = ruma::events::room::member::MembershipState::Ban;
Ok(event)
}, },
)?; )?;
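The ban handler above now deserializes the stored member event straight from its raw JSON and overrides only the membership via struct update syntax. A small stand-alone sketch of that round trip, assuming serde_json with the raw_value feature; MemberContent is a stand-in for ruma's RoomMemberEventContent.

use serde::{Deserialize, Serialize};
use serde_json::value::{to_raw_value, RawValue};

#[derive(Serialize, Deserialize, Debug)]
struct MemberContent {
    membership: String,
    displayname: Option<String>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // What the database hands back: type-erased JSON stored as a RawValue.
    let stored: Box<RawValue> =
        to_raw_value(&serde_json::json!({ "membership": "join", "displayname": "Alice" }))?;

    // Deserialize straight from the raw string, override one field with
    // struct update syntax, then re-erase it for the outgoing PDU content.
    let banned = serde_json::from_str(stored.get()).map(|event: MemberContent| MemberContent {
        membership: "ban".to_owned(),
        ..event
    })?;

    let content: Box<RawValue> = to_raw_value(&banned)?;
    println!("{}", content.get());
    Ok(())
}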
@ -314,7 +311,7 @@ pub async fn ban_user_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(event).expect("event is valid, we just created it"), content: to_raw_value(&event).expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
@ -346,7 +343,7 @@ pub async fn unban_user_route(
) -> ConduitResult<unban_user::Response> { ) -> ConduitResult<unban_user::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut event = serde_json::from_value::<Raw<ruma::events::room::member::MemberEventContent>>( let mut event: RoomMemberEventContent = serde_json::from_str(
db.rooms db.rooms
.room_state_get( .room_state_get(
&body.room_id, &body.room_id,
@ -358,13 +355,11 @@ pub async fn unban_user_route(
"Cannot unban a user who is not banned.", "Cannot unban a user who is not banned.",
))? ))?
.content .content
.clone(), .get(),
) )
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| Error::bad_database("Invalid member event in database."))?; .map_err(|_| Error::bad_database("Invalid member event in database."))?;
event.membership = ruma::events::room::member::MembershipState::Leave; event.membership = MembershipState::Leave;
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
db.globals db.globals
@ -379,7 +374,7 @@ pub async fn unban_user_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(event).expect("event is valid, we just created it"), content: to_raw_value(&event).expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
@ -539,7 +534,7 @@ async fn join_room_by_id_helper(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -559,7 +554,7 @@ async fn join_room_by_id_helper(
federation::membership::create_join_event_template::v1::Request { federation::membership::create_join_event_template::v1::Request {
room_id, room_id,
user_id: sender_user, user_id: sender_user,
ver: &[RoomVersionId::Version5, RoomVersionId::Version6], ver: &[RoomVersionId::V5, RoomVersionId::V6],
}, },
) )
.await; .await;
@ -575,19 +570,17 @@ async fn join_room_by_id_helper(
let room_version = match make_join_response.room_version { let room_version = match make_join_response.room_version {
Some(room_version) Some(room_version)
if room_version == RoomVersionId::Version5 if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 =>
|| room_version == RoomVersionId::Version6 =>
{ {
room_version room_version
} }
_ => return Err(Error::BadServerResponse("Room version is not supported")), _ => return Err(Error::BadServerResponse("Room version is not supported")),
}; };
let mut join_event_stub = let mut join_event_stub: CanonicalJsonObject =
serde_json::from_str::<CanonicalJsonObject>(make_join_response.event.json().get()) serde_json::from_str(make_join_response.event.get()).map_err(|_| {
.map_err(|_| { Error::BadServerResponse("Invalid make_join event json received from server.")
Error::BadServerResponse("Invalid make_join event json received from server.") })?;
})?;
// TODO: Is origin needed? // TODO: Is origin needed?
join_event_stub.insert( join_event_stub.insert(
@ -604,14 +597,15 @@ async fn join_room_by_id_helper(
); );
join_event_stub.insert( join_event_stub.insert(
"content".to_owned(), "content".to_owned(),
to_canonical_value(member::MemberEventContent { to_canonical_value(RoomMemberEventContent {
membership: member::MembershipState::Join, membership: MembershipState::Join,
displayname: db.users.displayname(sender_user)?, displayname: db.users.displayname(sender_user)?,
avatar_url: db.users.avatar_url(sender_user)?, avatar_url: db.users.avatar_url(sender_user)?,
is_direct: None, is_direct: None,
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
); );
@ -629,12 +623,13 @@ async fn join_room_by_id_helper(
.expect("event is valid, we just created it"); .expect("event is valid, we just created it");
// Generate event id // Generate event id
let event_id = EventId::try_from(&*format!( let event_id = format!(
"${}", "${}",
ruma::signatures::reference_hash(&join_event_stub, &room_version) ruma::signatures::reference_hash(&join_event_stub, &room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
)) );
.expect("ruma's reference hashes are valid event ids"); let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids");
// Add event_id back // Add event_id back
join_event_stub.insert( join_event_stub.insert(
@ -652,15 +647,15 @@ async fn join_room_by_id_helper(
remote_server, remote_server,
federation::membership::create_join_event::v2::Request { federation::membership::create_join_event::v2::Request {
room_id, room_id,
event_id: &event_id, event_id,
pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()),
}, },
) )
.await?; .await?;
db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; db.rooms.get_or_create_shortroomid(room_id, &db.globals)?;
let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) let pdu = PduEvent::from_id_val(event_id, join_event.clone())
.map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?;
let mut state = HashMap::new(); let mut state = HashMap::new();
@ -723,7 +718,7 @@ async fn join_room_by_id_helper(
state state
.into_iter() .into_iter()
.map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals)) .map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals))
.collect::<Result<HashSet<_>>>()?, .collect::<Result<_>>()?,
db, db,
)?; )?;
@ -748,7 +743,7 @@ async fn join_room_by_id_helper(
db.rooms.append_pdu( db.rooms.append_pdu(
&pdu, &pdu,
utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"),
&[pdu.event_id.clone()], iter::once(&*pdu.event_id),
db, db,
)?; )?;
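The leaves argument above changes from a freshly cloned one-element slice to iter::once over a borrow. A sketch of that shape under the assumption of an IntoIterator-style parameter; append_pdu below is a made-up stand-in, not Conduit's signature.

use std::iter;
use std::sync::Arc;

// Made-up stand-in for the leaves parameter.
fn append_pdu<'a>(leaves: impl IntoIterator<Item = &'a str>) {
    for leaf in leaves {
        println!("new leaf: {}", leaf);
    }
}

fn main() {
    let event_id: Arc<str> = Arc::from("$event:example.org");

    // Instead of building a temporary slice of cloned ids, borrow the shared
    // id for the duration of the call.
    append_pdu(iter::once(&*event_id));
}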
@ -756,20 +751,21 @@ async fn join_room_by_id_helper(
// where events in the current room state do not exist // where events in the current room state do not exist
db.rooms.set_room_state(room_id, statehashid)?; db.rooms.set_room_state(room_id, statehashid)?;
} else { } else {
let event = member::MemberEventContent { let event = RoomMemberEventContent {
membership: member::MembershipState::Join, membership: MembershipState::Join,
displayname: db.users.displayname(sender_user)?, displayname: db.users.displayname(sender_user)?,
avatar_url: db.users.avatar_url(sender_user)?, avatar_url: db.users.avatar_url(sender_user)?,
is_direct: None, is_direct: None,
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}; };
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(event).expect("event is valid, we just created it"), content: to_raw_value(&event).expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
@ -785,20 +781,20 @@ async fn join_room_by_id_helper(
db.flush()?; db.flush()?;
Ok(join_room_by_id::Response::new(room_id.clone()).into()) Ok(join_room_by_id::Response::new(room_id.to_owned()).into())
} }
fn validate_and_add_event_id( fn validate_and_add_event_id(
pdu: &Raw<Pdu>, pdu: &RawJsonValue,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
db: &Database, db: &Database,
) -> Result<(EventId, CanonicalJsonObject)> { ) -> Result<(Box<EventId>, CanonicalJsonObject)> {
let mut value = serde_json::from_str::<CanonicalJsonObject>(pdu.json().get()).map_err(|e| { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response") Error::BadServerResponse("Invalid PDU in server response")
})?; })?;
let event_id = EventId::try_from(&*format!( let event_id = EventId::parse(format!(
"${}", "${}",
ruma::signatures::reference_hash(&value, room_version) ruma::signatures::reference_hash(&value, room_version)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
@ -865,32 +861,29 @@ pub(crate) async fn invite_helper<'a>(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
let prev_events = db let prev_events: Vec<_> = db
.rooms .rooms
.get_pdu_leaves(room_id)? .get_pdu_leaves(room_id)?
.into_iter() .into_iter()
.take(20) .take(20)
.collect::<Vec<_>>(); .collect();
let create_event = db let create_event = db
.rooms .rooms
.room_state_get(room_id, &EventType::RoomCreate, "")?; .room_state_get(room_id, &EventType::RoomCreate, "")?;
let create_event_content = create_event let create_event_content: Option<RoomCreateEventContent> = create_event
.as_ref() .as_ref()
.map(|create_event| { .map(|create_event| {
serde_json::from_value::<Raw<CreateEventContent>>(create_event.content.clone()) serde_json::from_str(create_event.content.get()).map_err(|e| {
.expect("Raw::from_value always works.") warn!("Invalid create event: {}", e);
.deserialize() Error::bad_database("Invalid create event in db.")
.map_err(|e| { })
warn!("Invalid create event: {}", e);
Error::bad_database("Invalid create event in db.")
})
}) })
.transpose()?; .transpose()?;
@ -904,13 +897,11 @@ pub(crate) async fn invite_helper<'a>(
// If there was no create event yet, assume we are creating a version 6 room right now // If there was no create event yet, assume we are creating a version 6 room right now
let room_version_id = create_event_content let room_version_id = create_event_content
.map_or(RoomVersionId::Version6, |create_event| { .map_or(RoomVersionId::V6, |create_event| create_event.room_version);
create_event.room_version
});
let room_version = let room_version =
RoomVersion::new(&room_version_id).expect("room version is supported"); RoomVersion::new(&room_version_id).expect("room version is supported");
let content = serde_json::to_value(MemberEventContent { let content = to_raw_value(&RoomMemberEventContent {
avatar_url: None, avatar_url: None,
displayname: None, displayname: None,
is_direct: Some(is_direct), is_direct: Some(is_direct),
@ -918,6 +909,7 @@ pub(crate) async fn invite_helper<'a>(
third_party_invite: None, third_party_invite: None,
blurhash: None, blurhash: None,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("member event is valid value"); .expect("member event is valid value");
@ -946,14 +938,14 @@ pub(crate) async fn invite_helper<'a>(
unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone());
unsigned.insert( unsigned.insert(
"prev_sender".to_owned(), "prev_sender".to_owned(),
serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), to_raw_value(&prev_pdu.sender).expect("UserId is valid"),
); );
} }
let pdu = PduEvent { let pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater"), event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: room_id.clone(), room_id: room_id.to_owned(),
sender: sender_user.clone(), sender: sender_user.to_owned(),
origin_server_ts: utils::millis_since_unix_epoch() origin_server_ts: utils::millis_since_unix_epoch()
.try_into() .try_into()
.expect("time is valid"), .expect("time is valid"),
@ -967,11 +959,15 @@ pub(crate) async fn invite_helper<'a>(
.map(|(_, pdu)| pdu.event_id.clone()) .map(|(_, pdu)| pdu.event_id.clone())
.collect(), .collect(),
redacts: None, redacts: None,
unsigned, unsigned: if unsigned.is_empty() {
hashes: ruma::events::pdu::EventHash { None
} else {
Some(to_raw_value(&unsigned).expect("to_raw_value always works"))
},
hashes: EventHash {
sha256: "aaa".to_owned(), sha256: "aaa".to_owned(),
}, },
signatures: BTreeMap::new(), signatures: None,
}; };
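The unsigned field is now omitted entirely when the map is empty, rather than serialized as an empty object. A sketch with plain serde_json in place of Conduit's canonical JSON types:

use serde_json::value::{to_raw_value, RawValue};
use std::collections::BTreeMap;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut unsigned: BTreeMap<String, serde_json::Value> = BTreeMap::new();
    unsigned.insert(
        "prev_sender".to_owned(),
        serde_json::json!("@alice:example.org"),
    );

    // An empty map becomes None so the field is skipped entirely instead of
    // being serialized as `"unsigned": {}`.
    let field: Option<Box<RawValue>> = if unsigned.is_empty() {
        None
    } else {
        Some(to_raw_value(&unsigned)?)
    };

    println!("{:?}", field.map(|v| v.get().to_owned()));
    Ok(())
}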
let auth_check = state_res::auth_check( let auth_check = state_res::auth_check(
@ -1022,12 +1018,13 @@ pub(crate) async fn invite_helper<'a>(
}; };
// Generate event id // Generate event id
let expected_event_id = EventId::try_from(&*format!( let expected_event_id = format!(
"${}", "${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id) ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
)) );
.expect("ruma's reference hashes are valid event ids"); let expected_event_id = <&EventId>::try_from(expected_event_id.as_str())
.expect("ruma's reference hashes are valid event ids");
let response = db let response = db
.sending .sending
@ -1035,11 +1032,11 @@ pub(crate) async fn invite_helper<'a>(
&db.globals, &db.globals,
user_id.server_name(), user_id.server_name(),
create_invite::v2::Request { create_invite::v2::Request {
room_id: room_id.clone(), room_id,
event_id: expected_event_id.clone(), event_id: expected_event_id,
room_version: room_version_id, room_version: &room_version_id,
event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()),
invite_room_state, invite_room_state: &invite_room_state,
}, },
) )
.await?; .await?;
@ -1062,7 +1059,7 @@ pub(crate) async fn invite_helper<'a>(
warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value);
} }
let origin = serde_json::from_value::<Box<ServerName>>( let origin: Box<ServerName> = serde_json::from_value(
serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( serde_json::to_value(value.get("origin").ok_or(Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
"Event needs an origin field.", "Event needs an origin field.",
@ -1108,7 +1105,7 @@ pub(crate) async fn invite_helper<'a>(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
@ -1116,14 +1113,15 @@ pub(crate) async fn invite_helper<'a>(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(member::MemberEventContent { content: to_raw_value(&RoomMemberEventContent {
membership: member::MembershipState::Invite, membership: MembershipState::Invite,
displayname: db.users.displayname(user_id)?, displayname: db.users.displayname(user_id)?,
avatar_url: db.users.avatar_url(user_id)?, avatar_url: db.users.avatar_url(user_id)?,
is_direct: Some(is_direct), is_direct: Some(is_direct),
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(user_id)?, blurhash: db.users.blurhash(user_id)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,

src/client_server/message.rs (47 changed lines)

@ -5,13 +5,8 @@ use ruma::{
r0::message::{get_message_events, send_message_event}, r0::message::{get_message_events, send_message_event},
}, },
events::EventType, events::EventType,
EventId,
};
use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
}; };
use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::{get, put}; use rocket::{get, put};
@ -45,6 +40,14 @@ pub async fn send_message_event_route(
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
// Forbid m.room.encrypted if encryption is disabled
if &body.event_type == "m.room.encrypted" && !db.globals.allow_encryption() {
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Encryption has been disabled",
));
}
// Check if this is a new transaction id // Check if this is a new transaction id
if let Some(response) = if let Some(response) =
db.transaction_ids db.transaction_ids
@ -59,11 +62,10 @@ pub async fn send_message_event_route(
)); ));
} }
let event_id = EventId::try_from( let event_id = utils::string_from_bytes(&response)
utils::string_from_bytes(&response) .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?, .try_into()
) .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
.map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
return Ok(send_message_event::Response { event_id }.into()); return Ok(send_message_event::Response { event_id }.into());
} }
@ -96,7 +98,7 @@ pub async fn send_message_event_route(
db.flush()?; db.flush()?;
Ok(send_message_event::Response::new(event_id).into()) Ok(send_message_event::Response::new((*event_id).to_owned()).into())
} }
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages` /// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
@ -132,14 +134,11 @@ pub async fn get_message_events_route(
let to = body.to.as_ref().map(|t| t.parse()); let to = body.to.as_ref().map(|t| t.parse());
// Use limit or else 10 // Use limit or else 10
let limit = body let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);
.limit
.try_into()
.map_or(Ok::<_, Error>(10_usize), |l: u32| Ok(l as usize))?;
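The limit fallback above no longer goes through a Result; a tiny illustration of try_into().map_or with a default, where u64 stands in for ruma's UInt:

use std::convert::TryInto;

fn main() {
    let requested: u64 = 25;
    let limit = requested.try_into().map_or(10_usize, |l: u32| l as usize);
    assert_eq!(limit, 25);

    // When the conversion fails, the default of 10 is used instead of an error.
    let limit = u64::MAX.try_into().map_or(10_usize, |l: u32| l as usize);
    assert_eq!(limit, 10);
}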
match body.dir { match body.dir {
get_message_events::Direction::Forward => { get_message_events::Direction::Forward => {
let events_after = db let events_after: Vec<_> = db
.rooms .rooms
.pdus_after(sender_user, &body.room_id, from)? .pdus_after(sender_user, &body.room_id, from)?
.take(limit) .take(limit)
@ -151,14 +150,14 @@ pub async fn get_message_events_route(
.ok() .ok()
}) })
.take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
.collect::<Vec<_>>(); .collect();
let end_token = events_after.last().map(|(count, _)| count.to_string()); let end_token = events_after.last().map(|(count, _)| count.to_string());
let events_after = events_after let events_after: Vec<_> = events_after
.into_iter() .into_iter()
.map(|(_, pdu)| pdu.to_room_event()) .map(|(_, pdu)| pdu.to_room_event())
.collect::<Vec<_>>(); .collect();
let mut resp = get_message_events::Response::new(); let mut resp = get_message_events::Response::new();
resp.start = Some(body.from.to_owned()); resp.start = Some(body.from.to_owned());
@ -169,7 +168,7 @@ pub async fn get_message_events_route(
Ok(resp.into()) Ok(resp.into())
} }
get_message_events::Direction::Backward => { get_message_events::Direction::Backward => {
let events_before = db let events_before: Vec<_> = db
.rooms .rooms
.pdus_until(sender_user, &body.room_id, from)? .pdus_until(sender_user, &body.room_id, from)?
.take(limit) .take(limit)
@ -181,14 +180,14 @@ pub async fn get_message_events_route(
.ok() .ok()
}) })
.take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
.collect::<Vec<_>>(); .collect();
let start_token = events_before.last().map(|(count, _)| count.to_string()); let start_token = events_before.last().map(|(count, _)| count.to_string());
let events_before = events_before let events_before: Vec<_> = events_before
.into_iter() .into_iter()
.map(|(_, pdu)| pdu.to_room_event()) .map(|(_, pdu)| pdu.to_room_event())
.collect::<Vec<_>>(); .collect();
let mut resp = get_message_events::Response::new(); let mut resp = get_message_events::Response::new();
resp.start = Some(body.from.to_owned()); resp.start = Some(body.from.to_owned());

src/client_server/mod.rs (2 changed lines)

@ -16,6 +16,7 @@ mod profile;
mod push; mod push;
mod read_marker; mod read_marker;
mod redact; mod redact;
mod report;
mod room; mod room;
mod search; mod search;
mod session; mod session;
@ -47,6 +48,7 @@ pub use profile::*;
pub use push::*; pub use push::*;
pub use read_marker::*; pub use read_marker::*;
pub use redact::*; pub use redact::*;
pub use report::*;
pub use room::*; pub use room::*;
pub use search::*; pub use search::*;
pub use session::*; pub use session::*;

src/client_server/profile.rs (20 changed lines)

@ -9,9 +9,9 @@ use ruma::{
}, },
federation::{self, query::get_profile_information::v1::ProfileField}, federation::{self, query::get_profile_information::v1::ProfileField},
}, },
events::EventType, events::{room::member::RoomMemberEventContent, EventType},
serde::Raw,
}; };
use serde_json::value::to_raw_value;
use std::{convert::TryInto, sync::Arc}; use std::{convert::TryInto, sync::Arc};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
@ -45,9 +45,9 @@ pub async fn set_displayname_route(
Ok::<_, Error>(( Ok::<_, Error>((
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(ruma::events::room::member::MemberEventContent { content: to_raw_value(&RoomMemberEventContent {
displayname: body.displayname.clone(), displayname: body.displayname.clone(),
..serde_json::from_value::<Raw<_>>( ..serde_json::from_str(
db.rooms db.rooms
.room_state_get( .room_state_get(
&room_id, &room_id,
@ -61,10 +61,8 @@ pub async fn set_displayname_route(
) )
})? })?
.content .content
.clone(), .get(),
) )
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| Error::bad_database("Database contains invalid PDU."))? .map_err(|_| Error::bad_database("Database contains invalid PDU."))?
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
@ -190,9 +188,9 @@ pub async fn set_avatar_url_route(
Ok::<_, Error>(( Ok::<_, Error>((
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(ruma::events::room::member::MemberEventContent { content: to_raw_value(&RoomMemberEventContent {
avatar_url: body.avatar_url.clone(), avatar_url: body.avatar_url.clone(),
..serde_json::from_value::<Raw<_>>( ..serde_json::from_str(
db.rooms db.rooms
.room_state_get( .room_state_get(
&room_id, &room_id,
@ -206,10 +204,8 @@ pub async fn set_avatar_url_route(
) )
})? })?
.content .content
.clone(), .get(),
) )
.expect("from_value::<Raw<..>> can never fail")
.deserialize()
.map_err(|_| Error::bad_database("Database contains invalid PDU."))? .map_err(|_| Error::bad_database("Database contains invalid PDU."))?
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),

src/client_server/push.rs (44 changed lines)

@ -8,7 +8,7 @@ use ruma::{
set_pushrule_enabled, RuleKind, set_pushrule_enabled, RuleKind,
}, },
}, },
events::{push_rules, EventType}, events::{push_rules::PushRulesEvent, EventType},
push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit},
}; };
@ -29,9 +29,9 @@ pub async fn get_pushrules_all_route(
) -> ConduitResult<get_pushrules_all::Response> { ) -> ConduitResult<get_pushrules_all::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event = db let event: PushRulesEvent = db
.account_data .account_data
.get::<push_rules::PushRulesEvent>(None, sender_user, EventType::PushRules)? .get(None, sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"PushRules event not found.", "PushRules event not found.",
@ -57,9 +57,9 @@ pub async fn get_pushrule_route(
) -> ConduitResult<get_pushrule::Response> { ) -> ConduitResult<get_pushrule::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event = db let event: PushRulesEvent = db
.account_data .account_data
.get::<push_rules::PushRulesEvent>(None, sender_user, EventType::PushRules)? .get(None, sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"PushRules event not found.", "PushRules event not found.",
@ -105,15 +105,15 @@ pub async fn get_pushrule_route(
/// Creates a single specified push rule for this user. /// Creates a single specified push rule for this user.
#[cfg_attr( #[cfg_attr(
feature = "conduit_bin", feature = "conduit_bin",
put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<req>") put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "<body>")
)] )]
#[tracing::instrument(skip(db, req))] #[tracing::instrument(skip(db, body))]
pub async fn set_pushrule_route( pub async fn set_pushrule_route(
db: DatabaseGuard, db: DatabaseGuard,
req: Ruma<set_pushrule::Request<'_>>, body: Ruma<set_pushrule::Request<'_>>,
) -> ConduitResult<set_pushrule::Response> { ) -> ConduitResult<set_pushrule::Response> {
let sender_user = req.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = req.body; let body = body.body;
if body.scope != "global" { if body.scope != "global" {
return Err(Error::BadRequest( return Err(Error::BadRequest(
@ -122,9 +122,9 @@ pub async fn set_pushrule_route(
)); ));
} }
let mut event = db let mut event: PushRulesEvent = db
.account_data .account_data
.get::<push_rules::PushRulesEvent>(None, sender_user, EventType::PushRules)? .get(None, sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"PushRules event not found.", "PushRules event not found.",
@ -222,9 +222,9 @@ pub async fn get_pushrule_actions_route(
)); ));
} }
let mut event = db let mut event: PushRulesEvent = db
.account_data .account_data
.get::<push_rules::PushRulesEvent>(None, sender_user, EventType::PushRules)? .get(None, sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"PushRules event not found.", "PushRules event not found.",
@ -284,9 +284,9 @@ pub async fn set_pushrule_actions_route(
)); ));
} }
let mut event = db let mut event: PushRulesEvent = db
.account_data .account_data
.get::<push_rules::PushRulesEvent>(None, sender_user, EventType::PushRules)? .get(None, sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"PushRules event not found.", "PushRules event not found.",
@ -356,9 +356,9 @@ pub async fn get_pushrule_enabled_route(
)); ));
} }
let mut event = db let mut event: PushRulesEvent = db
.account_data .account_data
.get::<push_rules::PushRulesEvent>(None, sender_user, EventType::PushRules)? .get(None, sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"PushRules event not found.", "PushRules event not found.",
@ -420,9 +420,9 @@ pub async fn set_pushrule_enabled_route(
)); ));
} }
let mut event = db let mut event: PushRulesEvent = db
.account_data .account_data
.get::<ruma::events::push_rules::PushRulesEvent>(None, sender_user, EventType::PushRules)? .get(None, sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"PushRules event not found.", "PushRules event not found.",
@ -497,9 +497,9 @@ pub async fn delete_pushrule_route(
)); ));
} }
let mut event = db let mut event: PushRulesEvent = db
.account_data .account_data
.get::<push_rules::PushRulesEvent>(None, sender_user, EventType::PushRules)? .get(None, sender_user, EventType::PushRules)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"PushRules event not found.", "PushRules event not found.",

src/client_server/redact.rs (9 changed lines)

@ -3,11 +3,12 @@ use std::sync::Arc;
use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma}; use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma};
use ruma::{ use ruma::{
api::client::r0::redact::redact_event, api::client::r0::redact::redact_event,
events::{room::redaction, EventType}, events::{room::redaction::RoomRedactionEventContent, EventType},
}; };
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::put; use rocket::put;
use serde_json::value::to_raw_value;
/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}`
/// ///
@ -24,6 +25,7 @@ pub async fn redact_event_route(
body: Ruma<redact_event::Request<'_>>, body: Ruma<redact_event::Request<'_>>,
) -> ConduitResult<redact_event::Response> { ) -> ConduitResult<redact_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = body.body;
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
db.globals db.globals
@ -38,13 +40,13 @@ pub async fn redact_event_route(
let event_id = db.rooms.build_and_append_pdu( let event_id = db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomRedaction, event_type: EventType::RoomRedaction,
content: serde_json::to_value(redaction::RedactionEventContent { content: to_raw_value(&RoomRedactionEventContent {
reason: body.reason.clone(), reason: body.reason.clone(),
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: None, state_key: None,
redacts: Some(body.event_id.clone()), redacts: Some(body.event_id.into()),
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -56,5 +58,6 @@ pub async fn redact_event_route(
db.flush()?; db.flush()?;
let event_id = (*event_id).to_owned();
Ok(redact_event::Response { event_id }.into()) Ok(redact_event::Response { event_id }.into())
} }
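Here redacts now takes the event id by value and the returned id is re-owned with (*event_id).to_owned(), reflecting the shared Arc<EventId> storage introduced on this branch. A sketch using Arc<str> and String as stand-ins for the unsized id types:

use std::sync::Arc;

fn main() {
    let shared: Arc<str> = Arc::from("$someevent:example.org");

    // Deref to the unsized value, then clone it into a fresh owned allocation,
    // leaving the shared Arc untouched.
    let owned: String = (*shared).to_owned();

    assert_eq!(&*shared, owned.as_str());
}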

src/client_server/report.rs (84 changed lines)

@ -0,0 +1,84 @@
use crate::{
database::{admin::AdminCommand, DatabaseGuard},
ConduitResult, Error, Ruma,
};
use ruma::{
api::client::{error::ErrorKind, r0::room::report_content},
events::room::message,
int,
};
#[cfg(feature = "conduit_bin")]
use rocket::{http::RawStr, post};
/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}`
///
/// Reports an inappropriate event to homeserver admins
///
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/rooms/<_>/report/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn report_event_route(
db: DatabaseGuard,
body: Ruma<report_content::Request<'_>>,
) -> ConduitResult<report_content::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let pdu = match db.rooms.get_pdu(&body.event_id)? {
Some(pdu) => pdu,
_ => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid Event ID",
))
}
};
if body.score > int!(0) || body.score < int!(-100) {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid score, must be within 0 to -100",
));
};
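The score bounds use the int! macro rather than Int::from. A small sketch, assuming the ruma version on this branch re-exports js_int's Int and int! at the crate root (the report handler imports int from ruma above):

use ruma::{int, Int};

fn main() {
    let old_style: Int = Int::from(100); // runtime conversion from an i32
    let new_style: Int = int!(100);      // literal checked by the macro
    assert_eq!(old_style, new_style);
}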
if body.reason.chars().count() > 250 {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Reason too long, should be 250 characters or fewer",
));
};
db.admin.send(AdminCommand::SendMessage(
message::RoomMessageEventContent::text_html(
format!(
"Report received from: {}\n\n\
Event ID: {}\n\
Room ID: {}\n\
Sent By: {}\n\n\
Report Score: {}\n\
Report Reason: {}",
sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
),
format!(
"<details><summary>Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\
</a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\
<a href=\"https://matrix.to/#/{2}/{1}\">🔗</a></li><li>Room ID: <code>{2}</code>\
</li><li>Sent By: <a href=\"https://matrix.to/#/{3}\">{3}</a></li></ul></li><li>\
Report Info<ul><li>Report Score: {4}</li><li>Report Reason: {5}</li></ul></li>\
</ul></details>",
sender_user,
pdu.event_id,
pdu.room_id,
pdu.sender,
body.score,
RawStr::new(&body.reason).html_escape()
),
),
));
db.flush()?;
Ok(report_content::Response {}.into())
}

src/client_server/room.rs (275 changed lines)

@ -8,13 +8,26 @@ use ruma::{
r0::room::{self, aliases, create_room, get_room_event, upgrade_room}, r0::room::{self, aliases, create_room, get_room_event, upgrade_room},
}, },
events::{ events::{
room::{guest_access, history_visibility, join_rules, member, name, topic}, room::{
canonical_alias::RoomCanonicalAliasEventContent,
create::RoomCreateEventContent,
guest_access::{GuestAccess, RoomGuestAccessEventContent},
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
join_rules::{JoinRule, RoomJoinRulesEventContent},
member::{MembershipState, RoomMemberEventContent},
name::RoomNameEventContent,
power_levels::RoomPowerLevelsEventContent,
tombstone::RoomTombstoneEventContent,
topic::RoomTopicEventContent,
},
EventType, EventType,
}, },
serde::Raw, int,
serde::{CanonicalJsonObject, JsonObject},
RoomAliasId, RoomId, RoomVersionId, RoomAliasId, RoomId, RoomVersionId,
}; };
use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; use serde_json::{json, value::to_raw_value};
use std::{cmp::max, collections::BTreeMap, convert::TryInto, sync::Arc};
use tracing::{info, warn}; use tracing::{info, warn};
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
@ -61,14 +74,26 @@ pub async fn create_room_route(
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
let alias: Option<RoomAliasId> = if !db.globals.allow_room_creation()
&& !body.from_appservice
&& !db.users.is_admin(sender_user, &db.rooms, &db.globals)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Room creation has been disabled.",
));
}
let alias: Option<Box<RoomAliasId>> =
body.room_alias_name body.room_alias_name
.as_ref() .as_ref()
.map_or(Ok(None), |localpart| { .map_or(Ok(None), |localpart| {
// TODO: Check for invalid characters and maximum length // TODO: Check for invalid characters and maximum length
let alias = let alias =
RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name()))
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; .map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.")
})?;
if db.rooms.id_from_alias(&alias)?.is_some() { if db.rooms.id_from_alias(&alias)?.is_some() {
Err(Error::BadRequest( Err(Error::BadRequest(
@ -80,12 +105,9 @@ pub async fn create_room_route(
} }
})?; })?;
let mut content = ruma::events::room::create::CreateEventContent::new(sender_user.clone()); let room_version = match body.room_version.clone() {
content.federate = body.creation_content.federate;
content.predecessor = body.creation_content.predecessor.clone();
content.room_version = match body.room_version.clone() {
Some(room_version) => { Some(room_version) => {
if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 {
room_version room_version
} else { } else {
return Err(Error::BadRequest( return Err(Error::BadRequest(
@ -94,14 +116,64 @@ pub async fn create_room_route(
)); ));
} }
} }
None => RoomVersionId::Version6, None => RoomVersionId::V6,
}; };
let content = match &body.creation_content {
Some(content) => {
let mut content = content
.deserialize_as::<CanonicalJsonObject>()
.expect("Invalid creation content");
content.insert(
"creator".into(),
json!(&sender_user).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
})?,
);
content.insert(
"room_version".into(),
json!(room_version.as_str()).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
})?,
);
content
}
None => {
let mut content = serde_json::from_str::<CanonicalJsonObject>(
to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
.get(),
)
.unwrap();
content.insert(
"room_version".into(),
json!(room_version.as_str()).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")
})?,
);
content
}
};
// Validate creation content
let de_result = serde_json::from_str::<CanonicalJsonObject>(
to_raw_value(&content)
.expect("Invalid creation content")
.get(),
);
if de_result.is_err() {
return Err(Error::BadRequest(
ErrorKind::BadJson,
"Invalid creation content",
));
}
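create_room_route now starts from the client-supplied creation_content (or a fresh RoomCreateEventContent) and force-inserts the creator and room_version keys before validating. A simplified sketch with serde_json maps in place of CanonicalJsonObject; the user id and version values are illustrative only.

use serde_json::{json, Map, Value};

// The server always overrides these keys, whatever the client sent.
fn build_create_content(user_content: Option<Map<String, Value>>) -> Map<String, Value> {
    let mut content = user_content.unwrap_or_default();
    content.insert("creator".to_owned(), json!("@alice:example.org"));
    content.insert("room_version".to_owned(), json!("6"));
    content
}

fn main() {
    let from_client = json!({ "m.federate": false })
        .as_object()
        .cloned()
        .unwrap();

    let content = build_create_content(Some(from_client));
    assert_eq!(content["room_version"], json!("6"));
    assert_eq!(content["m.federate"], json!(false));
}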
// 1. The room create event // 1. The room create event
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomCreate, event_type: EventType::RoomCreate,
content: serde_json::to_value(content).expect("event is valid, we just created it"), content: to_raw_value(&content).expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
@ -116,14 +188,15 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(member::MemberEventContent { content: to_raw_value(&RoomMemberEventContent {
membership: member::MembershipState::Join, membership: MembershipState::Join,
displayname: db.users.displayname(sender_user)?, displayname: db.users.displayname(sender_user)?,
avatar_url: db.users.avatar_url(sender_user)?, avatar_url: db.users.avatar_url(sender_user)?,
is_direct: Some(body.is_direct), is_direct: Some(body.is_direct),
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -149,28 +222,25 @@ pub async fn create_room_route(
}); });
let mut users = BTreeMap::new(); let mut users = BTreeMap::new();
users.insert(sender_user.clone(), 100.into()); users.insert(sender_user.clone(), int!(100));
if preset == create_room::RoomPreset::TrustedPrivateChat { if preset == create_room::RoomPreset::TrustedPrivateChat {
for invite_ in &body.invite { for invite_ in &body.invite {
users.insert(invite_.clone(), 100.into()); users.insert(invite_.clone(), int!(100));
} }
} }
let mut power_levels_content = let mut power_levels_content = serde_json::to_value(RoomPowerLevelsEventContent {
serde_json::to_value(ruma::events::room::power_levels::PowerLevelsEventContent { users,
users, ..Default::default()
..Default::default() })
}) .expect("event is valid, we just created it");
.expect("event is valid, we just created it");
if let Some(power_level_content_override) = &body.power_level_content_override { if let Some(power_level_content_override) = &body.power_level_content_override {
let json = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>( let json: JsonObject = serde_json::from_str(power_level_content_override.json().get())
power_level_content_override.json().get(), .map_err(|_| {
) Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.")
.map_err(|_| { })?;
Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.")
})?;
for (key, value) in json { for (key, value) in json {
power_levels_content[key] = value; power_levels_content[key] = value;
@ -180,7 +250,8 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomPowerLevels, event_type: EventType::RoomPowerLevels,
content: power_levels_content, content: to_raw_value(&power_levels_content)
.expect("to_raw_value always works on serde_json::Value"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
@ -196,12 +267,10 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomCanonicalAlias, event_type: EventType::RoomCanonicalAlias,
content: serde_json::to_value( content: to_raw_value(&RoomCanonicalAliasEventContent {
ruma::events::room::canonical_alias::CanonicalAliasEventContent { alias: Some(room_alias_id.to_owned()),
alias: Some(room_alias_id.clone()), alt_aliases: vec![],
alt_aliases: vec![], })
},
)
.expect("We checked that alias earlier, it must be fine"), .expect("We checked that alias earlier, it must be fine"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
@ -220,17 +289,12 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomJoinRules, event_type: EventType::RoomJoinRules,
content: match preset { content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
create_room::RoomPreset::PublicChat => serde_json::to_value( create_room::RoomPreset::PublicChat => JoinRule::Public,
join_rules::JoinRulesEventContent::new(join_rules::JoinRule::Public),
)
.expect("event is valid, we just created it"),
// according to spec "invite" is the default // according to spec "invite" is the default
_ => serde_json::to_value(join_rules::JoinRulesEventContent::new( _ => JoinRule::Invite,
join_rules::JoinRule::Invite, }))
)) .expect("event is valid, we just created it"),
.expect("event is valid, we just created it"),
},
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
@ -245,8 +309,8 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomHistoryVisibility, event_type: EventType::RoomHistoryVisibility,
content: serde_json::to_value(history_visibility::HistoryVisibilityEventContent::new( content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
history_visibility::HistoryVisibility::Shared, HistoryVisibility::Shared,
)) ))
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -263,18 +327,11 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomGuestAccess, event_type: EventType::RoomGuestAccess,
content: match preset { content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
create_room::RoomPreset::PublicChat => { create_room::RoomPreset::PublicChat => GuestAccess::Forbidden,
serde_json::to_value(guest_access::GuestAccessEventContent::new( _ => GuestAccess::CanJoin,
guest_access::GuestAccess::Forbidden, }))
)) .expect("event is valid, we just created it"),
.expect("event is valid, we just created it")
}
_ => serde_json::to_value(guest_access::GuestAccessEventContent::new(
guest_access::GuestAccess::CanJoin,
))
.expect("event is valid, we just created it"),
},
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
@ -306,7 +363,7 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomName, event_type: EventType::RoomName,
content: serde_json::to_value(name::NameEventContent::new(Some(name.clone()))) content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
@ -323,7 +380,7 @@ pub async fn create_room_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomTopic, event_type: EventType::RoomTopic,
content: serde_json::to_value(topic::TopicEventContent { content: to_raw_value(&RoomTopicEventContent {
topic: topic.clone(), topic: topic.clone(),
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
@ -426,7 +483,7 @@ pub async fn get_room_aliases_route(
.into()) .into())
} }
/// # `GET /_matrix/client/r0/rooms/{roomId}/upgrade` /// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade`
/// ///
/// Upgrades the room. /// Upgrades the room.
/// ///
@ -447,10 +504,7 @@ pub async fn upgrade_room_route(
) -> ConduitResult<upgrade_room::Response> { ) -> ConduitResult<upgrade_room::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if !matches!( if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) {
body.new_version,
RoomVersionId::Version5 | RoomVersionId::Version6
) {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::UnsupportedRoomVersion, ErrorKind::UnsupportedRoomVersion,
"This server does not support that room version.", "This server does not support that room version.",
@ -477,8 +531,8 @@ pub async fn upgrade_room_route(
let tombstone_event_id = db.rooms.build_and_append_pdu( let tombstone_event_id = db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomTombstone, event_type: EventType::RoomTombstone,
content: serde_json::to_value(ruma::events::room::tombstone::TombstoneEventContent { content: to_raw_value(&RoomTombstoneEventContent {
body: "This room has been replaced".to_string(), body: "This room has been replaced".to_owned(),
replacement_room: replacement_room.clone(), replacement_room: replacement_room.clone(),
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
@ -504,36 +558,60 @@ pub async fn upgrade_room_route(
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
// Get the old room federations status // Get the old room creation event
let federate = serde_json::from_value::<Raw<ruma::events::room::create::CreateEventContent>>( let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
db.rooms db.rooms
.room_state_get(&body.room_id, &EventType::RoomCreate, "")? .room_state_get(&body.room_id, &EventType::RoomCreate, "")?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.content .content
.clone(), .get(),
) )
.expect("Raw::from_value always works") .map_err(|_| Error::bad_database("Invalid room event in database."))?;
.deserialize()
.map_err(|_| Error::bad_database("Invalid room event in database."))?
.federate;
// Use the m.room.tombstone event as the predecessor // Use the m.room.tombstone event as the predecessor
let predecessor = Some(ruma::events::room::create::PreviousRoom::new( let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
body.room_id.clone(), body.room_id.clone(),
tombstone_event_id, (*tombstone_event_id).to_owned(),
)); ));
// Send a m.room.create event containing a predecessor field and the applicable room_version // Send a m.room.create event containing a predecessor field and the applicable room_version
let mut create_event_content = create_event_content.insert(
ruma::events::room::create::CreateEventContent::new(sender_user.clone()); "creator".into(),
create_event_content.federate = federate; json!(&sender_user)
create_event_content.room_version = body.new_version.clone(); .try_into()
create_event_content.predecessor = predecessor; .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
);
create_event_content.insert(
"room_version".into(),
json!(&body.new_version)
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
);
create_event_content.insert(
"predecessor".into(),
json!(predecessor)
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?,
);
// Validate creation event content
let de_result = serde_json::from_str::<CanonicalJsonObject>(
to_raw_value(&create_event_content)
.expect("Error forming creation event")
.get(),
);
if de_result.is_err() {
return Err(Error::BadRequest(
ErrorKind::BadJson,
"Error forming creation event",
));
}
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomCreate, event_type: EventType::RoomCreate,
content: serde_json::to_value(create_event_content) content: to_raw_value(&create_event_content)
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
@ -549,14 +627,15 @@ pub async fn upgrade_room_route(
db.rooms.build_and_append_pdu( db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMember, event_type: EventType::RoomMember,
content: serde_json::to_value(member::MemberEventContent { content: to_raw_value(&RoomMemberEventContent {
membership: member::MembershipState::Join, membership: MembershipState::Join,
displayname: db.users.displayname(sender_user)?, displayname: db.users.displayname(sender_user)?,
avatar_url: db.users.avatar_url(sender_user)?, avatar_url: db.users.avatar_url(sender_user)?,
is_direct: None, is_direct: None,
third_party_invite: None, third_party_invite: None,
blurhash: db.users.blurhash(sender_user)?, blurhash: db.users.blurhash(sender_user)?,
reason: None, reason: None,
join_authorized_via_users_server: None,
}) })
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
@ -611,23 +690,17 @@ pub async fn upgrade_room_route(
} }
// Get the old room power levels // Get the old room power levels
let mut power_levels_event_content = let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
serde_json::from_value::<Raw<ruma::events::room::power_levels::PowerLevelsEventContent>>( db.rooms
db.rooms .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")?
.room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content
.content .get(),
.clone(), )
) .map_err(|_| Error::bad_database("Invalid room event in database."))?;
.expect("database contains invalid PDU")
.deserialize()
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
// Setting events_default and invite to the greater of 50 and users_default + 1 // Setting events_default and invite to the greater of 50 and users_default + 1
let new_level = max( let new_level = max(int!(50), power_levels_event_content.users_default + int!(1));
50.into(),
power_levels_event_content.users_default + 1.into(),
);
power_levels_event_content.events_default = new_level; power_levels_event_content.events_default = new_level;
power_levels_event_content.invite = new_level; power_levels_event_content.invite = new_level;
@ -635,7 +708,7 @@ pub async fn upgrade_room_route(
let _ = db.rooms.build_and_append_pdu( let _ = db.rooms.build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomPowerLevels, event_type: EventType::RoomPowerLevels,
content: serde_json::to_value(power_levels_event_content) content: to_raw_value(&power_levels_event_content)
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
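Note: a minimal sketch of the power-level computation in the hunk above, assuming the int! macro and Int type that Ruma re-exports from js_int (as used in the new code). Raising events_default and invite to the greater of 50 and users_default + 1 effectively locks the old room for ordinary members once the replacement room exists.

use std::cmp::max;

use ruma::{int, Int};

// events_default/invite are bumped to max(50, users_default + 1) in the old room.
fn locked_default_level(users_default: Int) -> Int {
    max(int!(50), users_default + int!(1))
}

fn main() {
    assert_eq!(locked_default_level(int!(0)), int!(50));
    assert_eq!(locked_default_level(int!(75)), int!(76));
}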

src/client_server/search.rs (6 lines changed)

@ -74,7 +74,7 @@ pub async fn search_events_route(
} }
} }
let results = results let results: Vec<_> = results
.iter() .iter()
.map(|result| { .map(|result| {
Ok::<_, Error>(SearchResult { Ok::<_, Error>(SearchResult {
@ -95,7 +95,7 @@ pub async fn search_events_route(
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
.skip(skip) .skip(skip)
.take(limit) .take(limit)
.collect::<Vec<_>>(); .collect();
let next_batch = if results.len() < limit as usize { let next_batch = if results.len() < limit as usize {
None None
@ -114,7 +114,7 @@ pub async fn search_events_route(
.search_term .search_term
.split_terminator(|c: char| !c.is_alphanumeric()) .split_terminator(|c: char| !c.is_alphanumeric())
.map(str::to_lowercase) .map(str::to_lowercase)
.collect::<Vec<_>>(), .collect(),
}, },
}) })
.into()) .into())
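Note: the changes in this file are purely stylistic; a small sketch of the pattern, with the input string chosen here only for illustration.

fn main() {
    let words = "search these words";

    // Before: turbofish on collect.
    let lower_turbofish = words
        .split_whitespace()
        .map(str::to_lowercase)
        .collect::<Vec<_>>();

    // After: annotate the binding and let collect() infer its target type.
    let lower_annotated: Vec<_> = words.split_whitespace().map(str::to_lowercase).collect();

    assert_eq!(lower_turbofish, lower_annotated);
}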

src/client_server/session.rs (12 lines changed)

@ -60,10 +60,10 @@ pub async fn login_route(
// Validate login method // Validate login method
// TODO: Other login methods // TODO: Other login methods
let user_id = match &body.login_info { let user_id = match &body.login_info {
login::IncomingLoginInfo::Password { login::IncomingLoginInfo::Password(login::IncomingPassword {
identifier, identifier,
password, password,
} => { }) => {
let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier { let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier {
matrix_id matrix_id
} else { } else {
@ -97,7 +97,7 @@ pub async fn login_route(
user_id user_id
} }
login::IncomingLoginInfo::Token { token } => { login::IncomingLoginInfo::Token(login::IncomingToken { token }) => {
if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() {
let token = jsonwebtoken::decode::<Claims>( let token = jsonwebtoken::decode::<Claims>(
token, token,
@ -116,6 +116,12 @@ pub async fn login_route(
)); ));
} }
} }
_ => {
return Err(Error::BadRequest(
ErrorKind::Unknown,
"Unsupported login type.",
));
}
}; };
// Generate new device id if the user didn't specify one // Generate new device id if the user didn't specify one

src/client_server/state.rs (36 lines changed)

@ -10,8 +10,8 @@ use ruma::{
}, },
events::{ events::{
room::{ room::{
canonical_alias::CanonicalAliasEventContent, canonical_alias::RoomCanonicalAliasEventContent,
history_visibility::{HistoryVisibility, HistoryVisibilityEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
}, },
AnyStateEventContent, EventType, AnyStateEventContent, EventType,
}, },
@ -52,6 +52,7 @@ pub async fn send_state_event_for_key_route(
db.flush()?; db.flush()?;
let event_id = (*event_id).to_owned();
Ok(send_state_event::Response { event_id }.into()) Ok(send_state_event::Response { event_id }.into())
} }
@ -73,6 +74,14 @@ pub async fn send_state_event_for_empty_key_route(
) -> ConduitResult<send_state_event::Response> { ) -> ConduitResult<send_state_event::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
// Forbid m.room.encryption if encryption is disabled
if &body.event_type == "m.room.encryption" && !db.globals.allow_encryption() {
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Encryption has been disabled",
));
}
let event_id = send_state_event_for_key_helper( let event_id = send_state_event_for_key_helper(
&db, &db,
sender_user, sender_user,
@ -85,6 +94,7 @@ pub async fn send_state_event_for_empty_key_route(
db.flush()?; db.flush()?;
let event_id = (*event_id).to_owned();
Ok(send_state_event::Response { event_id }.into()) Ok(send_state_event::Response { event_id }.into())
} }
@ -112,13 +122,13 @@ pub async fn get_state_events_route(
db.rooms db.rooms
.room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
.map(|event| { .map(|event| {
serde_json::from_value::<HistoryVisibilityEventContent>(event.content.clone()) serde_json::from_str(event.content.get())
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
.map_err(|_| { .map_err(|_| {
Error::bad_database( Error::bad_database(
"Invalid room history visibility event in database.", "Invalid room history visibility event in database.",
) )
}) })
.map(|e| e.history_visibility)
}), }),
Some(Ok(HistoryVisibility::WorldReadable)) Some(Ok(HistoryVisibility::WorldReadable))
) )
@ -164,13 +174,13 @@ pub async fn get_state_events_for_key_route(
db.rooms db.rooms
.room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
.map(|event| { .map(|event| {
serde_json::from_value::<HistoryVisibilityEventContent>(event.content.clone()) serde_json::from_str(event.content.get())
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
.map_err(|_| { .map_err(|_| {
Error::bad_database( Error::bad_database(
"Invalid room history visibility event in database.", "Invalid room history visibility event in database.",
) )
}) })
.map(|e| e.history_visibility)
}), }),
Some(Ok(HistoryVisibility::WorldReadable)) Some(Ok(HistoryVisibility::WorldReadable))
) )
@ -190,7 +200,7 @@ pub async fn get_state_events_for_key_route(
))?; ))?;
Ok(get_state_events_for_key::Response { Ok(get_state_events_for_key::Response {
content: serde_json::from_value(event.content.clone()) content: serde_json::from_str(event.content.get())
.map_err(|_| Error::bad_database("Invalid event content in database"))?, .map_err(|_| Error::bad_database("Invalid event content in database"))?,
} }
.into()) .into())
@ -220,13 +230,13 @@ pub async fn get_state_events_for_empty_key_route(
db.rooms db.rooms
.room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")?
.map(|event| { .map(|event| {
serde_json::from_value::<HistoryVisibilityEventContent>(event.content.clone()) serde_json::from_str(event.content.get())
.map(|e: RoomHistoryVisibilityEventContent| e.history_visibility)
.map_err(|_| { .map_err(|_| {
Error::bad_database( Error::bad_database(
"Invalid room history visibility event in database.", "Invalid room history visibility event in database.",
) )
}) })
.map(|e| e.history_visibility)
}), }),
Some(Ok(HistoryVisibility::WorldReadable)) Some(Ok(HistoryVisibility::WorldReadable))
) )
@ -246,7 +256,7 @@ pub async fn get_state_events_for_empty_key_route(
))?; ))?;
Ok(get_state_events_for_key::Response { Ok(get_state_events_for_key::Response {
content: serde_json::from_value(event.content.clone()) content: serde_json::from_str(event.content.get())
.map_err(|_| Error::bad_database("Invalid event content in database"))?, .map_err(|_| Error::bad_database("Invalid event content in database"))?,
} }
.into()) .into())
@ -259,13 +269,13 @@ async fn send_state_event_for_key_helper(
event_type: EventType, event_type: EventType,
json: &Raw<AnyStateEventContent>, json: &Raw<AnyStateEventContent>,
state_key: String, state_key: String,
) -> Result<EventId> { ) -> Result<Arc<EventId>> {
let sender_user = sender; let sender_user = sender;
// TODO: Review this check, error if event is unparsable, use event type, allow alias if it // TODO: Review this check, error if event is unparsable, use event type, allow alias if it
// previously existed // previously existed
if let Ok(canonical_alias) = if let Ok(canonical_alias) =
serde_json::from_str::<CanonicalAliasEventContent>(json.json().get()) serde_json::from_str::<RoomCanonicalAliasEventContent>(json.json().get())
{ {
let mut aliases = canonical_alias.alt_aliases.clone(); let mut aliases = canonical_alias.alt_aliases.clone();
@ -295,7 +305,7 @@ async fn send_state_event_for_key_helper(
.roomid_mutex_state .roomid_mutex_state
.write() .write()
.unwrap() .unwrap()
.entry(room_id.clone()) .entry(room_id.to_owned())
.or_default(), .or_default(),
); );
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;

src/client_server/sync.rs (89 lines changed)

@ -1,13 +1,16 @@
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse};
use ruma::{ use ruma::{
api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, api::client::r0::{sync::sync_events, uiaa::UiaaResponse},
events::{room::member::MembershipState, AnySyncEphemeralRoomEvent, EventType}, events::{
room::member::{MembershipState, RoomMemberEventContent},
AnySyncEphemeralRoomEvent, EventType,
},
serde::Raw, serde::Raw,
DeviceId, RoomId, UserId, DeviceId, RoomId, UserId,
}; };
use std::{ use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::TryInto,
sync::Arc, sync::Arc,
time::Duration, time::Duration,
}; };
@ -57,9 +60,10 @@ use rocket::{get, tokio};
pub async fn sync_events_route( pub async fn sync_events_route(
db: DatabaseGuard, db: DatabaseGuard,
body: Ruma<sync_events::Request<'_>>, body: Ruma<sync_events::Request<'_>>,
) -> std::result::Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> { ) -> Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated");
let body = body.body;
let arc_db = Arc::new(db); let arc_db = Arc::new(db);
@ -129,7 +133,7 @@ pub async fn sync_events_route(
async fn sync_helper_wrapper( async fn sync_helper_wrapper(
db: Arc<DatabaseGuard>, db: Arc<DatabaseGuard>,
sender_user: UserId, sender_user: Box<UserId>,
sender_device: Box<DeviceId>, sender_device: Box<DeviceId>,
since: Option<String>, since: Option<String>,
full_state: bool, full_state: bool,
@ -173,13 +177,13 @@ async fn sync_helper_wrapper(
async fn sync_helper( async fn sync_helper(
db: Arc<DatabaseGuard>, db: Arc<DatabaseGuard>,
sender_user: UserId, sender_user: Box<UserId>,
sender_device: Box<DeviceId>, sender_device: Box<DeviceId>,
since: Option<String>, since: Option<String>,
full_state: bool, full_state: bool,
timeout: Option<Duration>, timeout: Option<Duration>,
// bool = caching allowed // bool = caching allowed
) -> std::result::Result<(sync_events::Response, bool), Error> { ) -> Result<(sync_events::Response, bool), Error> {
// TODO: match body.set_presence { // TODO: match body.set_presence {
db.rooms.edus.ping_presence(&sender_user)?; db.rooms.edus.ping_presence(&sender_user)?;
@ -241,13 +245,13 @@ async fn sync_helper(
}); });
// Take the last 10 events for the timeline // Take the last 10 events for the timeline
let timeline_pdus = non_timeline_pdus let timeline_pdus: Vec<_> = non_timeline_pdus
.by_ref() .by_ref()
.take(10) .take(10)
.collect::<Vec<_>>() .collect::<Vec<_>>()
.into_iter() .into_iter()
.rev() .rev()
.collect::<Vec<_>>(); .collect();
let send_notification_counts = !timeline_pdus.is_empty() let send_notification_counts = !timeline_pdus.is_empty()
|| db || db
@ -287,13 +291,13 @@ async fn sync_helper(
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == EventType::RoomMember) .filter(|(_, pdu)| pdu.kind == EventType::RoomMember)
.map(|(_, pdu)| { .map(|(_, pdu)| {
let content = serde_json::from_value::< let content: RoomMemberEventContent =
ruma::events::room::member::MemberEventContent, serde_json::from_str(pdu.content.get()).map_err(|_| {
>(pdu.content.clone()) Error::bad_database("Invalid member event in database.")
.map_err(|_| Error::bad_database("Invalid member event in database."))?; })?;
if let Some(state_key) = &pdu.state_key { if let Some(state_key) = &pdu.state_key {
let user_id = UserId::try_from(state_key.clone()).map_err(|_| { let user_id = UserId::parse(state_key.clone()).map_err(|_| {
Error::bad_database("Invalid UserId in member PDU.") Error::bad_database("Invalid UserId in member PDU.")
})?; })?;
@ -343,11 +347,11 @@ async fn sync_helper(
let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
let state_events = current_state_ids let state_events: Vec<_> = current_state_ids
.iter() .iter()
.map(|(_, id)| db.rooms.get_pdu(id)) .map(|(_, id)| db.rooms.get_pdu(id))
.filter_map(|r| r.ok().flatten()) .filter_map(|r| r.ok().flatten())
.collect::<Vec<_>>(); .collect();
( (
heroes, heroes,
@ -363,7 +367,7 @@ async fn sync_helper(
// Incremental /sync // Incremental /sync
let since_shortstatehash = since_shortstatehash.unwrap(); let since_shortstatehash = since_shortstatehash.unwrap();
let since_sender_member = db let since_sender_member: Option<RoomMemberEventContent> = db
.rooms .rooms
.state_get( .state_get(
since_shortstatehash, since_shortstatehash,
@ -371,13 +375,9 @@ async fn sync_helper(
sender_user.as_str(), sender_user.as_str(),
)? )?
.and_then(|pdu| { .and_then(|pdu| {
serde_json::from_value::<Raw<ruma::events::room::member::MemberEventContent>>( serde_json::from_str(pdu.content.get())
pdu.content.clone(), .map_err(|_| Error::bad_database("Invalid PDU in database."))
) .ok()
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid PDU in database."))
.ok()
}); });
let joined_since_last_sync = since_sender_member let joined_since_last_sync = since_sender_member
@ -425,18 +425,16 @@ async fn sync_helper(
} }
if let Some(state_key) = &state_event.state_key { if let Some(state_key) = &state_event.state_key {
let user_id = UserId::try_from(state_key.clone()) let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == sender_user { if user_id == sender_user {
continue; continue;
} }
let new_membership = serde_json::from_value::< let new_membership = serde_json::from_str::<RoomMemberEventContent>(
Raw<ruma::events::room::member::MemberEventContent>, state_event.content.get(),
>(state_event.content.clone()) )
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid PDU in database."))? .map_err(|_| Error::bad_database("Invalid PDU in database."))?
.membership; .membership;
@ -525,18 +523,18 @@ async fn sync_helper(
Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string())) Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string()))
})?; })?;
let room_events = timeline_pdus let room_events: Vec<_> = timeline_pdus
.iter() .iter()
.map(|(_, pdu)| pdu.to_sync_room_event()) .map(|(_, pdu)| pdu.to_sync_room_event())
.collect::<Vec<_>>(); .collect();
let mut edus = db let mut edus: Vec<_> = db
.rooms .rooms
.edus .edus
.readreceipts_since(&room_id, since) .readreceipts_since(&room_id, since)
.filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|r| r.ok()) // Filter out buggy events
.map(|(_, _, v)| v) .map(|(_, _, v)| v)
.collect::<Vec<_>>(); .collect();
if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since { if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since {
edus.push( edus.push(
@ -565,7 +563,7 @@ async fn sync_helper(
.map_err(|_| Error::bad_database("Invalid account event in database.")) .map_err(|_| Error::bad_database("Invalid account event in database."))
.ok() .ok()
}) })
.collect::<Vec<_>>(), .collect(),
}, },
summary: sync_events::RoomSummary { summary: sync_events::RoomSummary {
heroes, heroes,
@ -630,7 +628,7 @@ async fn sync_helper(
} }
let mut left_rooms = BTreeMap::new(); let mut left_rooms = BTreeMap::new();
let all_left_rooms = db.rooms.rooms_left(&sender_user).collect::<Vec<_>>(); let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect();
for result in all_left_rooms { for result in all_left_rooms {
let (room_id, left_state_events) = result?; let (room_id, left_state_events) = result?;
@ -670,7 +668,7 @@ async fn sync_helper(
} }
let mut invited_rooms = BTreeMap::new(); let mut invited_rooms = BTreeMap::new();
let all_invited_rooms = db.rooms.rooms_invited(&sender_user).collect::<Vec<_>>(); let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect();
for result in all_invited_rooms { for result in all_invited_rooms {
let (room_id, invite_state_events) = result?; let (room_id, invite_state_events) = result?;
@ -739,7 +737,7 @@ async fn sync_helper(
presence: sync_events::Presence { presence: sync_events::Presence {
events: presence_updates events: presence_updates
.into_iter() .into_iter()
.map(|(_, v)| Raw::from(v)) .map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
.collect(), .collect(),
}, },
account_data: sync_events::GlobalAccountData { account_data: sync_events::GlobalAccountData {
@ -752,19 +750,13 @@ async fn sync_helper(
.map_err(|_| Error::bad_database("Invalid account event in database.")) .map_err(|_| Error::bad_database("Invalid account event in database."))
.ok() .ok()
}) })
.collect::<Vec<_>>(), .collect(),
}, },
device_lists: sync_events::DeviceLists { device_lists: sync_events::DeviceLists {
changed: device_list_updates.into_iter().collect(), changed: device_list_updates.into_iter().collect(),
left: device_list_left.into_iter().collect(), left: device_list_left.into_iter().collect(),
}, },
device_one_time_keys_count: if db.users.last_one_time_keys_update(&sender_user)? > since device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?,
|| since == 0
{
db.users.count_one_time_keys(&sender_user, &sender_device)?
} else {
BTreeMap::new()
},
to_device: sync_events::ToDevice { to_device: sync_events::ToDevice {
events: db events: db
.users .users
@ -778,7 +770,6 @@ async fn sync_helper(
&& response.presence.is_empty() && response.presence.is_empty()
&& response.account_data.is_empty() && response.account_data.is_empty()
&& response.device_lists.is_empty() && response.device_lists.is_empty()
&& response.device_one_time_keys_count.is_empty()
&& response.to_device.is_empty() && response.to_device.is_empty()
{ {
// Hang a few seconds so requests are not spammed // Hang a few seconds so requests are not spammed
@ -803,7 +794,7 @@ fn share_encrypted_room(
) -> Result<bool> { ) -> Result<bool> {
Ok(db Ok(db
.rooms .rooms
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
.filter(|room_id| room_id != ignore_room) .filter(|room_id| room_id != ignore_room)
.filter_map(|other_room_id| { .filter_map(|other_room_id| {

src/client_server/tag.rs (23 lines changed)

@ -1,7 +1,10 @@
use crate::{database::DatabaseGuard, ConduitResult, Ruma}; use crate::{database::DatabaseGuard, ConduitResult, Ruma};
use ruma::{ use ruma::{
api::client::r0::tag::{create_tag, delete_tag, get_tags}, api::client::r0::tag::{create_tag, delete_tag, get_tags},
events::EventType, events::{
tag::{TagEvent, TagEventContent},
EventType,
},
}; };
use std::collections::BTreeMap; use std::collections::BTreeMap;
@ -26,9 +29,9 @@ pub async fn update_tag_route(
let mut tags_event = db let mut tags_event = db
.account_data .account_data
.get::<ruma::events::tag::TagEvent>(Some(&body.room_id), sender_user, EventType::Tag)? .get(Some(&body.room_id), sender_user, EventType::Tag)?
.unwrap_or_else(|| ruma::events::tag::TagEvent { .unwrap_or_else(|| TagEvent {
content: ruma::events::tag::TagEventContent { content: TagEventContent {
tags: BTreeMap::new(), tags: BTreeMap::new(),
}, },
}); });
@ -68,9 +71,9 @@ pub async fn delete_tag_route(
let mut tags_event = db let mut tags_event = db
.account_data .account_data
.get::<ruma::events::tag::TagEvent>(Some(&body.room_id), sender_user, EventType::Tag)? .get(Some(&body.room_id), sender_user, EventType::Tag)?
.unwrap_or_else(|| ruma::events::tag::TagEvent { .unwrap_or_else(|| TagEvent {
content: ruma::events::tag::TagEventContent { content: TagEventContent {
tags: BTreeMap::new(), tags: BTreeMap::new(),
}, },
}); });
@ -108,9 +111,9 @@ pub async fn get_tags_route(
Ok(get_tags::Response { Ok(get_tags::Response {
tags: db tags: db
.account_data .account_data
.get::<ruma::events::tag::TagEvent>(Some(&body.room_id), sender_user, EventType::Tag)? .get(Some(&body.room_id), sender_user, EventType::Tag)?
.unwrap_or_else(|| ruma::events::tag::TagEvent { .unwrap_or_else(|| TagEvent {
content: ruma::events::tag::TagEventContent { content: TagEventContent {
tags: BTreeMap::new(), tags: BTreeMap::new(),
}, },
}) })

src/client_server/voip.rs (55 lines changed)

@ -1,6 +1,11 @@
use crate::ConduitResult; use crate::{database::DatabaseGuard, ConduitResult, Ruma};
use hmac::{Hmac, Mac, NewMac};
use ruma::api::client::r0::voip::get_turn_server_info; use ruma::api::client::r0::voip::get_turn_server_info;
use std::time::Duration; use ruma::SecondsSinceUnixEpoch;
use sha1::Sha1;
use std::time::{Duration, SystemTime};
type HmacSha1 = Hmac<Sha1>;
#[cfg(feature = "conduit_bin")] #[cfg(feature = "conduit_bin")]
use rocket::get; use rocket::get;
@ -8,14 +13,46 @@ use rocket::get;
/// # `GET /_matrix/client/r0/voip/turnServer` /// # `GET /_matrix/client/r0/voip/turnServer`
/// ///
/// TODO: Returns information about the recommended turn server. /// TODO: Returns information about the recommended turn server.
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] #[cfg_attr(
#[tracing::instrument] feature = "conduit_bin",
pub async fn turn_server_route() -> ConduitResult<get_turn_server_info::Response> { get("/_matrix/client/r0/voip/turnServer", data = "<body>")
)]
#[tracing::instrument(skip(body, db))]
pub async fn turn_server_route(
body: Ruma<get_turn_server_info::Request>,
db: DatabaseGuard,
) -> ConduitResult<get_turn_server_info::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let turn_secret = db.globals.turn_secret();
let (username, password) = if !turn_secret.is_empty() {
let expiry = SecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
)
.expect("time is valid");
let username: String = format!("{}:{}", expiry.get(), sender_user);
let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
.expect("HMAC can take key of any size");
mac.update(username.as_bytes());
let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);
(username, password)
} else {
(
db.globals.turn_username().clone(),
db.globals.turn_password().clone(),
)
};
Ok(get_turn_server_info::Response { Ok(get_turn_server_info::Response {
username: "".to_owned(), username,
password: "".to_owned(), password,
uris: Vec::new(), uris: db.globals.turn_uris().to_vec(),
ttl: Duration::from_secs(60 * 60 * 24), ttl: Duration::from_secs(db.globals.turn_ttl()),
} }
.into()) .into())
} }
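Note: a self-contained sketch of the shared-secret TURN credential scheme introduced above (the mechanism coturn enables with use-auth-secret), using the same hmac, sha-1 and base64 calls as the new code; turn_secret and ttl stand in for the values read from the Conduit config.

use std::time::{Duration, SystemTime};

use hmac::{Hmac, Mac, NewMac};
use sha1::Sha1;

type HmacSha1 = Hmac<Sha1>;

// The username embeds an expiry timestamp; the password is the base64-encoded
// HMAC-SHA1 of that username under the shared secret, so the TURN server can
// verify the credentials by recomputing the MAC, without storing any state.
fn turn_credentials(user_id: &str, turn_secret: &str, ttl: u64) -> (String, String) {
    let expiry = SystemTime::now() + Duration::from_secs(ttl);
    let expiry_secs = expiry
        .duration_since(SystemTime::UNIX_EPOCH)
        .expect("expiry is after the unix epoch")
        .as_secs();

    let username = format!("{}:{}", expiry_secs, user_id);

    let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
        .expect("HMAC can take key of any size");
    mac.update(username.as_bytes());
    let password = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);

    (username, password)
}

fn main() {
    let (user, pass) = turn_credentials("@alice:example.org", "s3cret", 60 * 60 * 24);
    println!("username={} password={}", user, pass);
}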

src/database.rs (44 lines changed)

@ -61,6 +61,8 @@ pub struct Config {
allow_encryption: bool, allow_encryption: bool,
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
allow_federation: bool, allow_federation: bool,
#[serde(default = "true_fn")]
allow_room_creation: bool,
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
pub allow_jaeger: bool, pub allow_jaeger: bool,
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
@ -72,6 +74,16 @@ pub struct Config {
trusted_servers: Vec<Box<ServerName>>, trusted_servers: Vec<Box<ServerName>>,
#[serde(default = "default_log")] #[serde(default = "default_log")]
pub log: String, pub log: String,
#[serde(default)]
turn_username: String,
#[serde(default)]
turn_password: String,
#[serde(default = "Vec::new")]
turn_uris: Vec<String>,
#[serde(default)]
turn_secret: String,
#[serde(default = "default_turn_ttl")]
turn_ttl: u64,
#[serde(flatten)] #[serde(flatten)]
catchall: BTreeMap<String, IgnoredAny>, catchall: BTreeMap<String, IgnoredAny>,
@ -129,6 +141,10 @@ fn default_log() -> String {
"info,state_res=warn,rocket=off,_=off,sled=off".to_owned() "info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
} }
fn default_turn_ttl() -> u64 {
60 * 60 * 24
}
#[cfg(feature = "sled")] #[cfg(feature = "sled")]
pub type Engine = abstraction::sled::Engine; pub type Engine = abstraction::sled::Engine;
@ -460,10 +476,9 @@ impl Database {
if db.globals.database_version()? < 6 { if db.globals.database_version()? < 6 {
// Set room member count // Set room member count
for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { for (roomid, _) in db.rooms.roomid_shortstatehash.iter() {
let room_id = let string = utils::string_from_bytes(&roomid).unwrap();
RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap(); let room_id = <&RoomId>::try_from(string.as_str()).unwrap();
db.rooms.update_joined_count(room_id, &db)?;
db.rooms.update_joined_count(&room_id, &db)?;
} }
db.globals.bump_database_version(6)?; db.globals.bump_database_version(6)?;
@ -473,7 +488,7 @@ impl Database {
if db.globals.database_version()? < 7 { if db.globals.database_version()? < 7 {
// Upgrade state store // Upgrade state store
let mut last_roomstates: HashMap<RoomId, u64> = HashMap::new(); let mut last_roomstates: HashMap<Box<RoomId>, u64> = HashMap::new();
let mut current_sstatehash: Option<u64> = None; let mut current_sstatehash: Option<u64> = None;
let mut current_room = None; let mut current_room = None;
let mut current_state = HashSet::new(); let mut current_state = HashSet::new();
@ -499,13 +514,13 @@ impl Database {
if let Some(parent_stateinfo) = states_parents.last() { if let Some(parent_stateinfo) = states_parents.last() {
let statediffnew = current_state let statediffnew = current_state
.difference(&parent_stateinfo.1) .difference(&parent_stateinfo.1)
.cloned() .copied()
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
let statediffremoved = parent_stateinfo let statediffremoved = parent_stateinfo
.1 .1
.difference(&current_state) .difference(&current_state)
.cloned() .copied()
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
(statediffnew, statediffremoved) (statediffnew, statediffremoved)
@ -554,7 +569,7 @@ impl Database {
if let Some(current_sstatehash) = current_sstatehash { if let Some(current_sstatehash) = current_sstatehash {
handle_state( handle_state(
current_sstatehash, current_sstatehash,
current_room.as_ref().unwrap(), current_room.as_deref().unwrap(),
current_state, current_state,
&mut last_roomstates, &mut last_roomstates,
)?; )?;
@ -570,10 +585,9 @@ impl Database {
.get(&seventid) .get(&seventid)
.unwrap() .unwrap()
.unwrap(); .unwrap();
let event_id = let string = utils::string_from_bytes(&event_id).unwrap();
EventId::try_from(utils::string_from_bytes(&event_id).unwrap()) let event_id = <&EventId>::try_from(string.as_str()).unwrap();
.unwrap(); let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap();
let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap();
if Some(&pdu.room_id) != current_room.as_ref() { if Some(&pdu.room_id) != current_room.as_ref() {
current_room = Some(pdu.room_id.clone()); current_room = Some(pdu.room_id.clone());
@ -588,7 +602,7 @@ impl Database {
if let Some(current_sstatehash) = current_sstatehash { if let Some(current_sstatehash) = current_sstatehash {
handle_state( handle_state(
current_sstatehash, current_sstatehash,
current_room.as_ref().unwrap(), current_room.as_deref().unwrap(),
current_state, current_state,
&mut last_roomstates, &mut last_roomstates,
)?; )?;
@ -699,7 +713,7 @@ impl Database {
println!("Deleting starts"); println!("Deleting starts");
let batch2 = db let batch2: Vec<_> = db
.rooms .rooms
.tokenids .tokenids
.iter() .iter()
@ -711,7 +725,7 @@ impl Database {
None None
} }
}) })
.collect::<Vec<_>>(); .collect();
for key in batch2 { for key in batch2 {
println!("del"); println!("del");

src/database/abstraction.rs (4 lines changed)

@ -22,7 +22,7 @@ pub trait Tree: Send + Sync {
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>; fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>;
fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()>; fn insert_batch(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()>;
fn remove(&self, key: &[u8]) -> Result<()>; fn remove(&self, key: &[u8]) -> Result<()>;
@ -35,7 +35,7 @@ pub trait Tree: Send + Sync {
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a>; ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a>;
fn increment(&self, key: &[u8]) -> Result<Vec<u8>>; fn increment(&self, key: &[u8]) -> Result<Vec<u8>>;
fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()>; fn increment_batch(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()>;
fn scan_prefix<'a>( fn scan_prefix<'a>(
&'a self, &'a self,

src/database/admin.rs (26 lines changed)

@ -1,21 +1,19 @@
use std::{ use std::{convert::TryInto, sync::Arc};
convert::{TryFrom, TryInto},
sync::Arc,
};
use crate::{pdu::PduBuilder, Database}; use crate::{pdu::PduBuilder, Database};
use rocket::futures::{channel::mpsc, stream::StreamExt}; use rocket::futures::{channel::mpsc, stream::StreamExt};
use ruma::{ use ruma::{
events::{room::message, EventType}, events::{room::message::RoomMessageEventContent, EventType},
UserId, UserId,
}; };
use serde_json::value::to_raw_value;
use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard};
use tracing::warn; use tracing::warn;
pub enum AdminCommand { pub enum AdminCommand {
RegisterAppservice(serde_yaml::Value), RegisterAppservice(serde_yaml::Value),
ListAppservices, ListAppservices,
SendMessage(message::MessageEventContent), SendMessage(RoomMessageEventContent),
} }
#[derive(Clone)] #[derive(Clone)]
@ -35,14 +33,14 @@ impl Admin {
let guard = db.read().await; let guard = db.read().await;
let conduit_user = let conduit_user = UserId::parse(format!("@conduit:{}", guard.globals.server_name()))
UserId::try_from(format!("@conduit:{}", guard.globals.server_name())) .expect("@conduit:server_name is valid");
.expect("@conduit:server_name is valid");
let conduit_room = guard let conduit_room = guard
.rooms .rooms
.id_from_alias( .id_from_alias(
&format!("#admins:{}", guard.globals.server_name()) format!("#admins:{}", guard.globals.server_name())
.as_str()
.try_into() .try_into()
.expect("#admins:server_name is a valid room alias"), .expect("#admins:server_name is a valid room alias"),
) )
@ -58,7 +56,7 @@ impl Admin {
drop(guard); drop(guard);
let send_message = |message: message::MessageEventContent, let send_message = |message: RoomMessageEventContent,
guard: RwLockReadGuard<'_, Database>, guard: RwLockReadGuard<'_, Database>,
mutex_lock: &MutexGuard<'_, ()>| { mutex_lock: &MutexGuard<'_, ()>| {
guard guard
@ -66,7 +64,7 @@ impl Admin {
.build_and_append_pdu( .build_and_append_pdu(
PduBuilder { PduBuilder {
event_type: EventType::RoomMessage, event_type: EventType::RoomMessage,
content: serde_json::to_value(message) content: to_raw_value(&message)
.expect("event is valid, we just created it"), .expect("event is valid, we just created it"),
unsigned: None, unsigned: None,
state_key: None, state_key: None,
@ -106,9 +104,9 @@ impl Admin {
count, count,
appservices.into_iter().filter_map(|r| r.ok()).collect::<Vec<_>>().join(", ") appservices.into_iter().filter_map(|r| r.ok()).collect::<Vec<_>>().join(", ")
); );
send_message(message::MessageEventContent::text_plain(output), guard, &state_lock); send_message(RoomMessageEventContent::text_plain(output), guard, &state_lock);
} else { } else {
send_message(message::MessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock); send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock);
} }
} }
AdminCommand::SendMessage(message) => { AdminCommand::SendMessage(message) => {
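Note: most hunks in this compare switch PDU content from a serde_json::Value to an already-serialized raw JSON string; a minimal sketch of that conversion with serde_json's to_raw_value (requires the crate's raw_value feature), using a stand-in content struct rather than Ruma's RoomMessageEventContent.

use serde::Serialize;
use serde_json::value::{to_raw_value, RawValue};

// Stand-in for a typed event content such as RoomMessageEventContent.
#[derive(Serialize)]
struct TextContent {
    msgtype: &'static str,
    body: String,
}

fn main() {
    let content = TextContent {
        msgtype: "m.text",
        body: "Failed to get appservices.".to_owned(),
    };

    // Instead of serde_json::to_value, the builder now keeps the content as a
    // raw JSON string (Box<RawValue>), avoiding a second serialization pass.
    let raw: Box<RawValue> = to_raw_value(&content).expect("event is valid, we just created it");

    assert_eq!(raw.get(), r#"{"msgtype":"m.text","body":"Failed to get appservices."}"#);
}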

src/database/globals.rs (45 lines changed)

@ -40,13 +40,13 @@ pub struct Globals {
dns_resolver: TokioAsyncResolver, dns_resolver: TokioAsyncResolver,
jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>, jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>,
pub(super) server_signingkeys: Arc<dyn Tree>, pub(super) server_signingkeys: Arc<dyn Tree>,
pub bad_event_ratelimiter: Arc<RwLock<HashMap<EventId, RateLimitState>>>, pub bad_event_ratelimiter: Arc<RwLock<HashMap<Box<EventId>, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>, pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<HashMap<Box<ServerName>, Arc<Semaphore>>>>, pub servername_ratelimiter: Arc<RwLock<HashMap<Box<ServerName>, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<HashMap<(UserId, Box<DeviceId>), SyncHandle>>, pub sync_receivers: RwLock<HashMap<(Box<UserId>, Box<DeviceId>), SyncHandle>>,
pub roomid_mutex_insert: RwLock<HashMap<RoomId, Arc<Mutex<()>>>>, pub roomid_mutex_insert: RwLock<HashMap<Box<RoomId>, Arc<Mutex<()>>>>,
pub roomid_mutex_state: RwLock<HashMap<RoomId, Arc<TokioMutex<()>>>>, pub roomid_mutex_state: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<RoomId, Arc<TokioMutex<()>>>>, // this lock will be held longer pub roomid_mutex_federation: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>, // this lock will be held longer
pub rotate: RotationHandler, pub rotate: RotationHandler,
} }
@ -57,8 +57,7 @@ pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>);
impl RotationHandler { impl RotationHandler {
pub fn new() -> Self { pub fn new() -> Self {
let (s, r) = broadcast::channel::<()>(1); let (s, r) = broadcast::channel(1);
Self(s, r) Self(s, r)
} }
@ -211,6 +210,10 @@ impl Globals {
self.config.allow_federation self.config.allow_federation
} }
pub fn allow_room_creation(&self) -> bool {
self.config.allow_room_creation
}
pub fn trusted_servers(&self) -> &[Box<ServerName>] { pub fn trusted_servers(&self) -> &[Box<ServerName>] {
&self.config.trusted_servers &self.config.trusted_servers
} }
@ -223,6 +226,26 @@ impl Globals {
self.jwt_decoding_key.as_ref() self.jwt_decoding_key.as_ref()
} }
pub fn turn_password(&self) -> &String {
&self.config.turn_password
}
pub fn turn_ttl(&self) -> u64 {
self.config.turn_ttl
}
pub fn turn_uris(&self) -> &[String] {
&self.config.turn_uris
}
pub fn turn_username(&self) -> &String {
&self.config.turn_username
}
pub fn turn_secret(&self) -> &String {
&self.config.turn_secret
}
/// TODO: the key valid until timestamp is only honored in room version > 4 /// TODO: the key valid until timestamp is only honored in room version > 4
/// Remove the outdated keys and insert the new ones. /// Remove the outdated keys and insert the new ones.
/// ///
@ -231,7 +254,7 @@ impl Globals {
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> { ) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> {
// Not atomic, but this is not critical // Not atomic, but this is not critical
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
@ -270,12 +293,12 @@ impl Globals {
pub fn signing_keys_for( pub fn signing_keys_for(
&self, &self,
origin: &ServerName, origin: &ServerName,
) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> { ) -> Result<BTreeMap<Box<ServerSigningKeyId>, VerifyKey>> {
let signingkeys = self let signingkeys = self
.server_signingkeys .server_signingkeys
.get(origin.as_bytes())? .get(origin.as_bytes())?
.and_then(|bytes| serde_json::from_slice::<ServerSigningKeys>(&bytes).ok()) .and_then(|bytes| serde_json::from_slice(&bytes).ok())
.map(|keys| { .map(|keys: ServerSigningKeys| {
let mut tree = keys.verify_keys; let mut tree = keys.verify_keys;
tree.extend( tree.extend(
keys.old_verify_keys keys.old_verify_keys

src/database/key_backups.rs (21 lines changed)

@ -6,7 +6,7 @@ use ruma::{
}, },
RoomId, UserId, RoomId, UserId,
}; };
use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; use std::{collections::BTreeMap, sync::Arc};
use super::abstraction::Tree; use super::abstraction::Tree;
@ -81,7 +81,7 @@ impl KeyBackups {
)?; )?;
self.backupid_etag self.backupid_etag
.insert(&key, &globals.next_count()?.to_be_bytes())?; .insert(&key, &globals.next_count()?.to_be_bytes())?;
Ok(version.to_string()) Ok(version.to_owned())
} }
pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result<Option<String>> { pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result<Option<String>> {
@ -94,15 +94,15 @@ impl KeyBackups {
.iter_from(&last_possible_key, true) .iter_from(&last_possible_key, true)
.take_while(move |(k, _)| k.starts_with(&prefix)) .take_while(move |(k, _)| k.starts_with(&prefix))
.next() .next()
.map_or(Ok(None), |(key, _)| { .map(|(key, _)| {
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
.expect("rsplit always returns an element"), .expect("rsplit always returns an element"),
) )
.map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))
.map(Some)
}) })
.transpose()
} }
pub fn get_latest_backup(&self, user_id: &UserId) -> Result<Option<(String, BackupAlgorithm)>> { pub fn get_latest_backup(&self, user_id: &UserId) -> Result<Option<(String, BackupAlgorithm)>> {
@ -115,7 +115,7 @@ impl KeyBackups {
.iter_from(&last_possible_key, true) .iter_from(&last_possible_key, true)
.take_while(move |(k, _)| k.starts_with(&prefix)) .take_while(move |(k, _)| k.starts_with(&prefix))
.next() .next()
.map_or(Ok(None), |(key, value)| { .map(|(key, value)| {
let version = utils::string_from_bytes( let version = utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -123,13 +123,14 @@ impl KeyBackups {
) )
.map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?;
Ok(Some(( Ok((
version, version,
serde_json::from_slice(&value).map_err(|_| { serde_json::from_slice(&value).map_err(|_| {
Error::bad_database("Algorithm in backupid_algorithm is invalid.") Error::bad_database("Algorithm in backupid_algorithm is invalid.")
})?, })?,
))) ))
}) })
.transpose()
} }
pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<BackupAlgorithm>> { pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<BackupAlgorithm>> {
@ -208,13 +209,13 @@ impl KeyBackups {
&self, &self,
user_id: &UserId, user_id: &UserId,
version: &str, version: &str,
) -> Result<BTreeMap<RoomId, RoomKeyBackup>> { ) -> Result<BTreeMap<Box<RoomId>, RoomKeyBackup>> {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
prefix.extend_from_slice(version.as_bytes()); prefix.extend_from_slice(version.as_bytes());
prefix.push(0xff); prefix.push(0xff);
let mut rooms = BTreeMap::<RoomId, RoomKeyBackup>::new(); let mut rooms = BTreeMap::<Box<RoomId>, RoomKeyBackup>::new();
for result in self for result in self
.backupkeyid_backup .backupkeyid_backup
@ -230,7 +231,7 @@ impl KeyBackups {
Error::bad_database("backupkeyid_backup session_id is invalid.") Error::bad_database("backupkeyid_backup session_id is invalid.")
})?; })?;
let room_id = RoomId::try_from( let room_id = RoomId::parse(
utils::string_from_bytes(parts.next().ok_or_else(|| { utils::string_from_bytes(parts.next().ok_or_else(|| {
Error::bad_database("backupkeyid_backup key is invalid.") Error::bad_database("backupkeyid_backup key is invalid.")
})?) })?)
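Note: a sketch of the Option/Result refactor applied in this file: instead of .map_or(Ok(None), |v| Ok(Some(parse(v)?))), parse inside map and let Option::transpose turn the resulting Option<Result<T, E>> into Result<Option<T>, E>. The byte-parsing helper here is only illustrative.

use std::convert::TryInto;

fn parse(bytes: &[u8]) -> Result<u64, String> {
    let arr: [u8; 8] = bytes.try_into().map_err(|_| "invalid length".to_owned())?;
    Ok(u64::from_be_bytes(arr))
}

// Equivalent to the old map_or(Ok(None), ...) form, but without nesting Ok/Some by hand.
fn latest(value: Option<&[u8]>) -> Result<Option<u64>, String> {
    value.map(parse).transpose()
}

fn main() {
    let bytes = 42u64.to_be_bytes();

    assert_eq!(latest(None), Ok(None));
    assert_eq!(latest(Some(&bytes[..])), Ok(Some(42)));
    assert!(latest(Some(&[1u8, 2, 3][..])).is_err());
}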

src/database/proxy.rs (6 lines changed)

@ -125,7 +125,7 @@ impl WildCardedDomain {
} }
impl std::str::FromStr for WildCardedDomain { impl std::str::FromStr for WildCardedDomain {
type Err = std::convert::Infallible; type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
// maybe do some domain validation? // maybe do some domain validation?
Ok(if s.starts_with("*.") { Ok(if s.starts_with("*.") {
WildCardedDomain::WildCarded(s[1..].to_owned()) WildCardedDomain::WildCarded(s[1..].to_owned())
@ -136,8 +136,8 @@ impl std::str::FromStr for WildCardedDomain {
}) })
} }
} }
impl<'de> serde::de::Deserialize<'de> for WildCardedDomain { impl<'de> Deserialize<'de> for WildCardedDomain {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: serde::de::Deserializer<'de>, D: serde::de::Deserializer<'de>,
{ {

src/database/pusher.rs (38 lines changed)

@ -9,8 +9,10 @@ use ruma::{
}, },
IncomingResponse, OutgoingRequest, SendAccessToken, IncomingResponse, OutgoingRequest, SendAccessToken,
}, },
events::{room::power_levels::PowerLevelsEventContent, AnySyncRoomEvent, EventType}, events::{
identifiers::RoomName, room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent},
AnySyncRoomEvent, EventType,
},
push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak},
serde::Raw, serde::Raw,
uint, RoomId, UInt, UserId, uint, RoomId, UInt, UserId,
@ -177,11 +179,11 @@ pub async fn send_push_notice(
let mut notify = None; let mut notify = None;
let mut tweaks = Vec::new(); let mut tweaks = Vec::new();
let power_levels: PowerLevelsEventContent = db let power_levels: RoomPowerLevelsEventContent = db
.rooms .rooms
.room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")?
.map(|ev| { .map(|ev| {
serde_json::from_value(ev.content.clone()) serde_json::from_str(ev.content.get())
.map_err(|_| Error::bad_database("invalid m.room.power_levels event")) .map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
}) })
.transpose()? .transpose()?
@ -226,13 +228,13 @@ pub async fn send_push_notice(
pub fn get_actions<'a>( pub fn get_actions<'a>(
user: &UserId, user: &UserId,
ruleset: &'a Ruleset, ruleset: &'a Ruleset,
power_levels: &PowerLevelsEventContent, power_levels: &RoomPowerLevelsEventContent,
pdu: &Raw<AnySyncRoomEvent>, pdu: &Raw<AnySyncRoomEvent>,
room_id: &RoomId, room_id: &RoomId,
db: &Database, db: &Database,
) -> Result<&'a [Action]> { ) -> Result<&'a [Action]> {
let ctx = PushConditionRoomCtx { let ctx = PushConditionRoomCtx {
room_id: room_id.clone(), room_id: room_id.to_owned(),
member_count: 10_u32.into(), // TODO: get member count efficiently member_count: 10_u32.into(), // TODO: get member count efficiently
user_display_name: db user_display_name: db
.users .users
@ -275,7 +277,7 @@ async fn send_notice(
let mut data_minus_url = pusher.data.clone(); let mut data_minus_url = pusher.data.clone();
// The url must be stripped off according to spec // The url must be stripped off according to spec
data_minus_url.url = None; data_minus_url.url = None;
device.data = Some(data_minus_url); device.data = data_minus_url;
// Tweaks are only added if the format is NOT event_id_only // Tweaks are only added if the format is NOT event_id_only
if !event_id_only { if !event_id_only {
@ -318,16 +320,18 @@ async fn send_notice(
let user_name = db.users.displayname(&event.sender)?; let user_name = db.users.displayname(&event.sender)?;
notifi.sender_display_name = user_name.as_deref(); notifi.sender_display_name = user_name.as_deref();
let room_name = db
.rooms let room_name = if let Some(room_name_pdu) =
.room_state_get(&event.room_id, &EventType::RoomName, "")? db.rooms
.map(|pdu| match pdu.content.get("name") { .room_state_get(&event.room_id, &EventType::RoomName, "")?
Some(serde_json::Value::String(s)) => { {
Some(Box::<RoomName>::try_from(&**s).expect("room name is valid")) serde_json::from_str::<RoomNameEventContent>(room_name_pdu.content.get())
} .map_err(|_| Error::bad_database("Invalid room name event in database."))?
_ => None, .name
}) } else {
.flatten(); None
};
notifi.room_name = room_name.as_deref(); notifi.room_name = room_name.as_deref();
send_request( send_request(
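Note: a sketch of the room-name change above: the raw event content is deserialized into a typed struct instead of poking at a generic serde_json::Value with .get("name"). NameContent is a local stand-in for Ruma's RoomNameEventContent.

use serde::Deserialize;
use serde_json::value::RawValue;

// Stand-in for RoomNameEventContent; only the field needed here.
#[derive(Deserialize)]
struct NameContent {
    name: Option<String>,
}

fn room_name(content: &RawValue) -> Result<Option<String>, serde_json::Error> {
    let content: NameContent = serde_json::from_str(content.get())?;
    Ok(content.name)
}

fn main() -> Result<(), serde_json::Error> {
    let raw: Box<RawValue> = serde_json::from_str(r#"{"name":"Conduit Admin Room"}"#)?;
    assert_eq!(room_name(&raw)?, Some("Conduit Admin Room".to_owned()));
    Ok(())
}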

src/database/rooms.rs (582 lines changed)

File diff suppressed because it is too large.

src/database/rooms/edus.rs (63 lines changed)

@ -11,7 +11,7 @@ use ruma::{
}; };
use std::{ use std::{
collections::{HashMap, HashSet}, collections::{HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::TryInto,
mem, mem,
sync::Arc, sync::Arc,
}; };
@ -76,8 +76,13 @@ impl RoomEdus {
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
since: u64, since: u64,
) -> impl Iterator<Item = Result<(UserId, u64, Raw<ruma::events::AnySyncEphemeralRoomEvent>)>> + 'a ) -> impl Iterator<
{ Item = Result<(
Box<UserId>,
u64,
Raw<ruma::events::AnySyncEphemeralRoomEvent>,
)>,
> + 'a {
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
let prefix2 = prefix.clone(); let prefix2 = prefix.clone();
@ -92,7 +97,7 @@ impl RoomEdus {
let count = let count =
utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::<u64>()]) utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::<u64>()])
.map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?;
let user_id = UserId::try_from( let user_id = UserId::parse(
utils::string_from_bytes(&k[prefix.len() + mem::size_of::<u64>() + 1..]) utils::string_from_bytes(&k[prefix.len() + mem::size_of::<u64>() + 1..])
.map_err(|_| { .map_err(|_| {
Error::bad_database("Invalid readreceiptid userid bytes in db.") Error::bad_database("Invalid readreceiptid userid bytes in db.")
@ -162,11 +167,12 @@ impl RoomEdus {
Ok(self Ok(self
.roomuserid_lastprivatereadupdate .roomuserid_lastprivatereadupdate
.get(&key)? .get(&key)?
.map_or(Ok::<_, Error>(None), |bytes| { .map(|bytes| {
Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")
})?)) })
})? })
.transpose()?
.unwrap_or(0)) .unwrap_or(0))
} }
@ -286,11 +292,12 @@ impl RoomEdus {
Ok(self Ok(self
.roomid_lasttypingupdate .roomid_lasttypingupdate
.get(room_id.as_bytes())? .get(room_id.as_bytes())?
.map_or(Ok::<_, Error>(None), |bytes| { .map(|bytes| {
Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
})?)) })
})? })
.transpose()?
.unwrap_or(0)) .unwrap_or(0))
} }
@ -303,17 +310,13 @@ impl RoomEdus {
let mut user_ids = HashSet::new(); let mut user_ids = HashSet::new();
for user_id in self for (_, user_id) in self.typingid_userid.scan_prefix(prefix) {
.typingid_userid let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| {
.scan_prefix(prefix) Error::bad_database("User ID in typingid_userid is invalid unicode.")
.map(|(_, user_id)| { })?)
UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| { .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?;
Error::bad_database("User ID in typingid_userid is invalid unicode.")
})?) user_ids.insert(user_id);
.map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))
})
{
user_ids.insert(user_id?);
} }
Ok(SyncEphemeralRoomEvent { Ok(SyncEphemeralRoomEvent {
@ -331,7 +334,7 @@ impl RoomEdus {
&self, &self,
user_id: &UserId, user_id: &UserId,
room_id: &RoomId, room_id: &RoomId,
presence: ruma::events::presence::PresenceEvent, presence: PresenceEvent,
globals: &super::super::globals::Globals, globals: &super::super::globals::Globals,
) -> Result<()> { ) -> Result<()> {
// TODO: Remove old entry? Or maybe just wipe completely from time to time? // TODO: Remove old entry? Or maybe just wipe completely from time to time?
@ -399,7 +402,7 @@ impl RoomEdus {
self.presenceid_presence self.presenceid_presence
.get(&presence_id)? .get(&presence_id)?
.map(|value| { .map(|value| {
let mut presence = serde_json::from_slice::<PresenceEvent>(&value) let mut presence: PresenceEvent = serde_json::from_slice(&value)
.map_err(|_| Error::bad_database("Invalid presence event in db."))?; .map_err(|_| Error::bad_database("Invalid presence event in db."))?;
let current_timestamp: UInt = utils::millis_since_unix_epoch() let current_timestamp: UInt = utils::millis_since_unix_epoch()
.try_into() .try_into()
@ -447,7 +450,7 @@ impl RoomEdus {
{ {
// Send new presence events to set the user offline // Send new presence events to set the user offline
let count = globals.next_count()?.to_be_bytes(); let count = globals.next_count()?.to_be_bytes();
let user_id = utils::string_from_bytes(&user_id_bytes) let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes)
.map_err(|_| { .map_err(|_| {
Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.")
})? })?
@ -473,7 +476,7 @@ impl RoomEdus {
presence: PresenceState::Offline, presence: PresenceState::Offline,
status_msg: None, status_msg: None,
}, },
sender: user_id.clone(), sender: user_id.to_owned(),
}) })
.expect("PresenceEvent can be serialized"), .expect("PresenceEvent can be serialized"),
)?; )?;
@ -496,7 +499,7 @@ impl RoomEdus {
since: u64, since: u64,
_rooms: &super::Rooms, _rooms: &super::Rooms,
_globals: &super::super::globals::Globals, _globals: &super::super::globals::Globals,
) -> Result<HashMap<UserId, PresenceEvent>> { ) -> Result<HashMap<Box<UserId>, PresenceEvent>> {
//self.presence_maintain(rooms, globals)?; //self.presence_maintain(rooms, globals)?;
let mut prefix = room_id.as_bytes().to_vec(); let mut prefix = room_id.as_bytes().to_vec();
@ -511,7 +514,7 @@ impl RoomEdus {
.iter_from(&*first_possible_edu, false) .iter_from(&*first_possible_edu, false)
.take_while(|(key, _)| key.starts_with(&prefix)) .take_while(|(key, _)| key.starts_with(&prefix))
{ {
let user_id = UserId::try_from( let user_id = UserId::parse(
utils::string_from_bytes( utils::string_from_bytes(
key.rsplit(|&b| b == 0xff) key.rsplit(|&b| b == 0xff)
.next() .next()
@ -521,7 +524,7 @@ impl RoomEdus {
) )
.map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?;
let mut presence = serde_json::from_slice::<PresenceEvent>(&value) let mut presence: PresenceEvent = serde_json::from_slice(&value)
.map_err(|_| Error::bad_database("Invalid presence event in db."))?; .map_err(|_| Error::bad_database("Invalid presence event in db."))?;
let current_timestamp: UInt = utils::millis_since_unix_epoch() let current_timestamp: UInt = utils::millis_since_unix_epoch()

src/database/sending.rs (61 lines changed)

@ -1,6 +1,6 @@
use std::{ use std::{
collections::{BTreeMap, HashMap, HashSet}, collections::{BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::TryInto,
fmt::Debug, fmt::Debug,
sync::Arc, sync::Arc,
time::{Duration, Instant}, time::{Duration, Instant},
@ -27,7 +27,7 @@ use ruma::{
OutgoingRequest, OutgoingRequest,
}, },
device_id, device_id,
events::{push_rules, AnySyncEphemeralRoomEvent, EventType}, events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType},
push, push,
receipt::ReceiptType, receipt::ReceiptType,
uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId,
@ -165,13 +165,13 @@ impl Sending {
} }
// Find events that have been added since starting the last request // Find events that have been added since starting the last request
let new_events = guard.sending.servernameevent_data let new_events: Vec<_> = guard.sending.servernameevent_data
.scan_prefix(prefix.clone()) .scan_prefix(prefix.clone())
.filter_map(|(k, v)| { .filter_map(|(k, v)| {
Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k))
}) })
.take(30) .take(30)
.collect::<Vec<_>>(); .collect();
// TODO: find edus // TODO: find edus
@ -344,8 +344,8 @@ impl Sending {
continue; continue;
} }
let event = let event: AnySyncEphemeralRoomEvent =
serde_json::from_str::<AnySyncEphemeralRoomEvent>(read_receipt.json().get()) serde_json::from_str(read_receipt.json().get())
.map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?;
let federation_event = match event { let federation_event = match event {
AnySyncEphemeralRoomEvent::Receipt(r) => { AnySyncEphemeralRoomEvent::Receipt(r) => {
@ -397,8 +397,8 @@ impl Sending {
// Because synapse resyncs, we can just insert dummy data // Because synapse resyncs, we can just insert dummy data
let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { let edu = Edu::DeviceListUpdate(DeviceListUpdateContent {
user_id, user_id,
device_id: device_id!("dummy"), device_id: device_id!("dummy").to_owned(),
device_display_name: "Dummy".to_owned(), device_display_name: Some("Dummy".to_owned()),
stream_id: uint!(1), stream_id: uint!(1),
prev_id: Vec::new(), prev_id: Vec::new(),
deleted: None, deleted: None,
@ -485,7 +485,7 @@ impl Sending {
kind: OutgoingKind, kind: OutgoingKind,
events: Vec<SendingEventType>, events: Vec<SendingEventType>,
db: Arc<RwLock<Database>>, db: Arc<RwLock<Database>>,
) -> std::result::Result<OutgoingKind, (OutgoingKind, Error)> { ) -> Result<OutgoingKind, (OutgoingKind, Error)> {
let db = db.read().await; let db = db.read().await;
match &kind { match &kind {
@ -573,23 +573,28 @@ impl Sending {
for pdu in pdus { for pdu in pdus {
// Redacted events are not notification targets (we don't send push for them) // Redacted events are not notification targets (we don't send push for them)
if pdu.unsigned.get("redacted_because").is_some() { if let Some(unsigned) = &pdu.unsigned {
continue; if let Ok(unsigned) =
serde_json::from_str::<serde_json::Value>(unsigned.get())
{
if unsigned.get("redacted_because").is_some() {
continue;
}
}
} }
let userid = let userid = UserId::parse(utils::string_from_bytes(user).map_err(|_| {
UserId::try_from(utils::string_from_bytes(user).map_err(|_| { (
( kind.clone(),
kind.clone(), Error::bad_database("Invalid push user string in db."),
Error::bad_database("Invalid push user string in db."), )
) })?)
})?) .map_err(|_| {
.map_err(|_| { (
( kind.clone(),
kind.clone(), Error::bad_database("Invalid push user id in db."),
Error::bad_database("Invalid push user id in db."), )
) })?;
})?;
let mut senderkey = user.clone(); let mut senderkey = user.clone();
senderkey.push(0xff); senderkey.push(0xff);
@ -606,9 +611,9 @@ impl Sending {
let rules_for_user = db let rules_for_user = db
.account_data .account_data
.get::<push_rules::PushRulesEvent>(None, &userid, EventType::PushRules) .get(None, &userid, EventType::PushRules)
.unwrap_or_default() .unwrap_or_default()
.map(|ev| ev.content.global) .map(|ev: PushRulesEvent| ev.content.global)
.unwrap_or_else(|| push::Ruleset::server_default(&userid)); .unwrap_or_else(|| push::Ruleset::server_default(&userid));
let unread: UInt = db let unread: UInt = db
@ -726,7 +731,7 @@ impl Sending {
})?; })?;
( (
OutgoingKind::Appservice(Box::<ServerName>::try_from(server).map_err(|_| { OutgoingKind::Appservice(ServerName::parse(server).map_err(|_| {
Error::bad_database("Invalid server string in server_currenttransaction") Error::bad_database("Invalid server string in server_currenttransaction")
})?), })?),
if value.is_empty() { if value.is_empty() {
@ -765,7 +770,7 @@ impl Sending {
})?; })?;
( (
OutgoingKind::Normal(Box::<ServerName>::try_from(server).map_err(|_| { OutgoingKind::Normal(ServerName::parse(server).map_err(|_| {
Error::bad_database("Invalid server string in server_currenttransaction") Error::bad_database("Invalid server string in server_currenttransaction")
})?), })?),
if value.is_empty() { if value.is_empty() {
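A minimal standalone sketch of the redaction check introduced in the sending.rs hunk above: because unsigned is now a raw JSON blob, it has to be parsed before redacted_because can be looked up. The free function and its names are illustrative, not Conduit's API, and it assumes serde_json with the raw_value feature enabled.

use serde_json::value::RawValue;

// Returns true when the raw `unsigned` object contains a `redacted_because` key.
fn is_redacted(unsigned: Option<&RawValue>) -> bool {
    match unsigned {
        Some(raw) => serde_json::from_str::<serde_json::Value>(raw.get())
            .map(|v| v.get("redacted_because").is_some())
            .unwrap_or(false),
        None => false,
    }
}

fn main() {
    let raw = serde_json::value::to_raw_value(&serde_json::json!({
        "redacted_because": { "type": "m.room.redaction" }
    }))
    .expect("json! output serializes");
    assert!(is_redacted(Some(&*raw)));
    assert!(!is_redacted(None));
}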

29
src/database/uiaa.rs

@ -5,7 +5,8 @@ use ruma::{
api::client::{ api::client::{
error::ErrorKind, error::ErrorKind,
r0::uiaa::{ r0::uiaa::{
IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, UiaaInfo, AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId,
UiaaInfo,
}, },
}, },
signatures::CanonicalJsonValue, signatures::CanonicalJsonValue,
@ -99,10 +100,10 @@ impl Uiaa {
} }
// Password was correct! Let's add it to `completed` // Password was correct! Let's add it to `completed`
uiaainfo.completed.push("m.login.password".to_owned()); uiaainfo.completed.push(AuthType::Password);
} }
IncomingAuthData::Dummy(_) => { IncomingAuthData::Dummy(_) => {
uiaainfo.completed.push("m.login.dummy".to_owned()); uiaainfo.completed.push(AuthType::Dummy);
} }
k => error!("type not supported: {:?}", k), k => error!("type not supported: {:?}", k),
} }
@ -174,16 +175,14 @@ impl Uiaa {
self.userdevicesessionid_uiaarequest self.userdevicesessionid_uiaarequest
.get(&userdevicesessionid)? .get(&userdevicesessionid)?
.map_or(Ok(None), |bytes| { .map(|bytes| {
Ok::<_, Error>(Some( serde_json::from_str::<CanonicalJsonValue>(
serde_json::from_str::<CanonicalJsonValue>( &utils::string_from_bytes(&bytes)
&utils::string_from_bytes(&bytes).map_err(|_| { .map_err(|_| Error::bad_database("Invalid uiaa request bytes in db."))?,
Error::bad_database("Invalid uiaa request bytes in db.") )
})?, .map_err(|_| Error::bad_database("Invalid uiaa request in db."))
)
.map_err(|_| Error::bad_database("Invalid uiaa request in db."))?,
))
}) })
.transpose()
} }
fn update_uiaa_session( fn update_uiaa_session(
@ -224,7 +223,7 @@ impl Uiaa {
userdevicesessionid.push(0xff); userdevicesessionid.push(0xff);
userdevicesessionid.extend_from_slice(session.as_bytes()); userdevicesessionid.extend_from_slice(session.as_bytes());
let uiaainfo = serde_json::from_slice::<UiaaInfo>( serde_json::from_slice(
&self &self
.userdevicesessionid_uiaainfo .userdevicesessionid_uiaainfo
.get(&userdevicesessionid)? .get(&userdevicesessionid)?
@ -233,8 +232,6 @@ impl Uiaa {
"UIAA session does not exist.", "UIAA session does not exist.",
))?, ))?,
) )
.map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid."))?; .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid."))
Ok(uiaainfo)
} }
} }
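The uiaa.rs change above swaps map_or(Ok(None), ..) for map(..) followed by transpose(), which drops the nested Ok(Some(..)) wrapping. A small illustration with plain std types (not Conduit's) of the same Option<Result<..>> to Result<Option<..>> flip:

fn parse_opt(bytes: Option<&[u8]>) -> Result<Option<u32>, String> {
    bytes
        .map(|b| {
            // Any error inside the closure short-circuits; transpose then
            // turns the resulting Option<Result<..>> into Result<Option<..>>.
            std::str::from_utf8(b)
                .map_err(|_| "invalid utf8".to_owned())?
                .parse::<u32>()
                .map_err(|_| "invalid number".to_owned())
        })
        .transpose()
}

fn main() {
    assert_eq!(parse_opt(Some(&b"42"[..])), Ok(Some(42)));
    assert_eq!(parse_opt(None), Ok(None));
    assert!(parse_opt(Some(&b"nope"[..])).is_err());
}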

54
src/database/users.rs

@ -5,9 +5,10 @@ use ruma::{
events::{AnyToDeviceEvent, EventType}, events::{AnyToDeviceEvent, EventType},
identifiers::MxcUri, identifiers::MxcUri,
serde::Raw, serde::Raw,
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, UInt, UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt,
UserId,
}; };
use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc}; use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc};
use tracing::warn; use tracing::warn;
use super::abstraction::Tree; use super::abstraction::Tree;
@ -53,6 +54,21 @@ impl Users {
.is_empty()) .is_empty())
} }
/// Check if a user is an admin
#[tracing::instrument(skip(self, user_id, rooms, globals))]
pub fn is_admin(
&self,
user_id: &UserId,
rooms: &super::rooms::Rooms,
globals: &super::globals::Globals,
) -> Result<bool> {
let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name()))
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap();
rooms.is_joined(user_id, &admin_room_id)
}
/// Create a new user account on this homeserver. /// Create a new user account on this homeserver.
#[tracing::instrument(skip(self, user_id, password))] #[tracing::instrument(skip(self, user_id, password))]
pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> {
@ -68,7 +84,7 @@ impl Users {
/// Find out which user an access token belongs to. /// Find out which user an access token belongs to.
#[tracing::instrument(skip(self, token))] #[tracing::instrument(skip(self, token))]
pub fn find_from_token(&self, token: &str) -> Result<Option<(UserId, String)>> { pub fn find_from_token(&self, token: &str) -> Result<Option<(Box<UserId>, String)>> {
self.token_userdeviceid self.token_userdeviceid
.get(token.as_bytes())? .get(token.as_bytes())?
.map_or(Ok(None), |bytes| { .map_or(Ok(None), |bytes| {
@ -81,7 +97,7 @@ impl Users {
})?; })?;
Ok(Some(( Ok(Some((
UserId::try_from(utils::string_from_bytes(user_bytes).map_err(|_| { UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| {
Error::bad_database("User ID in token_userdeviceid is invalid unicode.") Error::bad_database("User ID in token_userdeviceid is invalid unicode.")
})?) })?)
.map_err(|_| { .map_err(|_| {
@ -96,9 +112,9 @@ impl Users {
/// Returns an iterator over all users on this homeserver. /// Returns an iterator over all users on this homeserver.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn iter(&self) -> impl Iterator<Item = Result<UserId>> + '_ { pub fn iter(&self) -> impl Iterator<Item = Result<Box<UserId>>> + '_ {
self.userid_password.iter().map(|(bytes, _)| { self.userid_password.iter().map(|(bytes, _)| {
UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in userid_password is invalid unicode.") Error::bad_database("User ID in userid_password is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) .map_err(|_| Error::bad_database("User ID in userid_password is invalid."))
@ -164,20 +180,21 @@ impl Users {
/// Get the avatar_url of a user. /// Get the avatar_url of a user.
#[tracing::instrument(skip(self, user_id))] #[tracing::instrument(skip(self, user_id))]
pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<MxcUri>> { pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<Box<MxcUri>>> {
self.userid_avatarurl self.userid_avatarurl
.get(user_id.as_bytes())? .get(user_id.as_bytes())?
.map(|bytes| { .map(|bytes| {
let s = utils::string_from_bytes(&bytes) let s = utils::string_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
MxcUri::try_from(s).map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) s.try_into()
.map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
}) })
.transpose() .transpose()
} }
/// Sets a new avatar_url or removes it if avatar_url is None. /// Sets a new avatar_url or removes it if avatar_url is None.
#[tracing::instrument(skip(self, user_id, avatar_url))] #[tracing::instrument(skip(self, user_id, avatar_url))]
pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<MxcUri>) -> Result<()> { pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<Box<MxcUri>>) -> Result<()> {
if let Some(avatar_url) = avatar_url { if let Some(avatar_url) = avatar_url {
self.userid_avatarurl self.userid_avatarurl
.insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?;
@ -392,7 +409,7 @@ impl Users {
device_id: &DeviceId, device_id: &DeviceId,
key_algorithm: &DeviceKeyAlgorithm, key_algorithm: &DeviceKeyAlgorithm,
globals: &super::globals::Globals, globals: &super::globals::Globals,
) -> Result<Option<(DeviceKeyId, OneTimeKey)>> { ) -> Result<Option<(Box<DeviceKeyId>, OneTimeKey)>> {
let mut prefix = user_id.as_bytes().to_vec(); let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
prefix.extend_from_slice(device_id.as_bytes()); prefix.extend_from_slice(device_id.as_bytes());
@ -442,7 +459,7 @@ impl Users {
.scan_prefix(userdeviceid) .scan_prefix(userdeviceid)
.map(|(bytes, _)| { .map(|(bytes, _)| {
Ok::<_, Error>( Ok::<_, Error>(
serde_json::from_slice::<DeviceKeyId>( serde_json::from_slice::<Box<DeviceKeyId>>(
&*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| {
Error::bad_database("OneTimeKey ID in db is invalid.") Error::bad_database("OneTimeKey ID in db is invalid.")
})?, })?,
@ -603,10 +620,11 @@ impl Users {
key.push(0xff); key.push(0xff);
key.extend_from_slice(key_id.as_bytes()); key.extend_from_slice(key_id.as_bytes());
let mut cross_signing_key = let mut cross_signing_key: serde_json::Value =
serde_json::from_slice::<serde_json::Value>(&self.keyid_key.get(&key)?.ok_or( serde_json::from_slice(&self.keyid_key.get(&key)?.ok_or(Error::BadRequest(
Error::BadRequest(ErrorKind::InvalidParam, "Tried to sign nonexistent key."), ErrorKind::InvalidParam,
)?) "Tried to sign nonexistent key.",
))?)
.map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?;
let signatures = cross_signing_key let signatures = cross_signing_key
@ -614,7 +632,7 @@ impl Users {
.ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))?
.as_object_mut() .as_object_mut()
.ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))?
.entry(sender_id.clone()) .entry(sender_id.to_owned())
.or_insert_with(|| serde_json::Map::new().into()); .or_insert_with(|| serde_json::Map::new().into());
signatures signatures
@ -639,7 +657,7 @@ impl Users {
user_or_room_id: &str, user_or_room_id: &str,
from: u64, from: u64,
to: Option<u64>, to: Option<u64>,
) -> impl Iterator<Item = Result<UserId>> + 'a { ) -> impl Iterator<Item = Result<Box<UserId>>> + 'a {
let mut prefix = user_or_room_id.as_bytes().to_vec(); let mut prefix = user_or_room_id.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
@ -665,7 +683,7 @@ impl Users {
} }
}) })
.map(|(_, bytes)| { .map(|(_, bytes)| {
UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid."))

2
src/error.rs

@ -20,7 +20,7 @@ use {
tracing::error, tracing::error,
}; };
pub type Result<T> = std::result::Result<T, Error>; pub type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Error, Debug)] #[derive(Error, Debug)]
pub enum Error { pub enum Error {
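The widened alias in error.rs above adds a default error parameter, so Result<T> still means Result<T, Error> while the same alias can now also name foreign error types (as utils.rs does below with Result<T, D::Error>). A tiny self-contained demonstration with a stand-in Error type:

#[derive(Debug)]
struct Error; // stand-in for Conduit's error enum

type Result<T, E = Error> = std::result::Result<T, E>;

fn crate_level() -> Result<u32> {
    // Uses the default: Result<u32, Error>.
    Ok(1)
}

fn foreign_error() -> Result<u32, std::num::ParseIntError> {
    // Same alias, different error type.
    "2".parse()
}

fn main() {
    assert_eq!(crate_level().unwrap(), 1);
    assert_eq!(foreign_error().unwrap(), 2);
}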

6
src/lib.rs

@ -1,3 +1,9 @@
#![warn(
rust_2018_idioms,
unused_qualifications,
clippy::cloned_instead_of_copied,
clippy::str_to_string
)]
#![allow(clippy::suspicious_else_formatting)] #![allow(clippy::suspicious_else_formatting)]
#![deny(clippy::dbg_macro)] #![deny(clippy::dbg_macro)]

8
src/main.rs

@ -1,4 +1,9 @@
#![warn(rust_2018_idioms)] #![warn(
rust_2018_idioms,
unused_qualifications,
clippy::cloned_instead_of_copied,
clippy::str_to_string
)]
#![allow(clippy::suspicious_else_formatting)] #![allow(clippy::suspicious_else_formatting)]
#![deny(clippy::dbg_macro)] #![deny(clippy::dbg_macro)]
@ -96,6 +101,7 @@ fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<
client_server::create_typing_event_route, client_server::create_typing_event_route,
client_server::create_room_route, client_server::create_room_route,
client_server::redact_event_route, client_server::redact_event_route,
client_server::report_event_route,
client_server::create_alias_route, client_server::create_alias_route,
client_server::delete_alias_route, client_server::delete_alias_route,
client_server::get_alias_route, client_server::get_alias_route,
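lib.rs and main.rs above enable extra rustc and clippy lints crate-wide. Purely as illustration, this is the kind of code the two clippy lints point at; the suggested replacements are in the comments.

// With clippy::str_to_string and clippy::cloned_instead_of_copied set to warn,
// clippy flags the two marked lines below.
fn lint_examples() -> (String, Vec<u32>) {
    let s: &str = "hello";
    let owned = s.to_string(); // str_to_string: prefer s.to_owned()
    let nums = [1u32, 2, 3];
    let copied: Vec<u32> = nums.iter().cloned().collect(); // cloned_instead_of_copied: prefer .copied()
    (owned, copied)
}

fn main() {
    let (owned, copied) = lint_examples();
    assert_eq!(owned.len(), 5);
    assert_eq!(copied, vec![1, 2, 3]);
}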

132
src/pdu.rs

@ -1,45 +1,55 @@
use crate::Error; use crate::Error;
use ruma::{ use ruma::{
events::{ events::{
pdu::EventHash, room::member::MemberEventContent, AnyEphemeralRoomEvent, room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyInitialStateEvent,
AnyInitialStateEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent,
AnySyncStateEvent, EventType, StateEvent, EventType, StateEvent,
}, },
serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw},
state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId,
ServerSigningKeyId, UInt, UserId,
}; };
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::json; use serde_json::{
use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom}; json,
value::{to_raw_value, RawValue as RawJsonValue},
};
use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, sync::Arc};
use tracing::warn; use tracing::warn;
/// Content hashes of a PDU.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct EventHash {
/// The SHA-256 hash.
pub sha256: String,
}
#[derive(Clone, Deserialize, Serialize, Debug)] #[derive(Clone, Deserialize, Serialize, Debug)]
pub struct PduEvent { pub struct PduEvent {
pub event_id: EventId, pub event_id: Arc<EventId>,
pub room_id: RoomId, pub room_id: Box<RoomId>,
pub sender: UserId, pub sender: Box<UserId>,
pub origin_server_ts: UInt, pub origin_server_ts: UInt,
#[serde(rename = "type")] #[serde(rename = "type")]
pub kind: EventType, pub kind: EventType,
pub content: serde_json::Value, pub content: Box<RawJsonValue>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub state_key: Option<String>, pub state_key: Option<String>,
pub prev_events: Vec<EventId>, pub prev_events: Vec<Arc<EventId>>,
pub depth: UInt, pub depth: UInt,
pub auth_events: Vec<EventId>, pub auth_events: Vec<Arc<EventId>>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub redacts: Option<EventId>, pub redacts: Option<Arc<EventId>>,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub unsigned: BTreeMap<String, serde_json::Value>, pub unsigned: Option<Box<RawJsonValue>>,
pub hashes: EventHash, pub hashes: EventHash,
pub signatures: BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, String>>, #[serde(default, skip_serializing_if = "Option::is_none")]
pub signatures: Option<Box<RawJsonValue>>, // BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, String>>
} }
impl PduEvent { impl PduEvent {
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn redact(&mut self, reason: &PduEvent) -> crate::Result<()> { pub fn redact(&mut self, reason: &PduEvent) -> crate::Result<()> {
self.unsigned.clear(); self.unsigned = None;
let allowed: &[&str] = match self.kind { let allowed: &[&str] = match self.kind {
EventType::RoomMember => &["membership"], EventType::RoomMember => &["membership"],
@ -59,10 +69,9 @@ impl PduEvent {
_ => &[], _ => &[],
}; };
let old_content = self let mut old_content: BTreeMap<String, serde_json::Value> =
.content serde_json::from_str(self.content.get())
.as_object_mut() .map_err(|_| Error::bad_database("PDU in db has invalid content."))?;
.ok_or_else(|| Error::bad_database("PDU in db has invalid content."))?;
let mut new_content = serde_json::Map::new(); let mut new_content = serde_json::Map::new();
@ -72,12 +81,23 @@ impl PduEvent {
} }
} }
self.unsigned.insert( self.unsigned = Some(to_raw_value(&json!({
"redacted_because".to_owned(), "redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works")
serde_json::to_value(reason).expect("to_value(PduEvent) always works"), })).expect("to string always works"));
);
self.content = to_raw_value(&new_content).expect("to string always works");
self.content = new_content.into(); Ok(())
}
pub fn remove_transaction_id(&mut self) -> crate::Result<()> {
if let Some(unsigned) = &self.unsigned {
let mut unsigned: BTreeMap<String, Box<RawJsonValue>> =
serde_json::from_str(unsigned.get())
.map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?;
unsigned.remove("transaction_id");
self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid"));
}
Ok(()) Ok(())
} }
@ -192,7 +212,7 @@ impl PduEvent {
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn to_member_event(&self) -> Raw<StateEvent<MemberEventContent>> { pub fn to_member_event(&self) -> Raw<StateEvent<RoomMemberEventContent>> {
let json = json!({ let json = json!({
"content": self.content, "content": self.content,
"type": self.kind, "type": self.kind,
@ -212,7 +232,7 @@ impl PduEvent {
#[tracing::instrument] #[tracing::instrument]
pub fn convert_to_outgoing_federation_event( pub fn convert_to_outgoing_federation_event(
mut pdu_json: CanonicalJsonObject, mut pdu_json: CanonicalJsonObject,
) -> Raw<ruma::events::pdu::Pdu> { ) -> Box<RawJsonValue> {
if let Some(unsigned) = pdu_json if let Some(unsigned) = pdu_json
.get_mut("unsigned") .get_mut("unsigned")
.and_then(|val| val.as_object_mut()) .and_then(|val| val.as_object_mut())
@ -229,10 +249,7 @@ impl PduEvent {
// ) // )
// .expect("Raw::from_value always works") // .expect("Raw::from_value always works")
serde_json::from_value::<Raw<_>>( to_raw_value(&pdu_json).expect("CanonicalJson is valid serde_json::Value")
serde_json::to_value(pdu_json).expect("CanonicalJson is valid serde_json::Value"),
)
.expect("Raw::from_value always works")
} }
pub fn from_id_val( pub fn from_id_val(
@ -240,7 +257,7 @@ impl PduEvent {
mut json: CanonicalJsonObject, mut json: CanonicalJsonObject,
) -> Result<Self, serde_json::Error> { ) -> Result<Self, serde_json::Error> {
json.insert( json.insert(
"event_id".to_string(), "event_id".to_owned(),
CanonicalJsonValue::String(event_id.as_str().to_owned()), CanonicalJsonValue::String(event_id.as_str().to_owned()),
); );
@ -249,7 +266,9 @@ impl PduEvent {
} }
impl state_res::Event for PduEvent { impl state_res::Event for PduEvent {
fn event_id(&self) -> &EventId { type Id = Arc<EventId>;
fn event_id(&self) -> &Self::Id {
&self.event_id &self.event_id
} }
@ -265,7 +284,7 @@ impl state_res::Event for PduEvent {
&self.kind &self.kind
} }
fn content(&self) -> &serde_json::Value { fn content(&self) -> &RawJsonValue {
&self.content &self.content
} }
@ -277,33 +296,17 @@ impl state_res::Event for PduEvent {
self.state_key.as_deref() self.state_key.as_deref()
} }
fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + '_> { fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_> {
Box::new(self.prev_events.iter()) Box::new(self.prev_events.iter())
} }
fn depth(&self) -> &UInt { fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + '_> {
&self.depth
}
fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + '_> {
Box::new(self.auth_events.iter()) Box::new(self.auth_events.iter())
} }
fn redacts(&self) -> Option<&EventId> { fn redacts(&self) -> Option<&Self::Id> {
self.redacts.as_ref() self.redacts.as_ref()
} }
fn hashes(&self) -> &EventHash {
&self.hashes
}
fn signatures(&self) -> BTreeMap<Box<ServerName>, BTreeMap<ruma::ServerSigningKeyId, String>> {
self.signatures.clone()
}
fn unsigned(&self) -> &BTreeMap<String, serde_json::Value> {
&self.unsigned
}
} }
// These impl's allow us to dedup state snapshots when resolving state // These impl's allow us to dedup state snapshots when resolving state
@ -329,19 +332,20 @@ impl Ord for PduEvent {
/// ///
/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap<String, CanonicalJsonValue>`. /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap<String, CanonicalJsonValue>`.
pub(crate) fn gen_event_id_canonical_json( pub(crate) fn gen_event_id_canonical_json(
pdu: &Raw<ruma::events::pdu::Pdu>, pdu: &RawJsonValue,
) -> crate::Result<(EventId, CanonicalJsonObject)> { ) -> crate::Result<(Box<EventId>, CanonicalJsonObject)> {
let value = serde_json::from_str(pdu.json().get()).map_err(|e| { let value = serde_json::from_str(pdu.get()).map_err(|e| {
warn!("Error parsing incoming event {:?}: {:?}", pdu, e); warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response") Error::BadServerResponse("Invalid PDU in server response")
})?; })?;
let event_id = EventId::try_from(&*format!( let event_id = format!(
"${}", "${}",
// Anything higher than version3 behaves the same // Anything higher than version3 behaves the same
ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) ruma::signatures::reference_hash(&value, &RoomVersionId::V6)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
)) )
.try_into()
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
Ok((event_id, value)) Ok((event_id, value))
@ -352,10 +356,10 @@ pub(crate) fn gen_event_id_canonical_json(
pub struct PduBuilder { pub struct PduBuilder {
#[serde(rename = "type")] #[serde(rename = "type")]
pub event_type: EventType, pub event_type: EventType,
pub content: serde_json::Value, pub content: Box<RawJsonValue>,
pub unsigned: Option<BTreeMap<String, serde_json::Value>>, pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
pub state_key: Option<String>, pub state_key: Option<String>,
pub redacts: Option<EventId>, pub redacts: Option<Arc<EventId>>,
} }
/// Direct conversion prevents loss of the empty `state_key` that ruma requires. /// Direct conversion prevents loss of the empty `state_key` that ruma requires.
@ -363,7 +367,7 @@ impl From<AnyInitialStateEvent> for PduBuilder {
fn from(event: AnyInitialStateEvent) -> Self { fn from(event: AnyInitialStateEvent) -> Self {
Self { Self {
event_type: EventType::from(event.event_type()), event_type: EventType::from(event.event_type()),
content: serde_json::value::to_value(event.content()) content: to_raw_value(&event.content())
.expect("AnyStateEventContent came from JSON and can thus turn back into JSON."), .expect("AnyStateEventContent came from JSON and can thus turn back into JSON."),
unsigned: None, unsigned: None,
state_key: Some(event.state_key().to_owned()), state_key: Some(event.state_key().to_owned()),
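With unsigned now held as Option<Box<RawJsonValue>> in PduEvent, the new remove_transaction_id helper in the pdu.rs hunk above round-trips it through a BTreeMap of raw values. A standalone sketch of the same round-trip, written as a free function rather than a method (names are the sketch's own, not Conduit's exact API); assumes serde_json with the raw_value feature:

use std::collections::BTreeMap;
use serde_json::value::{to_raw_value, RawValue};

// Strips `transaction_id` from a raw `unsigned` object, leaving other keys intact.
fn remove_transaction_id(unsigned: Option<Box<RawValue>>) -> Option<Box<RawValue>> {
    unsigned.map(|raw| {
        let mut map: BTreeMap<String, Box<RawValue>> =
            serde_json::from_str(raw.get()).expect("unsigned is valid JSON");
        map.remove("transaction_id");
        to_raw_value(&map).expect("map of raw values serializes")
    })
}

fn main() {
    let unsigned = to_raw_value(&serde_json::json!({
        "transaction_id": "m123",
        "age": 4
    }))
    .expect("json! output serializes");
    let cleaned = remove_transaction_id(Some(unsigned)).expect("input was Some");
    assert!(!cleaned.get().contains("transaction_id"));
    assert!(cleaned.get().contains("age"));
}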

12
src/ruma_wrapper.rs

@ -20,7 +20,6 @@ use {
}, },
ruma::api::{AuthScheme, IncomingRequest}, ruma::api::{AuthScheme, IncomingRequest},
std::collections::BTreeMap, std::collections::BTreeMap,
std::convert::TryFrom,
std::io::Cursor, std::io::Cursor,
tracing::{debug, warn}, tracing::{debug, warn},
}; };
@ -29,7 +28,7 @@ use {
/// first. /// first.
pub struct Ruma<T: Outgoing> { pub struct Ruma<T: Outgoing> {
pub body: T::Incoming, pub body: T::Incoming,
pub sender_user: Option<UserId>, pub sender_user: Option<Box<UserId>>,
pub sender_device: Option<Box<DeviceId>>, pub sender_device: Option<Box<DeviceId>>,
pub sender_servername: Option<Box<ServerName>>, pub sender_servername: Option<Box<ServerName>>,
// This is None when body is not a valid string // This is None when body is not a valid string
@ -86,7 +85,7 @@ where
registration registration
.get("as_token") .get("as_token")
.and_then(|as_token| as_token.as_str()) .and_then(|as_token| as_token.as_str())
.map_or(false, |as_token| token.as_deref() == Some(as_token)) .map_or(false, |as_token| token == Some(as_token))
}) { }) {
match metadata.authentication { match metadata.authentication {
AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
@ -103,8 +102,7 @@ where
.unwrap() .unwrap()
}, },
|string| { |string| {
UserId::try_from(string.expect("parsing to string always works")) UserId::parse(string.expect("parsing to string always works")).unwrap()
.unwrap()
}, },
); );
@ -171,7 +169,7 @@ where
} }
}; };
let origin = match Box::<ServerName>::try_from(origin_str) { let origin = match ServerName::parse(origin_str) {
Ok(s) => s, Ok(s) => s,
_ => { _ => {
warn!( warn!(
@ -344,7 +342,7 @@ impl<T: Outgoing> Deref for Ruma<T> {
} }
/// This struct converts ruma responses into rocket http responses. /// This struct converts ruma responses into rocket http responses.
pub type ConduitResult<T> = std::result::Result<RumaResponse<T>, Error>; pub type ConduitResult<T> = Result<RumaResponse<T>, Error>;
pub fn response<T: OutgoingResponse>(response: RumaResponse<T>) -> response::Result<'static> { pub fn response<T: OutgoingResponse>(response: RumaResponse<T>) -> response::Result<'static> {
let http_response = response let http_response = response

370
src/server_server.rs

File diff suppressed because it is too large

2
src/utils.rs

@ -123,7 +123,7 @@ pub fn deserialize_from_str<
E: std::fmt::Display, E: std::fmt::Display,
>( >(
deserializer: D, deserializer: D,
) -> std::result::Result<T, D::Error> { ) -> Result<T, D::Error> {
struct Visitor<T: FromStr<Err = E>, E>(std::marker::PhantomData<T>); struct Visitor<T: FromStr<Err = E>, E>(std::marker::PhantomData<T>);
impl<'de, T: FromStr<Err = Err>, Err: std::fmt::Display> serde::de::Visitor<'de> impl<'de, T: FromStr<Err = Err>, Err: std::fmt::Display> serde::de::Visitor<'de>
for Visitor<T, Err> for Visitor<T, Err>

1
tests/sytest/sytest-whitelist

@ -510,3 +510,4 @@ remote user can join room with version 5
remote user can join room with version 6 remote user can join room with version 6
setting 'm.room.name' respects room powerlevel setting 'm.room.name' respects room powerlevel
setting 'm.room.power_levels' respects room powerlevel setting 'm.room.power_levels' respects room powerlevel
Federation publicRoom Name/topic keys are correct
