Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2026-01-16 20:50:33 +00:00)

Compare commits (57 commits)
| SHA1 |
|---|
| 1109293992 |
| 3c29f82974 |
| 663f88e717 |
| a3dccee243 |
| c0ebe0d982 |
| 1b46c80389 |
| 2c549984c0 |
| ecab7a50ea |
| 2903a3a13a |
| 952992c85b |
| c0be36a17f |
| d1dee04615 |
| ef2695de0c |
| 29f2b433f0 |
| 07f80346b4 |
| 4f68eafa3e |
| 327d369188 |
| ca7483df85 |
| 16b6d2a71e |
| 871a3f214a |
| 10d12676cf |
| dec3a9603a |
| 86aaf27659 |
| bc913d1156 |
| ef4bff09eb |
| 4816f77fd7 |
| dfd9e65396 |
| b1481c7c1a |
| d9e0d68f20 |
| 08183fc999 |
| d9b043d32c |
| ed4ad67e73 |
| a523c82f5f |
| 4d6d3443ae |
| 9cd400db6c |
| fd51230044 |
| 45e5f06b86 |
| 620ad92331 |
| c9860af11c |
| d7adce97df |
| 71b3d3c818 |
| da3701c0cf |
| 96813b1317 |
| b0b953f348 |
| cdfdc6ff4f |
| 2393c3f3c0 |
| 0d16b38a68 |
| ff33534c07 |
| adb21d5c1a |
| e927b8aa5e |
| ba48ca68fc |
| 294b429436 |
| 37c14c3c69 |
| d0581da638 |
| 38aad4f7be |
| 20d9e885bf |
| 2f20ad86f9 |
@@ -5,6 +5,7 @@
!.git
!docker/healthcheck.sh
!docker/start.sh
!macros
!migrations
!src

@@ -280,12 +280,13 @@
## The default for new users. If changed, it will be updated during login for existing users.
# PASSWORD_ITERATIONS=600000

## Controls whether users can set password hints. This setting applies globally to all users.
## Controls whether users can set or show password hints. This setting applies globally to all users.
# PASSWORD_HINTS_ALLOWED=true

## Controls whether a password hint should be shown directly in the web page if
## SMTP service is not configured. Not recommended for publicly-accessible instances
## as this provides unauthenticated access to potentially sensitive data.
## SMTP service is not configured and password hints are allowed.
## Not recommended for publicly-accessible instances because this provides
## unauthenticated access to potentially sensitive data.
# SHOW_PASSWORD_HINT=false

#########################

@@ -349,6 +350,9 @@
## - "browser-fileless-import": Directly import credentials from other providers without a file.
## - "extension-refresh": Temporarily enable the new extension design until general availability (should be used with the beta Chrome extension)
## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
## - "inline-menu-positioning-improvements": Enable the use of inline menu password generator and identity suggestions in the browser extension.
## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Needs clients >=2024.12.0)
## - "ssh-agent": Enable SSH agent support on Desktop. (Needs desktop >=2024.12.0)
# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials

## Require new device emails. When a user logs in an email is required to be sent.

@@ -407,6 +411,14 @@
## Multiple values must be separated with a whitespace.
# ALLOWED_IFRAME_ANCESTORS=

## Allowed connect-src (Know the risks!)
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src
## Allows other domains to URLs which can be loaded using script interfaces like the Forwarded email alias feature
## This adds the configured value to the 'Content-Security-Policy' headers 'connect-src' value.
## Multiple values must be separated with a whitespace. And only HTTPS values are allowed.
## Example: "https://my-addy-io.domain.tld https://my-simplelogin.domain.tld"
# ALLOWED_CONNECT_SRC=""

## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in.
# LOGIN_RATELIMIT_SECONDS=60
## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`.
30  .github/workflows/build.yml  vendored

@@ -47,7 +47,7 @@ jobs:
steps:
# Checkout the repo
- name: "Checkout"
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
# End Checkout the repo

@@ -75,7 +75,7 @@ jobs:

# Only install the clippy and rustfmt components on the default rust-toolchain
- name: "Install rust-toolchain version"
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203 # master @ Dec 14, 2024, 5:49 AM GMT+1
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

@@ -85,7 +85,7 @@ jobs:

# Install the any other channel to be used for which we do not execute clippy and rustfmt
- name: "Install MSRV version"
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a # master @ Aug 8, 2024, 7:36 PM GMT+2
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203 # master @ Dec 14, 2024, 5:49 AM GMT+1
if: ${{ matrix.channel != 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

@@ -107,7 +107,8 @@ jobs:
# End Show environment

# Enable Rust Caching
- uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
- name: Rust Caching
uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7
with:
# Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
# Like changing the build host from Ubuntu 20.04 to 22.04 for example.

@@ -117,33 +118,39 @@ jobs:

# Run cargo tests
# First test all features together, afterwards test them separately.
- name: "test features: sqlite,mysql,postgresql,enable_mimalloc,query_logger"
id: test_sqlite_mysql_postgresql_mimalloc_logger
if: ${{ !cancelled() }}
run: |
cargo test --features sqlite,mysql,postgresql,enable_mimalloc,query_logger

- name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
id: test_sqlite_mysql_postgresql_mimalloc
if: $${{ always() }}
if: ${{ !cancelled() }}
run: |
cargo test --features sqlite,mysql,postgresql,enable_mimalloc

- name: "test features: sqlite,mysql,postgresql"
id: test_sqlite_mysql_postgresql
if: $${{ always() }}
if: ${{ !cancelled() }}
run: |
cargo test --features sqlite,mysql,postgresql

- name: "test features: sqlite"
id: test_sqlite
if: $${{ always() }}
if: ${{ !cancelled() }}
run: |
cargo test --features sqlite

- name: "test features: mysql"
id: test_mysql
if: $${{ always() }}
if: ${{ !cancelled() }}
run: |
cargo test --features mysql

- name: "test features: postgresql"
id: test_postgresql
if: $${{ always() }}
if: ${{ !cancelled() }}
run: |
cargo test --features postgresql
# End Run cargo tests

@@ -152,7 +159,7 @@ jobs:
# Run cargo clippy, and fail on warnings
- name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
id: clippy
if: ${{ always() && matrix.channel == 'rust-toolchain' }}
if: ${{ !cancelled() && matrix.channel == 'rust-toolchain' }}
run: |
cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
# End Run cargo clippy

@@ -161,7 +168,7 @@ jobs:
# Run cargo fmt (Only run on rust-toolchain defined version)
- name: "check formatting"
id: formatting
if: ${{ always() && matrix.channel == 'rust-toolchain' }}
if: ${{ !cancelled() && matrix.channel == 'rust-toolchain' }}
run: |
cargo fmt --all -- --check
# End Run cargo fmt

@@ -176,6 +183,7 @@ jobs:
echo "" >> $GITHUB_STEP_SUMMARY
echo "|Job|Status|" >> $GITHUB_STEP_SUMMARY
echo "|---|------|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite,mysql,postgresql,enable_mimalloc,query_logger)|${{ steps.test_sqlite_mysql_postgresql_mimalloc_logger.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite,mysql,postgresql)|${{ steps.test_sqlite_mysql_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite)|${{ steps.test_sqlite.outcome }}|" >> $GITHUB_STEP_SUMMARY
4  .github/workflows/hadolint.yml  vendored

@@ -13,12 +13,12 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
# End Checkout the repo

# Start Docker Buildx
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
with:
122  .github/workflows/release.yml  vendored

@@ -27,11 +27,16 @@ jobs:
if: ${{ github.ref_type == 'branch' }}

docker-build:
permissions:
packages: write
contents: read
attestations: write
id-token: write
runs-on: ubuntu-24.04
timeout-minutes: 120
needs: skip_check
if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
# Start a local docker registry to extract the final Alpine static build binaries
# Start a local docker registry to extract the compiled binaries to upload as artifacts and attest them
services:
registry:
image: registry:2

@@ -58,18 +63,18 @@
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
fetch-depth: 0

- name: Initialize QEMU binfmt support
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
uses: docker/setup-qemu-action@53851d14592bedcffcf25ea515637cff71ef929a # v3.3.0
with:
platforms: "arm64,arm"

# Start Docker Buildx
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
with:

@@ -159,13 +164,13 @@ jobs:
#

- name: Add localhost registry
if: ${{ matrix.base_image == 'alpine' }}
shell: bash
run: |
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"

- name: Bake ${{ matrix.base_image }} containers
uses: docker/bake-action@2e3d19baedb14545e5d41222653874f25d5b4dfb # v5.10.0
id: bake_vw
uses: docker/bake-action@5ca506d06f70338a4968df87fd8bfee5cbfb84c7 # v6.0.0
env:
BASE_TAGS: "${{ env.BASE_TAGS }}"
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"

@@ -175,16 +180,47 @@
with:
pull: true
push: true
source: .
files: docker/docker-bake.hcl
targets: "${{ matrix.base_image }}-multi"
set: |
*.cache-from=${{ env.BAKE_CACHE_FROM }}
*.cache-to=${{ env.BAKE_CACHE_TO }}

- name: Extract digest SHA
shell: bash
run: |
GET_DIGEST_SHA="$(jq -r '.["${{ matrix.base_image }}-multi"]."containerimage.digest"' <<< '${{ steps.bake_vw.outputs.metadata }}')"
echo "DIGEST_SHA=${GET_DIGEST_SHA}" | tee -a "${GITHUB_ENV}"

# Attest container images
- name: Attest - docker.io - ${{ matrix.base_image }}
if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
with:
subject-name: ${{ vars.DOCKERHUB_REPO }}
subject-digest: ${{ env.DIGEST_SHA }}
push-to-registry: true

- name: Attest - ghcr.io - ${{ matrix.base_image }}
if: ${{ env.HAVE_GHCR_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
with:
subject-name: ${{ vars.GHCR_REPO }}
subject-digest: ${{ env.DIGEST_SHA }}
push-to-registry: true

- name: Attest - quay.io - ${{ matrix.base_image }}
if: ${{ env.HAVE_QUAY_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
with:
subject-name: ${{ vars.QUAY_REPO }}
subject-digest: ${{ env.DIGEST_SHA }}
push-to-registry: true

# Extract the Alpine binaries from the containers
- name: Extract binaries
if: ${{ matrix.base_image == 'alpine' }}
shell: bash
run: |
# Check which main tag we are going to build determined by github.ref_type

@@ -194,59 +230,65 @@ jobs:
EXTRACT_TAG="testing"
fi

# Check which base_image was used and append -alpine if needed
if [[ "${{ matrix.base_image }}" == "alpine" ]]; then
EXTRACT_TAG="${EXTRACT_TAG}-alpine"
fi

# After each extraction the image is removed.
# This is needed because using different platforms doesn't trigger a new pull/download

# Extract amd64 binary
docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker cp amd64:/vaultwarden vaultwarden-amd64
docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
docker cp amd64:/vaultwarden vaultwarden-amd64-${{ matrix.base_image }}
docker rm --force amd64
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"

# Extract arm64 binary
docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker cp arm64:/vaultwarden vaultwarden-arm64
docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
docker cp arm64:/vaultwarden vaultwarden-arm64-${{ matrix.base_image }}
docker rm --force arm64
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"

# Extract armv7 binary
docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker cp armv7:/vaultwarden vaultwarden-armv7
docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
docker cp armv7:/vaultwarden vaultwarden-armv7-${{ matrix.base_image }}
docker rm --force armv7
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"

# Extract armv6 binary
docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker cp armv6:/vaultwarden vaultwarden-armv6
docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
docker cp armv6:/vaultwarden vaultwarden-armv6-${{ matrix.base_image }}
docker rm --force armv6
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"

# Upload artifacts to Github Actions
- name: "Upload amd64 artifact"
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
if: ${{ matrix.base_image == 'alpine' }}
# Upload artifacts to Github Actions and Attest the binaries
- name: "Upload amd64 artifact ${{ matrix.base_image }}"
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
path: vaultwarden-amd64
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64-${{ matrix.base_image }}
path: vaultwarden-amd64-${{ matrix.base_image }}

- name: "Upload arm64 artifact"
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
if: ${{ matrix.base_image == 'alpine' }}
- name: "Upload arm64 artifact ${{ matrix.base_image }}"
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
path: vaultwarden-arm64
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64-${{ matrix.base_image }}
path: vaultwarden-arm64-${{ matrix.base_image }}

- name: "Upload armv7 artifact"
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
if: ${{ matrix.base_image == 'alpine' }}
- name: "Upload armv7 artifact ${{ matrix.base_image }}"
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
path: vaultwarden-armv7
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7-${{ matrix.base_image }}
path: vaultwarden-armv7-${{ matrix.base_image }}

- name: "Upload armv6 artifact"
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
if: ${{ matrix.base_image == 'alpine' }}
- name: "Upload armv6 artifact ${{ matrix.base_image }}"
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
path: vaultwarden-armv6
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6-${{ matrix.base_image }}
path: vaultwarden-armv6-${{ matrix.base_image }}

- name: "Attest artifacts ${{ matrix.base_image }}"
uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
with:
subject-path: vaultwarden-*
# End Upload artifacts to Github Actions
9  .github/workflows/trivy.yml  vendored

@@ -28,10 +28,13 @@ jobs:
actions: read
steps:
- name: Checkout code
uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 #v4.2.1
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2

- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@5681af892cd0f4997658e2bacc62bd0a894cf564 # v0.27.0
uses: aquasecurity/trivy-action@18f2510ee396bbf400402947b394f2dd8c87dbb0 # v0.29.0
env:
TRIVY_DB_REPOSITORY: docker.io/aquasec/trivy-db:2,public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2
TRIVY_JAVA_DB_REPOSITORY: docker.io/aquasec/trivy-java-db:1,public.ecr.aws/aquasecurity/trivy-java-db:1,ghcr.io/aquasecurity/trivy-java-db:1
with:
scan-type: repo
ignore-unfixed: true

@@ -40,6 +43,6 @@ jobs:
severity: CRITICAL,HIGH

- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.26.6
uses: github/codeql-action/upload-sarif@86b04fb0e47484f7282357688f21d5d0e32175fe # v3.27.5
with:
sarif_file: 'trivy-results.sarif'
1185  Cargo.lock  generated
File diff suppressed because it is too large.
90  Cargo.toml

@@ -1,9 +1,11 @@
workspace = { members = ["macros"] }

[package]
name = "vaultwarden"
version = "1.0.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
edition = "2021"
rust-version = "1.80.0"
rust-version = "1.83.0"
resolver = "2"

repository = "https://github.com/dani-garcia/vaultwarden"

@@ -36,13 +38,15 @@ unstable = []

[target."cfg(unix)".dependencies]
# Logging
syslog = "6.1.1"
syslog = "7.0.0"

[dependencies]
macros = { path = "./macros" }

# Logging
log = "0.4.22"
fern = { version = "0.7.0", features = ["syslog-6", "reopen-1"] }
tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
log = "0.4.25"
fern = { version = "0.7.1", features = ["syslog-7", "reopen-1"] }
tracing = { version = "0.1.41", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

# A `dotenv` implementation for Rust
dotenvy = { version = "0.15.7", default-features = false }

@@ -53,7 +57,7 @@ once_cell = "1.20.2"
# Numerical libraries
num-traits = "0.2.19"
num-derive = "0.4.2"
bigdecimal = "0.4.5"
bigdecimal = "0.4.7"

# Web framework
rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }

@@ -67,37 +71,40 @@ dashmap = "6.1.0"

# Async futures
futures = "0.3.31"
tokio = { version = "1.41.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
tokio = { version = "1.43.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }

# A generic serialization/deserialization framework
serde = { version = "1.0.213", features = ["derive"] }
serde_json = "1.0.132"
serde = { version = "1.0.217", features = ["derive"] }
serde_json = "1.0.138"

# A safe, extensible ORM and Query builder
diesel = { version = "2.2.4", features = ["chrono", "r2d2", "numeric"] }
diesel = { version = "2.2.7", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.2.0"
diesel_logger = { version = "0.3.0", optional = true }
diesel_logger = { version = "0.4.0", optional = true }

derive_more = { version = "1.0.0", features = ["from", "into", "as_ref", "deref", "display"] }
diesel-derive-newtype = "2.1.2"

# Bundled/Static SQLite
libsqlite3-sys = { version = "0.30.1", features = ["bundled"], optional = true }
libsqlite3-sys = { version = "0.31.0", features = ["bundled"], optional = true }

# Crypto-related libraries
rand = { version = "0.8.5", features = ["small_rng"] }
rand = "0.9.0"
ring = "0.17.8"

# UUID generation
uuid = { version = "1.11.0", features = ["v4"] }
uuid = { version = "1.12.1", features = ["v4"] }

# Date and time libraries
chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.10.0"
time = "0.3.36"
chrono = { version = "0.4.39", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.10.1"
time = "0.3.37"

# Job scheduler
job_scheduler_ng = "2.0.5"

# Data encoding library Hex/Base32/Base64
data-encoding = "2.6.0"
data-encoding = "2.7.0"

# JWT library
jsonwebtoken = "9.3.0"

@@ -106,56 +113,56 @@ jsonwebtoken = "9.3.0"
totp-lite = "2.0.1"

# Yubico Library
yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
yubico = { version = "0.12.0", features = ["online-tokio"], default-features = false }

# WebAuthn libraries
webauthn-rs = "0.3.2"

# Handling of URL's for WebAuthn and favicons
url = "2.5.2"
url = "2.5.4"

# Email libraries
lettre = { version = "0.11.10", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
lettre = { version = "0.11.11", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
email_address = "0.2.9"

# HTML Template library
handlebars = { version = "6.1.0", features = ["dir_source"] }
handlebars = { version = "6.3.0", features = ["dir_source"] }

# HTTP client (Used for favicons, version check, DUO and HIBP API)
reqwest = { version = "0.12.8", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
hickory-resolver = "0.24.1"
reqwest = { version = "0.12.12", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
hickory-resolver = "0.24.2"

# Favicon extraction libraries
html5gum = "0.5.7"
regex = { version = "1.11.0", features = ["std", "perf", "unicode-perl"], default-features = false }
html5gum = "0.7.0"
regex = { version = "1.11.1", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.3.1"
bytes = "1.8.0"
bytes = "1.9.0"

# Cache function results (Used for version check and favicon fetching)
cached = { version = "0.53.1", features = ["async"] }
cached = { version = "0.54.0", features = ["async"] }

# Used for custom short lived cookie jar during favicon extraction
cookie = "0.18.1"
cookie_store = "0.21.0"
cookie_store = "0.21.1"

# Used by U2F, JWT and PostgreSQL
openssl = "0.10.68"
openssl = "0.10.69"

# CLI argument parsing
pico-args = "0.5.0"

# Macro ident concatenation
paste = "1.0.15"
governor = "0.7.0"
governor = "0.8.0"

# Check client versions for specific features.
semver = "1.0.23"
semver = "1.0.25"

# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
which = "6.0.3"
which = "7.0.1"

# Argon2 library with support for the PHC format
argon2 = "0.5.3"

@@ -163,6 +170,13 @@ argon2 = "0.5.3"
# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
rpassword = "7.3.1"

# Loading a dynamic CSS Stylesheet
grass_compiler = { version = "0.13.4", default-features = false }

[patch.crates-io]
# Patch yubico to remove duplicate crates of older versions
yubico = { git = "https://github.com/BlackDex/yubico-rs", rev = "00df14811f58155c0f02e3ab10f1570ed3e115c6" }

# Strip debuginfo from the release builds
# The symbols are the provide better panic traces
# Also enable fat LTO and use 1 codegen unit for optimizations

@@ -213,7 +227,8 @@ noop_method_call = "deny"
refining_impl_trait = { level = "deny", priority = -1 }
rust_2018_idioms = { level = "deny", priority = -1 }
rust_2021_compatibility = { level = "deny", priority = -1 }
# rust_2024_compatibility = { level = "deny", priority = -1 } # Enable once we are at MSRV 1.81.0
rust_2024_compatibility = { level = "deny", priority = -1 }
edition_2024_expr_fragment_specifier = "allow" # Once changed to Rust 2024 this should be removed and macro's should be validated again
single_use_lifetimes = "deny"
trivial_casts = "deny"
trivial_numeric_casts = "deny"

@@ -222,9 +237,10 @@ unused_import_braces = "deny"
unused_lifetimes = "deny"
unused_qualifications = "deny"
variant_size_differences = "deny"
# The lints below are part of the rust_2024_compatibility group
static-mut-refs = "deny"
unsafe-op-in-unsafe-fn = "deny"
# Allow the following lints since these cause issues with Rust v1.84.0 or newer
# Building Vaultwarden with Rust v1.85.0 and edition 2024 also works without issues
if_let_rescope = "allow"
tail_expr_drop_order = "allow"

# https://rust-lang.github.io/rust-clippy/stable/index.html
[lints.clippy]
202  README.md

@@ -1,102 +1,144 @@
### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/download/)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.


📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.
An alternative server implementation of the Bitwarden Client API, written in Rust and compatible with [official Bitwarden clients](https://bitwarden.com/download/) [[disclaimer](#disclaimer)], perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.

---
[](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml)
[](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden)
[](https://hub.docker.com/r/vaultwarden/server)
[](https://quay.io/repository/vaultwarden/server)
[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
[](https://github.com/dani-garcia/vaultwarden/releases/latest)
[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
[](https://matrix.to/#/#vaultwarden:matrix.org)

Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).
[](https://github.com/dani-garcia/vaultwarden/releases/latest)
[](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden)
[](https://hub.docker.com/r/vaultwarden/server)
[](https://quay.io/repository/vaultwarden/server) <br>
[](https://github.com/dani-garcia/vaultwarden/graphs/contributors)
[](https://github.com/dani-garcia/vaultwarden/network/members)
[](https://github.com/dani-garcia/vaultwarden/stargazers)
[](https://github.com/dani-garcia/vaultwarden/issues)
[](https://github.com/dani-garcia/vaultwarden/issues?q=is%3Aissue+is%3Aclosed)
[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt) <br>
[%3D'svg'%5D%2F*%5Blocal-name()%3D'g'%5D%5B2%5D%2F*%5Blocal-name()%3D'text'%5D%5B4%5D&style=flat-square&logo=rust&label=dependencies&color=005AA4)](https://deps.rs/repo/github/dani-garcia/vaultwarden)
[](https://github.com/dani-garcia/vaultwarden/actions/workflows/release.yml)
[](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml) <br>
[](https://matrix.to/#/#vaultwarden:matrix.org)
[](https://github.com/dani-garcia/vaultwarden/discussions)
[](https://vaultwarden.discourse.group/)

**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor Bitwarden, Inc.**
> [!IMPORTANT]
> **When using this server, please report any bugs or suggestions directly to us (see [Get in touch](#get-in-touch)), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official Bitwarden support channels.**

#### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.

---
<br>

## Features

Basically full implementation of Bitwarden API is provided including:
A nearly complete implementation of the Bitwarden Client API is provided, including:

* Organizations support
* Attachments and Send
* Vault API support
* Serving the static files for Vault interface
* Website icons API
* Authenticator and U2F support
* YubiKey and Duo support
* Emergency Access
* [Personal Vault](https://bitwarden.com/help/managing-items/)
* [Send](https://bitwarden.com/help/about-send/)
* [Attachments](https://bitwarden.com/help/attachments/)
* [Website icons](https://bitwarden.com/help/website-icons/)
* [Personal API Key](https://bitwarden.com/help/personal-api-key/)
* [Organizations](https://bitwarden.com/help/getting-started-organizations/)
  - [Collections](https://bitwarden.com/help/about-collections/),
    [Password Sharing](https://bitwarden.com/help/sharing/),
    [Member Roles](https://bitwarden.com/help/user-types-access-control/),
    [Groups](https://bitwarden.com/help/about-groups/),
    [Event Logs](https://bitwarden.com/help/event-logs/),
    [Admin Password Reset](https://bitwarden.com/help/admin-reset/),
    [Directory Connector](https://bitwarden.com/help/directory-sync/),
    [Policies](https://bitwarden.com/help/policies/)
* [Multi/Two Factor Authentication](https://bitwarden.com/help/bitwarden-field-guide-two-step-login/)
  - [Authenticator](https://bitwarden.com/help/setup-two-step-login-authenticator/),
    [Email](https://bitwarden.com/help/setup-two-step-login-email/),
    [FIDO2 WebAuthn](https://bitwarden.com/help/setup-two-step-login-fido/),
    [YubiKey](https://bitwarden.com/help/setup-two-step-login-yubikey/),
    [Duo](https://bitwarden.com/help/setup-two-step-login-duo/)
* [Emergency Access](https://bitwarden.com/help/emergency-access/)
* [Vaultwarden Admin Backend](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page)
* [Modified Web Vault client](https://github.com/dani-garcia/bw_web_builds) (Bundled within our containers)

## Installation
Pull the docker image and mount a volume from the host for persistent storage:

```sh
docker pull vaultwarden/server:latest
docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p 80:80 vaultwarden/server:latest
```
This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you.

**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.

This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).

If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above).
<br>

## Usage
See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server.

> [!IMPORTANT]
> Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost.
>
>This can be configured in [Vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)).
>
>If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy or Traefik (see examples linked above).

> [!TIP]
>**For more detailed examples on how to install, use and configure Vaultwarden you can check our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki).**

The main way to use Vaultwarden is via our container images which are published to [ghcr.io](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden), [docker.io](https://hub.docker.com/r/vaultwarden/server) and [quay.io](https://quay.io/repository/vaultwarden/server).

There are also [community driven packages](https://github.com/dani-garcia/vaultwarden/wiki/Third-party-packages) which can be used, but those might be lagging behind the latest version or might deviate in the way Vaultwarden is configured, as described in our [Wiki](https://github.com/dani-garcia/vaultwarden/wiki).

### Docker/Podman CLI

Pull the container image and mount a volume from the host for persistent storage.<br>
You can replace `docker` with `podman` if you prefer to use podman.

```shell
docker pull vaultwarden/server:latest
docker run --detach --name vaultwarden \
  --env DOMAIN="https://vw.domain.tld" \
  --volume /vw-data/:/data/ \
  --restart unless-stopped \
  --publish 80:80 \
  vaultwarden/server:latest
```

This will preserve any persistent data under `/vw-data/`, you can adapt the path to whatever suits you.

### Docker Compose

To use Docker compose you need to create a `compose.yaml` which will hold the configuration to run the Vaultwarden container.

```yaml
services:
  vaultwarden:
    image: vaultwarden/server:latest
    container_name: vaultwarden
    restart: unless-stopped
    environment:
      DOMAIN: "https://vw.domain.tld"
    volumes:
      - ./vw-data/:/data/
    ports:
      - 80:80
```

<br>

## Get in touch
To ask a question, offer suggestions or new features or to get help configuring or installing the software, please use [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [the forum](https://vaultwarden.discourse.group/).

If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure you are on the latest version and there aren't any similar issues open, though!
Have a question, suggestion or need help? Join our community on [Matrix](https://matrix.to/#/#vaultwarden:matrix.org), [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [Discourse Forums](https://vaultwarden.discourse.group/).

If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us!
Encountered a bug or crash? Please search our issue tracker and discussions to see if it's already been reported. If not, please [start a new discussion](https://github.com/dani-garcia/vaultwarden/discussions) or [create a new issue](https://github.com/dani-garcia/vaultwarden/issues/). Ensure you're using the latest version of Vaultwarden and there aren't any similar issues open or closed!

<br>

## Contributors

### Sponsors
Thanks for your contribution to the project!

<!--
<table>
<tr>
<td align="center">
<a href="https://github.com/username">
<img src="https://avatars.githubusercontent.com/u/725423?s=75&v=4" width="75px;" alt="username"/>
<br />
<sub><b>username</b></sub>
</a>
</td>
</tr>
</table>
[](https://github.com/dani-garcia/vaultwarden/graphs/contributors)<br>
[](https://github.com/dani-garcia/vaultwarden/graphs/contributors)

<br/>
-->
<br>

<table>
<tr>
<td align="center">
<a href="https://github.com/themightychris" style="width: 75px">
<sub><b>Chris Alfano</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/numberly" style="width: 75px">
<sub><b>Numberly</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/IQ333777" style="width: 75px">
<sub><b>IQ333777</b></sub>
</a>
</td>
</tr>
</table>
## Disclaimer

**This project is not associated with [Bitwarden](https://bitwarden.com/) or Bitwarden, Inc.**

However, one of the active maintainers for Vaultwarden is employed by Bitwarden and is allowed to contribute to the project on their own time. These contributions are independent of Bitwarden and are reviewed by other maintainers.

The maintainers work together to set the direction for the project, focusing on serving the self-hosting community, including individuals, families, and small organizations, while ensuring the project's sustainability.

**Please note:** We cannot be held liable for any data loss that may occur while using Vaultwarden. This includes passwords, attachments, and other information handled by the application. We highly recommend performing regular backups of your files and database. However, should you experience data loss, we encourage you to contact us immediately.

<br>

## Bitwarden_RS

This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues.<br>
Please see [#1642 - v1.21.0 release and project rename to Vaultwarden](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.
@@ -21,7 +21,7 @@ notify us. We welcome working with you to resolve the issue promptly. Thanks in
The following bug classes are out-of scope:

- Bugs that are already reported on Vaultwarden's issue tracker (https://github.com/dani-garcia/vaultwarden/issues)
- Bugs that are not part of Vaultwarden, like on the the web-vault or mobile and desktop clients. These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden to which we are not associated
- Bugs that are not part of Vaultwarden, like on the web-vault or mobile and desktop clients. These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden to which we are not associated
- Issues in an upstream software dependency (ex: Rust, or External Libraries) which are already reported to the upstream maintainer
- Attacks requiring physical access to a user's device
- Issues related to software or protocols not under Vaultwarden's control
@@ -1,13 +1,13 @@
---
vault_version: "v2024.6.2c"
vault_image_digest: "sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b"
# Cross Compile Docker Helper Scripts v1.5.0
vault_version: "v2025.1.1"
vault_image_digest: "sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918"
# Cross Compile Docker Helper Scripts v1.6.1
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
# https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
xx_image_digest: "sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa"
rust_version: 1.82.0 # Rust version to be used
xx_image_digest: "sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894"
rust_version: 1.84.1 # Rust version to be used
debian_version: bookworm # Debian release name to be used
alpine_version: "3.20" # Alpine version to be used
alpine_version: "3.21" # Alpine version to be used
# For which platforms/architectures will we try to build images
platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
# Determine the build images per OS/Arch
@@ -19,23 +19,23 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
#     [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
#     $ docker pull docker.io/vaultwarden/web-vault:v2025.1.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.1.1
#     [docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
#     [docker.io/vaultwarden/web-vault:v2024.6.2c]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918
#     [docker.io/vaultwarden/web-vault:v2025.1.1]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918 AS vault

########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
## And for Alpine we define all build images here, they will only be loaded when actually used
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.82.0 AS build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.82.0 AS build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.82.0 AS build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.82.0 AS build_armv6
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.84.1 AS build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.84.1 AS build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.84.1 AS build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.84.1 AS build_armv6

########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006

@@ -76,6 +76,7 @@ RUN source /env-cargo && \

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
COPY ./macros ./macros

ARG CARGO_PROFILE=release

@@ -126,7 +127,7 @@ RUN source /env-cargo && \
# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
#
# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.20
FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.21

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
@@ -19,24 +19,24 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
#     [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
#     $ docker pull docker.io/vaultwarden/web-vault:v2025.1.1
#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.1.1
#     [docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
#     [docker.io/vaultwarden/web-vault:v2024.6.2c]
#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918
#     [docker.io/vaultwarden/web-vault:v2025.1.1]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:cb6b2095a4afc1d9d243a33f6d09211f40e3d82c7ae829fd025df5ff175a4918 AS vault

########################## Cross Compile Docker Helper Scripts ##########################
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
## And these bash scripts do not have any significant difference if at all
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa AS xx
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894 AS xx

########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.82.0-slim-bookworm AS build
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.84.1-slim-bookworm AS build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT

@@ -116,6 +116,7 @@ RUN source /env-cargo && \

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
COPY ./macros ./macros

ARG CARGO_PROFILE=release
@@ -143,6 +143,7 @@ RUN source /env-cargo && \

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
COPY ./macros ./macros

ARG CARGO_PROFILE=release
@@ -46,7 +46,7 @@ There also is an option to use an other docker container to provide support for
```bash
# To install and activate
docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
# To unistall
# To uninstall
docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
```
@@ -17,7 +17,7 @@ variable "SOURCE_REPOSITORY_URL" {
  default = null
}

// The commit hash of of the current commit this build was triggered on
// The commit hash of the current commit this build was triggered on
variable "SOURCE_COMMIT" {
  default = null
}
13  macros/Cargo.toml  Normal file

@@ -0,0 +1,13 @@
[package]
name = "macros"
version = "0.1.0"
edition = "2021"

[lib]
name = "macros"
path = "src/lib.rs"
proc-macro = true

[dependencies]
quote = "1.0.38"
syn = "2.0.96"
58  macros/src/lib.rs  Normal file

@@ -0,0 +1,58 @@
extern crate proc_macro;

use proc_macro::TokenStream;
use quote::quote;

#[proc_macro_derive(UuidFromParam)]
pub fn derive_uuid_from_param(input: TokenStream) -> TokenStream {
    let ast = syn::parse(input).unwrap();

    impl_derive_uuid_macro(&ast)
}

fn impl_derive_uuid_macro(ast: &syn::DeriveInput) -> TokenStream {
    let name = &ast.ident;
    let gen = quote! {
        #[automatically_derived]
        impl<'r> rocket::request::FromParam<'r> for #name {
            type Error = ();

            #[inline(always)]
            fn from_param(param: &'r str) -> Result<Self, Self::Error> {
                if uuid::Uuid::parse_str(param).is_ok() {
                    Ok(Self(param.to_string()))
                } else {
                    Err(())
                }
            }
        }
    };
    gen.into()
}

#[proc_macro_derive(IdFromParam)]
pub fn derive_id_from_param(input: TokenStream) -> TokenStream {
    let ast = syn::parse(input).unwrap();

    impl_derive_safestring_macro(&ast)
}

fn impl_derive_safestring_macro(ast: &syn::DeriveInput) -> TokenStream {
    let name = &ast.ident;
    let gen = quote! {
        #[automatically_derived]
        impl<'r> rocket::request::FromParam<'r> for #name {
            type Error = ();

            #[inline(always)]
            fn from_param(param: &'r str) -> Result<Self, Self::Error> {
                if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) {
                    Ok(Self(param.to_string()))
                } else {
                    Err(())
                }
            }
        }
    };
    gen.into()
}
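A minimal usage sketch (not part of this diff) of how these new derives might be consumed from the main crate, which already depends on `rocket` and `uuid`; the `OrgId`/`DeviceId` newtypes and the commented route are hypothetical examples:

```rust
// Hypothetical consumer of the new `macros` crate (sketch, not from the diff).
use macros::{IdFromParam, UuidFromParam};

// A tuple struct over String, matching the `Self(param.to_string())` the derive generates.
// The generated FromParam impl only accepts strings that parse as a UUID.
#[derive(UuidFromParam)]
struct OrgId(String);

// IdFromParam only accepts [A-Za-z0-9-], useful for non-UUID identifiers.
#[derive(IdFromParam)]
struct DeviceId(String);

// With the derives in place, Rocket validates the path parameter during extraction, e.g.:
// #[get("/organizations/<org_id>")]
// fn get_org(org_id: OrgId) { /* org_id is guaranteed to be a well-formed UUID string */ }
```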
5  migrations/mysql/2025-01-09-172300_add_manage/up.sql  Normal file

@@ -0,0 +1,5 @@
ALTER TABLE users_collections
ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE;

ALTER TABLE collections_groups
ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE;

@@ -0,0 +1,5 @@
ALTER TABLE users_collections
ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE;

ALTER TABLE collections_groups
ADD COLUMN manage BOOLEAN NOT NULL DEFAULT FALSE;
5
migrations/sqlite/2025-01-09-172300_add_manage/up.sql
Normal file
5
migrations/sqlite/2025-01-09-172300_add_manage/up.sql
Normal file
@@ -0,0 +1,5 @@
ALTER TABLE users_collections
ADD COLUMN manage BOOLEAN NOT NULL DEFAULT 0; -- FALSE

ALTER TABLE collections_groups
ADD COLUMN manage BOOLEAN NOT NULL DEFAULT 0; -- FALSE
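These migrations add the same `manage` flag for each supported database; only the default literal differs because SQLite stores booleans as integers. For orientation, this is roughly how the new column would appear in a Diesel `table!` declaration on the Rust side. This is a sketch under assumptions: the other column names and the composite primary key are guesses based on the table name, not copied from the project's `schema.rs`.

```rust
// Sketch only: assumed shape of users_collections after this migration.
// Every column except `manage` is an assumption made for illustration.
diesel::table! {
    users_collections (user_uuid, collection_uuid) {
        user_uuid -> Text,
        collection_uuid -> Text,
        read_only -> Bool,
        hide_passwords -> Bool,
        manage -> Bool,
    }
}
```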
resources/vaultwarden-logo-auto.svg (new file, 78 lines)
@@ -0,0 +1,78 @@
<svg width="1365.8256" height="280.48944" version="1.1" viewBox="0 0 1365.8255 280.48944" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<style>
@media (prefers-color-scheme: dark) {
svg { -webkit-filter:invert(0.90); filter:invert(0.90); }
}</style>
<title>Vaultwarden Logo</title>
<!-- remaining ~70 lines: the "aultwarden" wordmark paths and the circular shield with its rotated tick and notch masks (logo artwork) -->
</svg>
After: Size 7.6 KiB
@@ -1,4 +1,4 @@
[toolchain]
-channel = "1.82.0"
+channel = "1.84.1"
components = [ "rustfmt", "clippy" ]
profile = "minimal"
src/api/admin.rs (154 lines changed)
@@ -50,7 +50,7 @@ pub fn routes() -> Vec<Route> {
        disable_user,
        enable_user,
        remove_2fa,
-        update_user_org_type,
+        update_membership_type,
        update_revision_users,
        post_config,
        delete_config,
@@ -62,6 +62,7 @@ pub fn routes() -> Vec<Route> {
        diagnostics,
        get_diagnostics_config,
        resend_user_invite,
+        get_diagnostics_http,
    ]
}
@@ -98,6 +99,7 @@ const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z";
const BASE_TEMPLATE: &str = "admin/base";

const ACTING_ADMIN_USER: &str = "vaultwarden-admin-00000-000000000000";
+pub const FAKE_ADMIN_UUID: &str = "00000000-0000-0000-0000-000000000000";

fn admin_path() -> String {
    format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
@@ -169,7 +171,7 @@ struct LoginForm {
    redirect: Option<String>,
}

-#[post("/", data = "<data>")]
+#[post("/", format = "application/x-www-form-urlencoded", data = "<data>")]
fn post_admin_login(
    data: Form<LoginForm>,
    cookies: &CookieJar<'_>,
@@ -279,15 +281,15 @@ struct InviteData {
    email: String,
}

-async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
-    if let Some(user) = User::find_by_uuid(uuid, conn).await {
+async fn get_user_or_404(user_id: &UserId, conn: &mut DbConn) -> ApiResult<User> {
+    if let Some(user) = User::find_by_uuid(user_id, conn).await {
        Ok(user)
    } else {
        err_code!("User doesn't exist", Status::NotFound.code);
    }
}

-#[post("/invite", data = "<data>")]
+#[post("/invite", format = "application/json", data = "<data>")]
async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
    let data: InviteData = data.into_inner();
    if User::find_by_mail(&data.email, &mut conn).await.is_some() {
@@ -298,7 +300,9 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbCon

async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
    if CONFIG.mail_enabled() {
-        mail::send_invite(user, None, None, &CONFIG.invitation_org_name(), None).await
+        let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
+        let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
+        mail::send_invite(user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
    } else {
        let invitation = Invitation::new(&user.email);
        invitation.save(conn).await
@@ -311,7 +315,7 @@ async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbCon
    Ok(Json(user.to_json(&mut conn).await))
}

-#[post("/test/smtp", data = "<data>")]
+#[post("/test/smtp", format = "application/json", data = "<data>")]
async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
    let data: InviteData = data.into_inner();
@@ -380,29 +384,29 @@ async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn)
|
||||
}
|
||||
}
|
||||
|
||||
#[get("/users/<uuid>")]
|
||||
async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
||||
let u = get_user_or_404(uuid, &mut conn).await?;
|
||||
#[get("/users/<user_id>")]
|
||||
async fn get_user_json(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
||||
let u = get_user_or_404(&user_id, &mut conn).await?;
|
||||
let mut usr = u.to_json(&mut conn).await;
|
||||
usr["userEnabled"] = json!(u.enabled);
|
||||
usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
||||
Ok(Json(usr))
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/delete")]
|
||||
async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let user = get_user_or_404(uuid, &mut conn).await?;
|
||||
#[post("/users/<user_id>/delete", format = "application/json")]
|
||||
async fn delete_user(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let user = get_user_or_404(&user_id, &mut conn).await?;
|
||||
|
||||
// Get the user_org records before deleting the actual user
|
||||
let user_orgs = UserOrganization::find_any_state_by_user(uuid, &mut conn).await;
|
||||
// Get the membership records before deleting the actual user
|
||||
let memberships = Membership::find_any_state_by_user(&user_id, &mut conn).await;
|
||||
let res = user.delete(&mut conn).await;
|
||||
|
||||
for user_org in user_orgs {
|
||||
for membership in memberships {
|
||||
log_event(
|
||||
EventType::OrganizationUserRemoved as i32,
|
||||
&user_org.uuid,
|
||||
&user_org.org_uuid,
|
||||
ACTING_ADMIN_USER,
|
||||
EventType::OrganizationUserDeleted as i32,
|
||||
&membership.uuid,
|
||||
&membership.org_uuid,
|
||||
&ACTING_ADMIN_USER.into(),
|
||||
14, // Use UnknownBrowser type
|
||||
&token.ip.ip,
|
||||
&mut conn,
|
||||
@@ -413,9 +417,9 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe
|
||||
res
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/deauth")]
|
||||
async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
||||
#[post("/users/<user_id>/deauth", format = "application/json")]
|
||||
async fn deauth_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&user_id, &mut conn).await?;
|
||||
|
||||
nt.send_logout(&user, None).await;
|
||||
|
||||
@@ -434,9 +438,9 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/disable")]
|
||||
async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
||||
#[post("/users/<user_id>/disable", format = "application/json")]
|
||||
async fn disable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&user_id, &mut conn).await?;
|
||||
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||
user.reset_security_stamp();
|
||||
user.enabled = false;
|
||||
@@ -448,33 +452,35 @@ async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Noti
|
||||
save_result
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/enable")]
|
||||
async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
||||
#[post("/users/<user_id>/enable", format = "application/json")]
|
||||
async fn enable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&user_id, &mut conn).await?;
|
||||
user.enabled = true;
|
||||
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/remove-2fa")]
|
||||
async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(uuid, &mut conn).await?;
|
||||
#[post("/users/<user_id>/remove-2fa", format = "application/json")]
|
||||
async fn remove_2fa(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&user_id, &mut conn).await?;
|
||||
TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||
two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
|
||||
two_factor::enforce_2fa_policy(&user, &ACTING_ADMIN_USER.into(), 14, &token.ip.ip, &mut conn).await?;
|
||||
user.totp_recover = None;
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/invite/resend")]
|
||||
async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
if let Some(user) = User::find_by_uuid(uuid, &mut conn).await {
|
||||
#[post("/users/<user_id>/invite/resend", format = "application/json")]
|
||||
async fn resend_user_invite(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
if let Some(user) = User::find_by_uuid(&user_id, &mut conn).await {
|
||||
//TODO: replace this with user.status check when it will be available (PR#3397)
|
||||
if !user.password_hash.is_empty() {
|
||||
err_code!("User already accepted invitation", Status::BadRequest.code);
|
||||
}
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_invite(&user, None, None, &CONFIG.invitation_org_name(), None).await
|
||||
let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into();
|
||||
let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into();
|
||||
mail::send_invite(&user, org_id, member_id, &CONFIG.invitation_org_name(), None).await
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
@@ -484,42 +490,41 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct UserOrgTypeData {
|
||||
struct MembershipTypeData {
|
||||
user_type: NumberOrString,
|
||||
user_uuid: String,
|
||||
org_uuid: String,
|
||||
user_uuid: UserId,
|
||||
org_uuid: OrganizationId,
|
||||
}
|
||||
|
||||
#[post("/users/org_type", data = "<data>")]
|
||||
async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let data: UserOrgTypeData = data.into_inner();
|
||||
#[post("/users/org_type", format = "application/json", data = "<data>")]
|
||||
async fn update_membership_type(data: Json<MembershipTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let data: MembershipTypeData = data.into_inner();
|
||||
|
||||
let mut user_to_edit =
|
||||
match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("The specified user isn't member of the organization"),
|
||||
};
|
||||
let Some(mut member_to_edit) = Membership::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await
|
||||
else {
|
||||
err!("The specified user isn't member of the organization")
|
||||
};
|
||||
|
||||
let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
|
||||
let new_type = match MembershipType::from_str(&data.user_type.into_string()) {
|
||||
Some(new_type) => new_type as i32,
|
||||
None => err!("Invalid type"),
|
||||
};
|
||||
|
||||
if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
|
||||
if member_to_edit.atype == MembershipType::Owner && new_type != MembershipType::Owner {
|
||||
// Removing owner permission, check that there is at least one other confirmed owner
|
||||
if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 {
|
||||
if Membership::count_confirmed_by_org_and_type(&data.org_uuid, MembershipType::Owner, &mut conn).await <= 1 {
|
||||
err!("Can't change the type of the last owner")
|
||||
}
|
||||
}
|
||||
|
||||
// This check is also done at api::organizations::{accept_invite(), _confirm_invite, _activate_user(), edit_user()}, update_user_org_type
|
||||
// This check is also done at api::organizations::{accept_invite, _confirm_invite, _activate_member, edit_member}, update_membership_type
|
||||
// It returns different error messages per function.
|
||||
if new_type < UserOrgType::Admin {
|
||||
match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
|
||||
if new_type < MembershipType::Admin {
|
||||
match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &member_to_edit.org_uuid, true, &mut conn).await {
|
||||
Ok(_) => {}
|
||||
Err(OrgPolicyErr::TwoFactorMissing) => {
|
||||
if CONFIG.email_2fa_auto_fallback() {
|
||||
two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?;
|
||||
two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &mut conn).await?;
|
||||
} else {
|
||||
err!("You cannot modify this user to this type because they have not setup 2FA");
|
||||
}
|
||||
@@ -532,20 +537,20 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
|
||||
|
||||
log_event(
|
||||
EventType::OrganizationUserUpdated as i32,
|
||||
&user_to_edit.uuid,
|
||||
&member_to_edit.uuid,
|
||||
&data.org_uuid,
|
||||
ACTING_ADMIN_USER,
|
||||
&ACTING_ADMIN_USER.into(),
|
||||
14, // Use UnknownBrowser type
|
||||
&token.ip.ip,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
user_to_edit.atype = new_type;
|
||||
user_to_edit.save(&mut conn).await
|
||||
member_to_edit.atype = new_type;
|
||||
member_to_edit.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/update_revision")]
|
||||
#[post("/users/update_revision", format = "application/json")]
|
||||
async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
User::update_all_revisions(&mut conn).await
|
||||
}
|
||||
@@ -556,7 +561,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
|
||||
let mut organizations_json = Vec::with_capacity(organizations.len());
|
||||
for o in organizations {
|
||||
let mut org = o.to_json();
|
||||
org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await);
|
||||
org["user_count"] = json!(Membership::count_by_org(&o.uuid, &mut conn).await);
|
||||
org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await);
|
||||
org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &mut conn).await);
|
||||
org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
|
||||
@@ -570,9 +575,9 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
|
||||
Ok(Html(text))
|
||||
}
|
||||
|
||||
#[post("/organizations/<uuid>/delete")]
|
||||
async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let org = Organization::find_by_uuid(uuid, &mut conn).await.map_res("Organization doesn't exist")?;
|
||||
#[post("/organizations/<org_id>/delete", format = "application/json")]
|
||||
async fn delete_organization(org_id: OrganizationId, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let org = Organization::find_by_uuid(&org_id, &mut conn).await.map_res("Organization doesn't exist")?;
|
||||
org.delete(&mut conn).await
|
||||
}
|
||||
|
||||
@@ -601,9 +606,8 @@ async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
|
||||
}
|
||||
|
||||
async fn has_http_access() -> bool {
|
||||
let req = match make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") {
|
||||
Ok(r) => r,
|
||||
Err(_) => return false,
|
||||
let Ok(req) = make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") else {
|
||||
return false;
|
||||
};
|
||||
match req.send().await {
|
||||
Ok(r) => r.status().is_success(),
|
||||
@@ -713,6 +717,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
||||
"ip_header_name": ip_header_name,
|
||||
"ip_header_config": &CONFIG.ip_header(),
|
||||
"uses_proxy": uses_proxy,
|
||||
"enable_websocket": &CONFIG.enable_websocket(),
|
||||
"db_type": *DB_TYPE,
|
||||
"db_version": get_sql_server_version(&mut conn).await,
|
||||
"admin_url": format!("{}/diagnostics", admin_url()),
|
||||
@@ -728,22 +733,27 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
|
||||
Ok(Html(text))
|
||||
}
|
||||
|
||||
#[get("/diagnostics/config")]
|
||||
#[get("/diagnostics/config", format = "application/json")]
|
||||
fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
|
||||
let support_json = CONFIG.get_support_json();
|
||||
Json(support_json)
|
||||
}
|
||||
|
||||
#[post("/config", data = "<data>")]
|
||||
#[get("/diagnostics/http?<code>")]
|
||||
fn get_diagnostics_http(code: u16, _token: AdminToken) -> EmptyResult {
|
||||
err_code!(format!("Testing error {code} response"), code);
|
||||
}
|
||||
|
||||
#[post("/config", format = "application/json", data = "<data>")]
|
||||
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
|
||||
let data: ConfigBuilder = data.into_inner();
|
||||
if let Err(e) = CONFIG.update_config(data) {
|
||||
if let Err(e) = CONFIG.update_config(data, true) {
|
||||
err!(format!("Unable to save config: {e:?}"))
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[post("/config/delete")]
|
||||
#[post("/config/delete", format = "application/json")]
|
||||
fn delete_config(_token: AdminToken) -> EmptyResult {
|
||||
if let Err(e) = CONFIG.delete_user_config() {
|
||||
err!(format!("Unable to delete config: {e:?}"))
|
||||
@@ -751,7 +761,7 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[post("/config/backup_db")]
|
||||
#[post("/config/backup_db", format = "application/json")]
|
||||
async fn backup_db(_token: AdminToken, mut conn: DbConn) -> ApiResult<String> {
|
||||
if *CAN_BACKUP {
|
||||
match backup_database(&mut conn).await {
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use crate::db::DbPool;
|
||||
use chrono::{SecondsFormat, Utc};
|
||||
use chrono::Utc;
|
||||
use rocket::serde::json::Json;
|
||||
use serde_json::Value;
|
||||
|
||||
@@ -13,7 +15,7 @@ use crate::{
|
||||
crypto,
|
||||
db::{models::*, DbConn},
|
||||
mail,
|
||||
util::NumberOrString,
|
||||
util::{format_date, NumberOrString},
|
||||
CONFIG,
|
||||
};
|
||||
|
||||
@@ -28,6 +30,7 @@ pub fn routes() -> Vec<rocket::Route> {
|
||||
profile,
|
||||
put_profile,
|
||||
post_profile,
|
||||
put_avatar,
|
||||
get_public_keys,
|
||||
post_keys,
|
||||
post_password,
|
||||
@@ -40,9 +43,8 @@ pub fn routes() -> Vec<rocket::Route> {
|
||||
post_verify_email_token,
|
||||
post_delete_recover,
|
||||
post_delete_recover_token,
|
||||
post_device_token,
|
||||
delete_account,
|
||||
post_delete_account,
|
||||
delete_account,
|
||||
revision_date,
|
||||
password_hint,
|
||||
prelogin,
|
||||
@@ -50,7 +52,9 @@ pub fn routes() -> Vec<rocket::Route> {
|
||||
api_key,
|
||||
rotate_api_key,
|
||||
get_known_device,
|
||||
put_avatar,
|
||||
get_all_devices,
|
||||
get_device,
|
||||
post_device_token,
|
||||
put_device_token,
|
||||
put_clear_device_token,
|
||||
post_clear_device_token,
|
||||
@@ -77,7 +81,7 @@ pub struct RegisterData {
|
||||
name: Option<String>,
|
||||
token: Option<String>,
|
||||
#[allow(dead_code)]
|
||||
organization_user_id: Option<String>,
|
||||
organization_user_id: Option<MembershipId>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
@@ -104,15 +108,15 @@ fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
async fn is_email_2fa_required(org_user_uuid: Option<String>, conn: &mut DbConn) -> bool {
|
||||
async fn is_email_2fa_required(member_id: Option<MembershipId>, conn: &mut DbConn) -> bool {
|
||||
if !CONFIG._enable_email_2fa() {
|
||||
return false;
|
||||
}
|
||||
if CONFIG.email_2fa_enforce_on_verified_invite() {
|
||||
return true;
|
||||
}
|
||||
if org_user_uuid.is_some() {
|
||||
return OrgPolicy::is_enabled_for_member(&org_user_uuid.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn)
|
||||
if member_id.is_some() {
|
||||
return OrgPolicy::is_enabled_for_member(&member_id.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn)
|
||||
.await;
|
||||
}
|
||||
false
|
||||
@@ -159,9 +163,9 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
|
||||
err!("Registration email does not match invite email")
|
||||
}
|
||||
} else if Invitation::take(&email, &mut conn).await {
|
||||
for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
|
||||
user_org.status = UserOrgStatus::Accepted as i32;
|
||||
user_org.save(&mut conn).await?;
|
||||
for membership in Membership::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
|
||||
membership.status = MembershipStatus::Accepted as i32;
|
||||
membership.save(&mut conn).await?;
|
||||
}
|
||||
user
|
||||
} else if CONFIG.is_signup_allowed(&email)
|
||||
@@ -303,9 +307,9 @@ async fn put_avatar(data: Json<AvatarData>, headers: Headers, mut conn: DbConn)
|
||||
Ok(Json(user.to_json(&mut conn).await))
|
||||
}
|
||||
|
||||
#[get("/users/<uuid>/public-key")]
|
||||
async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let user = match User::find_by_uuid(uuid, &mut conn).await {
|
||||
#[get("/users/<user_id>/public-key")]
|
||||
async fn get_public_keys(user_id: UserId, _headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let user = match User::find_by_uuid(&user_id, &mut conn).await {
|
||||
Some(user) if user.public_key.is_some() => user,
|
||||
Some(_) => err_code!("User has no public_key", Status::NotFound.code),
|
||||
None => err_code!("User doesn't exist", Status::NotFound.code),
|
||||
@@ -364,7 +368,12 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, mut conn: D
|
||||
&data.new_master_password_hash,
|
||||
Some(data.key),
|
||||
true,
|
||||
Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
|
||||
Some(vec![
|
||||
String::from("post_rotatekey"),
|
||||
String::from("get_contacts"),
|
||||
String::from("get_public_keys"),
|
||||
String::from("get_api_webauthn"),
|
||||
]),
|
||||
);
|
||||
|
||||
let save_result = user.save(&mut conn).await;
|
||||
@@ -372,7 +381,7 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, mut conn: D
|
||||
// Prevent logging out the client where the user requested this endpoint from.
|
||||
// If you do logout the user it will causes issues at the client side.
|
||||
// Adding the device uuid will prevent this.
|
||||
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
||||
nt.send_logout(&user, Some(headers.device.uuid.clone())).await;
|
||||
|
||||
save_result
|
||||
}
|
||||
@@ -432,7 +441,7 @@ async fn post_kdf(data: Json<ChangeKdfData>, headers: Headers, mut conn: DbConn,
|
||||
user.set_password(&data.new_master_password_hash, Some(data.key), true, None);
|
||||
let save_result = user.save(&mut conn).await;
|
||||
|
||||
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
||||
nt.send_logout(&user, Some(headers.device.uuid.clone())).await;
|
||||
|
||||
save_result
|
||||
}
|
||||
@@ -443,21 +452,21 @@ struct UpdateFolderData {
|
||||
// There is a bug in 2024.3.x which adds a `null` item.
|
||||
// To bypass this we allow a Option here, but skip it during the updates
|
||||
// See: https://github.com/bitwarden/clients/issues/8453
|
||||
id: Option<String>,
|
||||
id: Option<FolderId>,
|
||||
name: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct UpdateEmergencyAccessData {
|
||||
id: String,
|
||||
id: EmergencyAccessId,
|
||||
key_encrypted: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct UpdateResetPasswordData {
|
||||
organization_id: String,
|
||||
organization_id: OrganizationId,
|
||||
reset_password_key: String,
|
||||
}
|
||||
|
||||
@@ -477,6 +486,61 @@ struct KeyData {
|
||||
private_key: String,
|
||||
}
|
||||
|
||||
fn validate_keydata(
|
||||
data: &KeyData,
|
||||
existing_ciphers: &[Cipher],
|
||||
existing_folders: &[Folder],
|
||||
existing_emergency_access: &[EmergencyAccess],
|
||||
existing_memberships: &[Membership],
|
||||
existing_sends: &[Send],
|
||||
) -> EmptyResult {
|
||||
// Check that we're correctly rotating all the user's ciphers
|
||||
let existing_cipher_ids = existing_ciphers.iter().map(|c| &c.uuid).collect::<HashSet<&CipherId>>();
|
||||
let provided_cipher_ids = data
|
||||
.ciphers
|
||||
.iter()
|
||||
.filter(|c| c.organization_id.is_none())
|
||||
.filter_map(|c| c.id.as_ref())
|
||||
.collect::<HashSet<&CipherId>>();
|
||||
if !provided_cipher_ids.is_superset(&existing_cipher_ids) {
|
||||
err!("All existing ciphers must be included in the rotation")
|
||||
}
|
||||
|
||||
// Check that we're correctly rotating all the user's folders
|
||||
let existing_folder_ids = existing_folders.iter().map(|f| &f.uuid).collect::<HashSet<&FolderId>>();
|
||||
let provided_folder_ids = data.folders.iter().filter_map(|f| f.id.as_ref()).collect::<HashSet<&FolderId>>();
|
||||
if !provided_folder_ids.is_superset(&existing_folder_ids) {
|
||||
err!("All existing folders must be included in the rotation")
|
||||
}
|
||||
|
||||
// Check that we're correctly rotating all the user's emergency access keys
|
||||
let existing_emergency_access_ids =
|
||||
existing_emergency_access.iter().map(|ea| &ea.uuid).collect::<HashSet<&EmergencyAccessId>>();
|
||||
let provided_emergency_access_ids =
|
||||
data.emergency_access_keys.iter().map(|ea| &ea.id).collect::<HashSet<&EmergencyAccessId>>();
|
||||
if !provided_emergency_access_ids.is_superset(&existing_emergency_access_ids) {
|
||||
err!("All existing emergency access keys must be included in the rotation")
|
||||
}
|
||||
|
||||
// Check that we're correctly rotating all the user's reset password keys
|
||||
let existing_reset_password_ids =
|
||||
existing_memberships.iter().map(|m| &m.org_uuid).collect::<HashSet<&OrganizationId>>();
|
||||
let provided_reset_password_ids =
|
||||
data.reset_password_keys.iter().map(|rp| &rp.organization_id).collect::<HashSet<&OrganizationId>>();
|
||||
if !provided_reset_password_ids.is_superset(&existing_reset_password_ids) {
|
||||
err!("All existing reset password keys must be included in the rotation")
|
||||
}
|
||||
|
||||
// Check that we're correctly rotating all the user's sends
|
||||
let existing_send_ids = existing_sends.iter().map(|s| &s.uuid).collect::<HashSet<&SendId>>();
|
||||
let provided_send_ids = data.sends.iter().filter_map(|s| s.id.as_ref()).collect::<HashSet<&SendId>>();
|
||||
if !provided_send_ids.is_superset(&existing_send_ids) {
|
||||
err!("All existing sends must be included in the rotation")
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[post("/accounts/key", data = "<data>")]
|
||||
async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
// TODO: See if we can wrap everything within a SQL Transaction. If something fails it should revert everything.
|
||||
@@ -492,22 +556,36 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
|
||||
Cipher::validate_cipher_data(&data.ciphers)?;
|
||||
|
||||
let user_uuid = &headers.user.uuid;
|
||||
let user_id = &headers.user.uuid;
|
||||
|
||||
// TODO: Ideally we'd do everything after this point in a single transaction.
|
||||
|
||||
let mut existing_ciphers = Cipher::find_owned_by_user(user_id, &mut conn).await;
|
||||
let mut existing_folders = Folder::find_by_user(user_id, &mut conn).await;
|
||||
let mut existing_emergency_access = EmergencyAccess::find_all_by_grantor_uuid(user_id, &mut conn).await;
|
||||
let mut existing_memberships = Membership::find_by_user(user_id, &mut conn).await;
|
||||
// We only rotate the reset password key if it is set.
|
||||
existing_memberships.retain(|m| m.reset_password_key.is_some());
|
||||
let mut existing_sends = Send::find_by_user(user_id, &mut conn).await;
|
||||
|
||||
validate_keydata(
|
||||
&data,
|
||||
&existing_ciphers,
|
||||
&existing_folders,
|
||||
&existing_emergency_access,
|
||||
&existing_memberships,
|
||||
&existing_sends,
|
||||
)?;
|
||||
|
||||
// Update folder data
|
||||
for folder_data in data.folders {
|
||||
// Skip `null` folder id entries.
|
||||
// See: https://github.com/bitwarden/clients/issues/8453
|
||||
if let Some(folder_id) = folder_data.id {
|
||||
let mut saved_folder = match Folder::find_by_uuid(&folder_id, &mut conn).await {
|
||||
Some(folder) => folder,
|
||||
None => err!("Folder doesn't exist"),
|
||||
let Some(saved_folder) = existing_folders.iter_mut().find(|f| f.uuid == folder_id) else {
|
||||
err!("Folder doesn't exist")
|
||||
};
|
||||
|
||||
if &saved_folder.user_uuid != user_uuid {
|
||||
err!("The folder is not owned by the user")
|
||||
}
|
||||
|
||||
saved_folder.name = folder_data.name;
|
||||
saved_folder.save(&mut conn).await?
|
||||
}
|
||||
@@ -515,12 +593,11 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
|
||||
// Update emergency access data
|
||||
for emergency_access_data in data.emergency_access_keys {
|
||||
let mut saved_emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(&emergency_access_data.id, user_uuid, &mut conn).await
|
||||
{
|
||||
Some(emergency_access) => emergency_access,
|
||||
None => err!("Emergency access doesn't exist or is not owned by the user"),
|
||||
};
|
||||
let Some(saved_emergency_access) =
|
||||
existing_emergency_access.iter_mut().find(|ea| ea.uuid == emergency_access_data.id)
|
||||
else {
|
||||
err!("Emergency access doesn't exist or is not owned by the user")
|
||||
};
|
||||
|
||||
saved_emergency_access.key_encrypted = Some(emergency_access_data.key_encrypted);
|
||||
saved_emergency_access.save(&mut conn).await?
|
||||
@@ -528,26 +605,23 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
|
||||
// Update reset password data
|
||||
for reset_password_data in data.reset_password_keys {
|
||||
let mut user_org =
|
||||
match UserOrganization::find_by_user_and_org(user_uuid, &reset_password_data.organization_id, &mut conn)
|
||||
.await
|
||||
{
|
||||
Some(reset_password) => reset_password,
|
||||
None => err!("Reset password doesn't exist"),
|
||||
};
|
||||
let Some(membership) =
|
||||
existing_memberships.iter_mut().find(|m| m.org_uuid == reset_password_data.organization_id)
|
||||
else {
|
||||
err!("Reset password doesn't exist")
|
||||
};
|
||||
|
||||
user_org.reset_password_key = Some(reset_password_data.reset_password_key);
|
||||
user_org.save(&mut conn).await?
|
||||
membership.reset_password_key = Some(reset_password_data.reset_password_key);
|
||||
membership.save(&mut conn).await?
|
||||
}
|
||||
|
||||
// Update send data
|
||||
for send_data in data.sends {
|
||||
let mut send = match Send::find_by_uuid(send_data.id.as_ref().unwrap(), &mut conn).await {
|
||||
Some(send) => send,
|
||||
None => err!("Send doesn't exist"),
|
||||
let Some(send) = existing_sends.iter_mut().find(|s| &s.uuid == send_data.id.as_ref().unwrap()) else {
|
||||
err!("Send doesn't exist")
|
||||
};
|
||||
|
||||
update_send_from_data(&mut send, send_data, &headers, &mut conn, &nt, UpdateType::None).await?;
|
||||
update_send_from_data(send, send_data, &headers, &mut conn, &nt, UpdateType::None).await?;
|
||||
}
|
||||
|
||||
// Update cipher data
|
||||
@@ -555,20 +629,15 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
|
||||
for cipher_data in data.ciphers {
|
||||
if cipher_data.organization_id.is_none() {
|
||||
let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.id.as_ref().unwrap(), &mut conn).await {
|
||||
Some(cipher) => cipher,
|
||||
None => err!("Cipher doesn't exist"),
|
||||
let Some(saved_cipher) = existing_ciphers.iter_mut().find(|c| &c.uuid == cipher_data.id.as_ref().unwrap())
|
||||
else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
|
||||
err!("The cipher is not owned by the user")
|
||||
}
|
||||
|
||||
// Prevent triggering cipher updates via WebSockets by settings UpdateType::None
|
||||
// The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
|
||||
// We force the users to logout after the user has been saved to try and prevent these issues.
|
||||
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None)
|
||||
.await?
|
||||
update_cipher_from_data(saved_cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?
|
||||
}
|
||||
}
|
||||
|
||||
@@ -584,7 +653,7 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
|
||||
// Prevent logging out the client where the user requested this endpoint from.
|
||||
// If you do logout the user it will causes issues at the client side.
|
||||
// Adding the device uuid will prevent this.
|
||||
nt.send_logout(&user, Some(headers.device.uuid)).await;
|
||||
nt.send_logout(&user, Some(headers.device.uuid.clone())).await;
|
||||
|
||||
save_result
|
||||
}
|
||||
@@ -731,7 +800,7 @@ async fn post_verify_email(headers: Headers) -> EmptyResult {
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct VerifyEmailTokenData {
|
||||
user_id: String,
|
||||
user_id: UserId,
|
||||
token: String,
|
||||
}
|
||||
|
||||
@@ -739,16 +808,14 @@ struct VerifyEmailTokenData {
|
||||
async fn post_verify_email_token(data: Json<VerifyEmailTokenData>, mut conn: DbConn) -> EmptyResult {
|
||||
let data: VerifyEmailTokenData = data.into_inner();
|
||||
|
||||
let mut user = match User::find_by_uuid(&data.user_id, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("User doesn't exist"),
|
||||
let Some(mut user) = User::find_by_uuid(&data.user_id, &mut conn).await else {
|
||||
err!("User doesn't exist")
|
||||
};
|
||||
|
||||
let claims = match decode_verify_email(&data.token) {
|
||||
Ok(claims) => claims,
|
||||
Err(_) => err!("Invalid claim"),
|
||||
let Ok(claims) = decode_verify_email(&data.token) else {
|
||||
err!("Invalid claim")
|
||||
};
|
||||
if claims.sub != user.uuid {
|
||||
if claims.sub != *user.uuid {
|
||||
err!("Invalid claim");
|
||||
}
|
||||
user.verified_at = Some(Utc::now().naive_utc());
|
||||
@@ -790,7 +857,7 @@ async fn post_delete_recover(data: Json<DeleteRecoverData>, mut conn: DbConn) ->
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct DeleteRecoverTokenData {
|
||||
user_id: String,
|
||||
user_id: UserId,
|
||||
token: String,
|
||||
}
|
||||
|
||||
@@ -798,16 +865,15 @@ struct DeleteRecoverTokenData {
|
||||
async fn post_delete_recover_token(data: Json<DeleteRecoverTokenData>, mut conn: DbConn) -> EmptyResult {
|
||||
let data: DeleteRecoverTokenData = data.into_inner();
|
||||
|
||||
let user = match User::find_by_uuid(&data.user_id, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("User doesn't exist"),
|
||||
let Ok(claims) = decode_delete(&data.token) else {
|
||||
err!("Invalid claim")
|
||||
};
|
||||
|
||||
let claims = match decode_delete(&data.token) {
|
||||
Ok(claims) => claims,
|
||||
Err(_) => err!("Invalid claim"),
|
||||
let Some(user) = User::find_by_uuid(&data.user_id, &mut conn).await else {
|
||||
err!("User doesn't exist")
|
||||
};
|
||||
if claims.sub != user.uuid {
|
||||
|
||||
if claims.sub != *user.uuid {
|
||||
err!("Invalid claim");
|
||||
}
|
||||
user.delete(&mut conn).await
|
||||
@@ -842,7 +908,7 @@ struct PasswordHintData {
|
||||
|
||||
#[post("/accounts/password-hint", data = "<data>")]
|
||||
async fn password_hint(data: Json<PasswordHintData>, mut conn: DbConn) -> EmptyResult {
|
||||
if !CONFIG.mail_enabled() && !CONFIG.show_password_hint() {
|
||||
if !CONFIG.password_hints_allowed() || (!CONFIG.mail_enabled() && !CONFIG.show_password_hint()) {
|
||||
err!("This server is not configured to provide password hints.");
|
||||
}
|
||||
|
||||
@@ -859,9 +925,9 @@ async fn password_hint(data: Json<PasswordHintData>, mut conn: DbConn) -> EmptyR
|
||||
// paths that send mail take noticeably longer than ones that
|
||||
// don't. Add a randomized sleep to mitigate this somewhat.
|
||||
use rand::{rngs::SmallRng, Rng, SeedableRng};
|
||||
let mut rng = SmallRng::from_entropy();
|
||||
let mut rng = SmallRng::from_os_rng();
|
||||
let delta: i32 = 100;
|
||||
let sleep_ms = (1_000 + rng.gen_range(-delta..=delta)) as u64;
|
||||
let sleep_ms = (1_000 + rng.random_range(-delta..=delta)) as u64;
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
|
||||
Ok(())
|
||||
} else {
|
||||
@@ -901,14 +967,12 @@ pub async fn _prelogin(data: Json<PreloginData>, mut conn: DbConn) -> Json<Value
|
||||
None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT, None, None),
|
||||
};
|
||||
|
||||
let result = json!({
|
||||
Json(json!({
|
||||
"kdf": kdf_type,
|
||||
"kdfIterations": kdf_iter,
|
||||
"kdfMemory": kdf_mem,
|
||||
"kdfParallelism": kdf_para,
|
||||
});
|
||||
|
||||
Json(result)
|
||||
}))
|
||||
}
|
||||
|
||||
// https://github.com/bitwarden/server/blob/master/src/Api/Models/Request/Accounts/SecretVerificationRequestModel.cs
|
||||
@@ -971,7 +1035,7 @@ async fn get_known_device(device: KnownDevice, mut conn: DbConn) -> JsonResult {
|
||||
|
||||
struct KnownDevice {
|
||||
email: String,
|
||||
uuid: String,
|
||||
uuid: DeviceId,
|
||||
}
|
||||
|
||||
#[rocket::async_trait]
|
||||
@@ -980,11 +1044,8 @@ impl<'r> FromRequest<'r> for KnownDevice {
|
||||
|
||||
async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
|
||||
let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") {
|
||||
let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) {
|
||||
Ok(bytes) => bytes,
|
||||
Err(_) => {
|
||||
return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
|
||||
}
|
||||
let Ok(email_bytes) = data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) else {
|
||||
return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
|
||||
};
|
||||
match String::from_utf8(email_bytes) {
|
||||
Ok(email) => email,
|
||||
@@ -997,7 +1058,7 @@ impl<'r> FromRequest<'r> for KnownDevice {
|
||||
};
|
||||
|
||||
let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
|
||||
uuid.to_string()
|
||||
uuid.to_string().into()
|
||||
} else {
|
||||
return Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required"));
|
||||
};
|
||||
@@ -1009,32 +1070,57 @@ impl<'r> FromRequest<'r> for KnownDevice {
|
||||
}
|
||||
}
|
||||
|
||||
#[get("/devices")]
|
||||
async fn get_all_devices(headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let devices = Device::find_with_auth_request_by_user(&headers.user.uuid, &mut conn).await;
|
||||
let devices = devices.iter().map(|device| device.to_json()).collect::<Vec<Value>>();
|
||||
|
||||
Ok(Json(json!({
|
||||
"data": devices,
|
||||
"continuationToken": null,
|
||||
"object": "list"
|
||||
})))
|
||||
}
|
||||
|
||||
#[get("/devices/identifier/<device_id>")]
|
||||
async fn get_device(device_id: DeviceId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let Some(device) = Device::find_by_uuid_and_user(&device_id, &headers.user.uuid, &mut conn).await else {
|
||||
err!("No device found");
|
||||
};
|
||||
Ok(Json(device.to_json()))
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct PushToken {
|
||||
push_token: String,
|
||||
}
|
||||
|
||||
#[post("/devices/identifier/<uuid>/token", data = "<data>")]
|
||||
async fn post_device_token(uuid: &str, data: Json<PushToken>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
put_device_token(uuid, data, headers, conn).await
|
||||
#[post("/devices/identifier/<device_id>/token", data = "<data>")]
|
||||
async fn post_device_token(device_id: DeviceId, data: Json<PushToken>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
put_device_token(device_id, data, headers, conn).await
|
||||
}
|
||||
|
||||
#[put("/devices/identifier/<uuid>/token", data = "<data>")]
|
||||
async fn put_device_token(uuid: &str, data: Json<PushToken>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
#[put("/devices/identifier/<device_id>/token", data = "<data>")]
|
||||
async fn put_device_token(
|
||||
device_id: DeviceId,
|
||||
data: Json<PushToken>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
) -> EmptyResult {
|
||||
let data = data.into_inner();
|
||||
let token = data.push_token;
|
||||
|
||||
let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await {
|
||||
Some(device) => device,
|
||||
None => err!(format!("Error: device {uuid} should be present before a token can be assigned")),
|
||||
let Some(mut device) = Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await
|
||||
else {
|
||||
err!(format!("Error: device {device_id} should be present before a token can be assigned"))
|
||||
};
|
||||
|
||||
// if the device already has been registered
|
||||
if device.is_registered() {
|
||||
// check if the new token is the same as the registered token
|
||||
if device.push_token.is_some() && device.push_token.unwrap() == token.clone() {
|
||||
debug!("Device {} is already registered and token is the same", uuid);
|
||||
debug!("Device {} is already registered and token is the same", device_id);
|
||||
return Ok(());
|
||||
} else {
|
||||
// Try to unregister already registered device
|
||||
@@ -1053,8 +1139,8 @@ async fn put_device_token(uuid: &str, data: Json<PushToken>, headers: Headers, m
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[put("/devices/identifier/<uuid>/clear-token")]
|
||||
async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
|
||||
#[put("/devices/identifier/<device_id>/clear-token")]
|
||||
async fn put_clear_device_token(device_id: DeviceId, mut conn: DbConn) -> EmptyResult {
|
||||
// This only clears push token
|
||||
// https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
|
||||
// https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
|
||||
@@ -1063,8 +1149,8 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await {
|
||||
Device::clear_push_token_by_uuid(uuid, &mut conn).await?;
|
||||
if let Some(device) = Device::find_by_uuid(&device_id, &mut conn).await {
|
||||
Device::clear_push_token_by_uuid(&device_id, &mut conn).await?;
|
||||
unregister_push_device(device.push_uuid).await?;
|
||||
}
|
||||
|
||||
@@ -1072,43 +1158,47 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
|
||||
}
|
||||
|
||||
// On upstream server, both PUT and POST are declared. Implementing the POST method in case it would be useful somewhere
|
||||
#[post("/devices/identifier/<uuid>/clear-token")]
|
||||
async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
|
||||
put_clear_device_token(uuid, conn).await
|
||||
#[post("/devices/identifier/<device_id>/clear-token")]
|
||||
async fn post_clear_device_token(device_id: DeviceId, conn: DbConn) -> EmptyResult {
|
||||
put_clear_device_token(device_id, conn).await
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct AuthRequestRequest {
|
||||
access_code: String,
|
||||
device_identifier: String,
|
||||
device_identifier: DeviceId,
|
||||
email: String,
|
||||
public_key: String,
|
||||
#[serde(alias = "type")]
|
||||
_type: i32,
|
||||
// Not used for now
|
||||
// #[serde(alias = "type")]
|
||||
// _type: i32,
|
||||
}
|
||||
|
||||
#[post("/auth-requests", data = "<data>")]
|
||||
async fn post_auth_request(
|
||||
data: Json<AuthRequestRequest>,
|
||||
headers: ClientHeaders,
|
||||
client_headers: ClientHeaders,
|
||||
mut conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let data = data.into_inner();
|
||||
|
||||
let user = match User::find_by_mail(&data.email, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => {
|
||||
err!("AuthRequest doesn't exist")
|
||||
}
|
||||
let Some(user) = User::find_by_mail(&data.email, &mut conn).await else {
|
||||
err!("AuthRequest doesn't exist", "User not found")
|
||||
};
|
||||
|
||||
// Validate device uuid and type
|
||||
match Device::find_by_uuid_and_user(&data.device_identifier, &user.uuid, &mut conn).await {
|
||||
Some(device) if device.atype == client_headers.device_type => {}
|
||||
_ => err!("AuthRequest doesn't exist", "Device verification failed"),
|
||||
}
|
||||
|
||||
let mut auth_request = AuthRequest::new(
|
||||
user.uuid.clone(),
|
||||
data.device_identifier.clone(),
|
||||
headers.device_type,
|
||||
headers.ip.ip.to_string(),
|
||||
client_headers.device_type,
|
||||
client_headers.ip.ip.to_string(),
|
||||
data.access_code,
|
||||
data.public_key,
|
||||
);
|
||||
@@ -1116,6 +1206,15 @@ async fn post_auth_request(
|
||||
|
||||
nt.send_auth_request(&user.uuid, &auth_request.uuid, &data.device_identifier, &mut conn).await;
|
||||
|
||||
log_user_event(
|
||||
EventType::UserRequestedDeviceApproval as i32,
|
||||
&user.uuid,
|
||||
client_headers.device_type,
|
||||
&client_headers.ip.ip,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(Json(json!({
|
||||
"id": auth_request.uuid,
|
||||
"publicKey": auth_request.public_key,
|
||||
@@ -1123,7 +1222,7 @@ async fn post_auth_request(
|
||||
"requestIpAddress": auth_request.request_ip,
|
||||
"key": null,
|
||||
"masterPasswordHash": null,
|
||||
"creationDate": auth_request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
|
||||
"creationDate": format_date(&auth_request.creation_date),
|
||||
"responseDate": null,
|
||||
"requestApproved": false,
|
||||
"origin": CONFIG.domain_origin(),
|
||||
@@ -1131,125 +1230,142 @@ async fn post_auth_request(
|
||||
})))
|
||||
}

#[get("/auth-requests/<uuid>")]
async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
Some(auth_request) => auth_request,
None => {
err!("AuthRequest doesn't exist")
}
#[get("/auth-requests/<auth_request_id>")]
async fn get_auth_request(auth_request_id: AuthRequestId, headers: Headers, mut conn: DbConn) -> JsonResult {
let Some(auth_request) = AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &mut conn).await
else {
err!("AuthRequest doesn't exist", "Record not found or user uuid does not match")
};

let response_date_utc = auth_request
.response_date
.map(|response_date| response_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true));
let response_date_utc = auth_request.response_date.map(|response_date| format_date(&response_date));

Ok(Json(json!(
{
"id": uuid,
"publicKey": auth_request.public_key,
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
"requestIpAddress": auth_request.request_ip,
"key": auth_request.enc_key,
"masterPasswordHash": auth_request.master_password_hash,
"creationDate": auth_request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
"responseDate": response_date_utc,
"requestApproved": auth_request.approved,
"origin": CONFIG.domain_origin(),
"object":"auth-request"
}
)))
Ok(Json(json!({
"id": &auth_request_id,
"publicKey": auth_request.public_key,
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
"requestIpAddress": auth_request.request_ip,
"key": auth_request.enc_key,
"masterPasswordHash": auth_request.master_password_hash,
"creationDate": format_date(&auth_request.creation_date),
"responseDate": response_date_utc,
"requestApproved": auth_request.approved,
"origin": CONFIG.domain_origin(),
"object":"auth-request"
})))
}
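The new handler signature takes AuthRequestId instead of &str, and the rest of this change set does the same with DeviceId, MembershipId, OrganizationId, UserId and friends. A rough sketch of what such a typed-ID newtype needs to provide, assuming the real types are macro-generated String wrappers; the derives and impls below are illustrative, not the crate's actual definitions:

// Hypothetical stand-in for the generated ID newtypes used in this diff.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct AuthRequestId(String);

impl From<String> for AuthRequestId {
    fn from(s: String) -> Self {
        Self(s)
    }
}

impl std::ops::Deref for AuthRequestId {
    type Target = str;
    fn deref(&self) -> &str {
        &self.0
    }
}

impl AsRef<str> for AuthRequestId {
    fn as_ref(&self) -> &str {
        &self.0
    }
}

With From<String> in place, conversions such as source_uuid.to_string().into() seen later in this diff compile without ever naming the concrete ID type.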

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct AuthResponseRequest {
device_identifier: String,
device_identifier: DeviceId,
key: String,
master_password_hash: Option<String>,
request_approved: bool,
}

#[put("/auth-requests/<uuid>", data = "<data>")]
#[put("/auth-requests/<auth_request_id>", data = "<data>")]
async fn put_auth_request(
uuid: &str,
auth_request_id: AuthRequestId,
data: Json<AuthResponseRequest>,
headers: Headers,
mut conn: DbConn,
ant: AnonymousNotify<'_>,
nt: Notify<'_>,
) -> JsonResult {
let data = data.into_inner();
let mut auth_request: AuthRequest = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
Some(auth_request) => auth_request,
None => {
err!("AuthRequest doesn't exist")
}
let Some(mut auth_request) =
AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &mut conn).await
else {
err!("AuthRequest doesn't exist", "Record not found or user uuid does not match")
};

auth_request.approved = Some(data.request_approved);
auth_request.enc_key = Some(data.key);
auth_request.master_password_hash = data.master_password_hash;
auth_request.response_device_id = Some(data.device_identifier.clone());
auth_request.save(&mut conn).await?;

if auth_request.approved.unwrap_or(false) {
ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await;
nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.device_identifier, &mut conn).await;
if auth_request.approved.is_some() {
err!("An authentication request with the same device already exists")
}

let response_date_utc = auth_request
.response_date
.map(|response_date| response_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true));
let response_date = Utc::now().naive_utc();
let response_date_utc = format_date(&response_date);

Ok(Json(json!(
{
"id": uuid,
"publicKey": auth_request.public_key,
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
"requestIpAddress": auth_request.request_ip,
"key": auth_request.enc_key,
"masterPasswordHash": auth_request.master_password_hash,
"creationDate": auth_request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
"responseDate": response_date_utc,
"requestApproved": auth_request.approved,
"origin": CONFIG.domain_origin(),
"object":"auth-request"
}
)))
if data.request_approved {
auth_request.approved = Some(data.request_approved);
auth_request.enc_key = Some(data.key);
auth_request.master_password_hash = data.master_password_hash;
auth_request.response_device_id = Some(data.device_identifier.clone());
auth_request.response_date = Some(response_date);
auth_request.save(&mut conn).await?;

ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await;
nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, &data.device_identifier, &mut conn).await;

log_user_event(
EventType::OrganizationUserApprovedAuthRequest as i32,
&headers.user.uuid,
headers.device.atype,
&headers.ip.ip,
&mut conn,
)
.await;
} else {
// If denied, there's no reason to keep the request
auth_request.delete(&mut conn).await?;
log_user_event(
EventType::OrganizationUserRejectedAuthRequest as i32,
&headers.user.uuid,
headers.device.atype,
&headers.ip.ip,
&mut conn,
)
.await;
}

Ok(Json(json!({
"id": &auth_request_id,
"publicKey": auth_request.public_key,
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
"requestIpAddress": auth_request.request_ip,
"key": auth_request.enc_key,
"masterPasswordHash": auth_request.master_password_hash,
"creationDate": format_date(&auth_request.creation_date),
"responseDate": response_date_utc,
"requestApproved": auth_request.approved,
"origin": CONFIG.domain_origin(),
"object":"auth-request"
})))
}
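put_auth_request is one of many places in this diff where a match that only extracted the Some value is rewritten with let ... else. The two shapes are equivalent; a small self-contained illustration using a plain Result in place of Vaultwarden's err! macro:

use std::collections::HashMap;

fn lookup(map: &HashMap<String, u32>, key: &str) -> Result<u32, String> {
    // Old shape:
    // let value = match map.get(key) {
    //     Some(v) => *v,
    //     None => return Err("not found".into()),
    // };

    // New shape: bind the value or diverge, without the extra nesting.
    let Some(value) = map.get(key) else {
        return Err("not found".into());
    };
    Ok(*value)
}

The else branch must diverge (return, continue, break, or panic), which is what err! does in the handlers above by returning early with an error.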

#[get("/auth-requests/<uuid>/response?<code>")]
async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) -> JsonResult {
let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
Some(auth_request) => auth_request,
None => {
err!("AuthRequest doesn't exist")
}
#[get("/auth-requests/<auth_request_id>/response?<code>")]
async fn get_auth_request_response(
auth_request_id: AuthRequestId,
code: &str,
client_headers: ClientHeaders,
mut conn: DbConn,
) -> JsonResult {
let Some(auth_request) = AuthRequest::find_by_uuid(&auth_request_id, &mut conn).await else {
err!("AuthRequest doesn't exist", "User not found")
};

if !auth_request.check_access_code(code) {
err!("Access code invalid doesn't exist")
if auth_request.device_type != client_headers.device_type
|| auth_request.request_ip != client_headers.ip.ip.to_string()
|| !auth_request.check_access_code(code)
{
err!("AuthRequest doesn't exist", "Invalid device, IP or code")
}

let response_date_utc = auth_request
.response_date
.map(|response_date| response_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true));
let response_date_utc = auth_request.response_date.map(|response_date| format_date(&response_date));

Ok(Json(json!(
{
"id": uuid,
"publicKey": auth_request.public_key,
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
"requestIpAddress": auth_request.request_ip,
"key": auth_request.enc_key,
"masterPasswordHash": auth_request.master_password_hash,
"creationDate": auth_request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
"responseDate": response_date_utc,
"requestApproved": auth_request.approved,
"origin": CONFIG.domain_origin(),
"object":"auth-request"
}
)))
Ok(Json(json!({
"id": &auth_request_id,
"publicKey": auth_request.public_key,
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
"requestIpAddress": auth_request.request_ip,
"key": auth_request.enc_key,
"masterPasswordHash": auth_request.master_password_hash,
"creationDate": format_date(&auth_request.creation_date),
"responseDate": response_date_utc,
"requestApproved": auth_request.approved,
"origin": CONFIG.domain_origin(),
"object":"auth-request"
})))
}
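For routes such as #[get("/auth-requests/<auth_request_id>/response?<code>")] to bind the path segment directly to the newtype, the ID type has to implement Rocket's FromParam. A hedged sketch, reusing the AuthRequestId stand-in sketched earlier; the real impl is presumably macro-generated and may validate the UUID format instead of accepting any string:

use rocket::request::FromParam;

impl<'r> FromParam<'r> for AuthRequestId {
    type Error = ();

    // Illustration only: infallible parse of the raw path segment.
    fn from_param(param: &'r str) -> Result<Self, Self::Error> {
        Ok(AuthRequestId(param.to_string()))
    }
}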

#[get("/auth-requests")]
@@ -1261,7 +1377,7 @@ async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
.iter()
.filter(|request| request.approved.is_none())
.map(|request| {
let response_date_utc = request.response_date.map(|response_date| response_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true));
let response_date_utc = request.response_date.map(|response_date| format_date(&response_date));

json!({
"id": request.uuid,
@@ -1270,7 +1386,7 @@ async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
"requestIpAddress": request.request_ip,
"key": request.enc_key,
"masterPasswordHash": request.master_password_hash,
"creationDate": request.creation_date.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true),
"creationDate": format_date(&request.creation_date),
"responseDate": response_date_utc,
"requestApproved": request.approved,
"origin": CONFIG.domain_origin(),

File diff suppressed because it is too large
@@ -93,10 +93,10 @@ async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
||||
}
|
||||
|
||||
#[get("/emergency-access/<emer_id>")]
|
||||
async fn get_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
async fn get_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(emergency_access) => Ok(Json(
|
||||
emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
|
||||
)),
|
||||
@@ -118,7 +118,7 @@ struct EmergencyAccessUpdateData {
|
||||
|
||||
#[put("/emergency-access/<emer_id>", data = "<data>")]
|
||||
async fn put_emergency_access(
|
||||
emer_id: &str,
|
||||
emer_id: EmergencyAccessId,
|
||||
data: Json<EmergencyAccessUpdateData>,
|
||||
headers: Headers,
|
||||
conn: DbConn,
|
||||
@@ -128,7 +128,7 @@ async fn put_emergency_access(
|
||||
|
||||
#[post("/emergency-access/<emer_id>", data = "<data>")]
|
||||
async fn post_emergency_access(
|
||||
emer_id: &str,
|
||||
emer_id: EmergencyAccessId,
|
||||
data: Json<EmergencyAccessUpdateData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
@@ -137,11 +137,11 @@ async fn post_emergency_access(
|
||||
|
||||
let data: EmergencyAccessUpdateData = data.into_inner();
|
||||
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(emergency_access) => emergency_access,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(mut emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
|
||||
Some(new_type) => new_type as i32,
|
||||
@@ -163,12 +163,12 @@ async fn post_emergency_access(
|
||||
// region delete
|
||||
|
||||
#[delete("/emergency-access/<emer_id>")]
|
||||
async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
async fn delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let emergency_access = match (
|
||||
EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await,
|
||||
EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await,
|
||||
EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await,
|
||||
EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await,
|
||||
) {
|
||||
(Some(grantor_emer), None) => {
|
||||
info!("Grantor deleted emergency access {emer_id}");
|
||||
@@ -186,7 +186,7 @@ async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/delete")]
|
||||
async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
async fn post_delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
delete_emergency_access(emer_id, headers, conn).await
|
||||
}
|
||||
|
||||
@@ -266,8 +266,8 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_invite(
|
||||
&new_emergency_access.email.expect("Grantee email does not exists"),
|
||||
&grantee_user.uuid,
|
||||
&new_emergency_access.uuid,
|
||||
grantee_user.uuid,
|
||||
new_emergency_access.uuid,
|
||||
&grantor_user.name,
|
||||
&grantor_user.email,
|
||||
)
|
||||
@@ -281,27 +281,25 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/reinvite")]
|
||||
async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
async fn resend_invite(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(mut emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
|
||||
err!("The grantee user is already accepted or confirmed to the organization");
|
||||
}
|
||||
|
||||
let email = match emergency_access.email.clone() {
|
||||
Some(email) => email,
|
||||
None => err!("Email not valid."),
|
||||
let Some(email) = emergency_access.email.clone() else {
|
||||
err!("Email not valid.")
|
||||
};
|
||||
|
||||
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantee user not found."),
|
||||
let Some(grantee_user) = User::find_by_mail(&email, &mut conn).await else {
|
||||
err!("Grantee user not found.")
|
||||
};
|
||||
|
||||
let grantor_user = headers.user;
|
||||
@@ -309,8 +307,8 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_invite(
|
||||
&email,
|
||||
&grantor_user.uuid,
|
||||
&emergency_access.uuid,
|
||||
grantor_user.uuid,
|
||||
emergency_access.uuid,
|
||||
&grantor_user.name,
|
||||
&grantor_user.email,
|
||||
)
|
||||
@@ -333,7 +331,12 @@ struct AcceptData {
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
|
||||
async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
async fn accept_invite(
|
||||
emer_id: EmergencyAccessId,
|
||||
data: Json<AcceptData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
) -> EmptyResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let data: AcceptData = data.into_inner();
|
||||
@@ -356,16 +359,15 @@ async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers,
|
||||
|
||||
// We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database.
|
||||
// The uuid of the grantee gets stored once accepted.
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_email(emer_id, &headers.user.email, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(mut emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantee_email(&emer_id, &headers.user.email, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
// get grantor user to send Accepted email
|
||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
|
||||
err!("Grantor user not found.")
|
||||
};
|
||||
|
||||
if emer_id == claims.emer_id
|
||||
@@ -392,7 +394,7 @@ struct ConfirmData {
|
||||
|
||||
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
|
||||
async fn confirm_emergency_access(
|
||||
emer_id: &str,
|
||||
emer_id: EmergencyAccessId,
|
||||
data: Json<ConfirmData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
@@ -403,11 +405,11 @@ async fn confirm_emergency_access(
|
||||
let data: ConfirmData = data.into_inner();
|
||||
let key = data.key;
|
||||
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &confirming_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(mut emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &confirming_user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::Accepted as i32
|
||||
|| emergency_access.grantor_uuid != confirming_user.uuid
|
||||
@@ -415,15 +417,13 @@ async fn confirm_emergency_access(
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
let Some(grantor_user) = User::find_by_uuid(&confirming_user.uuid, &mut conn).await else {
|
||||
err!("Grantor user not found.")
|
||||
};
|
||||
|
||||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
|
||||
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantee user not found."),
|
||||
let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else {
|
||||
err!("Grantee user not found.")
|
||||
};
|
||||
|
||||
emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
|
||||
@@ -446,23 +446,22 @@ async fn confirm_emergency_access(
|
||||
// region access emergency access
|
||||
|
||||
#[post("/emergency-access/<emer_id>/initiate")]
|
||||
async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
async fn initiate_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let initiating_user = headers.user;
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &initiating_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(mut emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &initiating_user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
|
||||
err!("Grantor user not found.")
|
||||
};
|
||||
|
||||
let now = Utc::now().naive_utc();
|
||||
@@ -485,28 +484,26 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/approve")]
|
||||
async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
async fn approve_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(mut emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
let Some(grantor_user) = User::find_by_uuid(&headers.user.uuid, &mut conn).await else {
|
||||
err!("Grantor user not found.")
|
||||
};
|
||||
|
||||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
|
||||
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantee user not found."),
|
||||
let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else {
|
||||
err!("Grantee user not found.")
|
||||
};
|
||||
|
||||
emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32;
|
||||
@@ -522,14 +519,14 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/reject")]
|
||||
async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
async fn reject_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let mut emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(mut emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|
||||
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32
|
||||
@@ -538,9 +535,8 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
|
||||
}
|
||||
|
||||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
|
||||
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantee user not found."),
|
||||
let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else {
|
||||
err!("Grantee user not found.")
|
||||
};
|
||||
|
||||
emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
|
||||
@@ -560,14 +556,14 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
|
||||
// region action
|
||||
|
||||
#[post("/emergency-access/<emer_id>/view")]
|
||||
async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) {
|
||||
err!("Emergency access not valid.")
|
||||
@@ -598,23 +594,22 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/takeover")]
|
||||
async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
async fn takeover_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
check_emergency_access_enabled()?;
|
||||
|
||||
let requesting_user = headers.user;
|
||||
let emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
|
||||
err!("Grantor user not found.")
|
||||
};
|
||||
|
||||
let result = json!({
|
||||
@@ -638,7 +633,7 @@ struct EmergencyAccessPasswordData {
|
||||
|
||||
#[post("/emergency-access/<emer_id>/password", data = "<data>")]
|
||||
async fn password_emergency_access(
|
||||
emer_id: &str,
|
||||
emer_id: EmergencyAccessId,
|
||||
data: Json<EmergencyAccessPasswordData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
@@ -650,19 +645,18 @@ async fn password_emergency_access(
|
||||
//let key = &data.Key;
|
||||
|
||||
let requesting_user = headers.user;
|
||||
let emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
let Some(mut grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
|
||||
err!("Grantor user not found.")
|
||||
};
|
||||
|
||||
// change grantor_user password
|
||||
@@ -673,9 +667,9 @@ async fn password_emergency_access(
|
||||
TwoFactor::delete_all_by_user(&grantor_user.uuid, &mut conn).await?;
|
||||
|
||||
// Remove grantor from all organisations unless Owner
|
||||
for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &mut conn).await {
|
||||
if user_org.atype != UserOrgType::Owner as i32 {
|
||||
user_org.delete(&mut conn).await?;
|
||||
for member in Membership::find_any_state_by_user(&grantor_user.uuid, &mut conn).await {
|
||||
if member.atype != MembershipType::Owner as i32 {
|
||||
member.delete(&mut conn).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -684,21 +678,20 @@ async fn password_emergency_access(
|
||||
// endregion
|
||||
|
||||
#[get("/emergency-access/<emer_id>/policies")]
|
||||
async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
async fn policies_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let requesting_user = headers.user;
|
||||
let emergency_access =
|
||||
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
let Some(emergency_access) =
|
||||
EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await
|
||||
else {
|
||||
err!("Emergency access not valid.")
|
||||
};
|
||||
|
||||
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
|
||||
err!("Emergency access not valid.")
|
||||
}
|
||||
|
||||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantor user not found."),
|
||||
let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
|
||||
err!("Grantor user not found.")
|
||||
};
|
||||
|
||||
let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn);
|
||||
@@ -713,11 +706,11 @@ async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
|
||||
|
||||
fn is_valid_request(
|
||||
emergency_access: &EmergencyAccess,
|
||||
requesting_user_uuid: &str,
|
||||
requesting_user_id: &UserId,
|
||||
requested_access_type: EmergencyAccessType,
|
||||
) -> bool {
|
||||
emergency_access.grantee_uuid.is_some()
|
||||
&& emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_uuid
|
||||
&& emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_id
|
||||
&& emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32
|
||||
&& emergency_access.atype == requested_access_type as i32
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ use crate::{
|
||||
api::{EmptyResult, JsonResult},
|
||||
auth::{AdminHeaders, Headers},
|
||||
db::{
|
||||
models::{Cipher, Event, UserOrganization},
|
||||
models::{Cipher, CipherId, Event, Membership, MembershipId, OrganizationId, UserId},
|
||||
DbConn, DbPool,
|
||||
},
|
||||
util::parse_date,
|
||||
@@ -31,7 +31,16 @@ struct EventRange {
|
||||
|
||||
// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
|
||||
#[get("/organizations/<org_id>/events?<data..>")]
|
||||
async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
|
||||
async fn get_org_events(
|
||||
org_id: OrganizationId,
|
||||
data: EventRange,
|
||||
headers: AdminHeaders,
|
||||
mut conn: DbConn,
|
||||
) -> JsonResult {
|
||||
if org_id != headers.org_id {
|
||||
err!("Organization not found", "Organization id's do not match");
|
||||
}
|
||||
|
||||
// Return an empty vec when org events are disabled.
|
||||
// This prevents client errors
|
||||
let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
|
||||
@@ -44,7 +53,7 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
|
||||
parse_date(&data.end)
|
||||
};
|
||||
|
||||
Event::find_by_organization_uuid(org_id, &start_date, &end_date, &mut conn)
|
||||
Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn)
|
||||
.await
|
||||
.iter()
|
||||
.map(|e| e.to_json())
|
||||
@@ -59,14 +68,14 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
|
||||
}
|
||||
|
||||
#[get("/ciphers/<cipher_id>/events?<data..>")]
|
||||
async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
async fn get_cipher_events(cipher_id: CipherId, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
// Return an empty vec when org events are disabled.
|
||||
// This prevents client errors
|
||||
let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
|
||||
Vec::with_capacity(0)
|
||||
} else {
|
||||
let mut events_json = Vec::with_capacity(0);
|
||||
if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, cipher_id, &mut conn).await {
|
||||
if Membership::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await {
|
||||
let start_date = parse_date(&data.start);
|
||||
let end_date = if let Some(before_date) = &data.continuation_token {
|
||||
parse_date(before_date)
|
||||
@@ -74,7 +83,7 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
|
||||
parse_date(&data.end)
|
||||
};
|
||||
|
||||
events_json = Event::find_by_cipher_uuid(cipher_id, &start_date, &end_date, &mut conn)
|
||||
events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn)
|
||||
.await
|
||||
.iter()
|
||||
.map(|e| e.to_json())
|
||||
@@ -90,14 +99,17 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
|
||||
})))
|
||||
}
|
||||
|
||||
#[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
|
||||
#[get("/organizations/<org_id>/users/<member_id>/events?<data..>")]
|
||||
async fn get_user_events(
|
||||
org_id: &str,
|
||||
user_org_id: &str,
|
||||
org_id: OrganizationId,
|
||||
member_id: MembershipId,
|
||||
data: EventRange,
|
||||
_headers: AdminHeaders,
|
||||
headers: AdminHeaders,
|
||||
mut conn: DbConn,
|
||||
) -> JsonResult {
|
||||
if org_id != headers.org_id {
|
||||
err!("Organization not found", "Organization id's do not match");
|
||||
}
|
||||
// Return an empty vec when org events are disabled.
|
||||
// This prevents client errors
|
||||
let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
|
||||
@@ -110,7 +122,7 @@ async fn get_user_events(
|
||||
parse_date(&data.end)
|
||||
};
|
||||
|
||||
Event::find_by_org_and_user_org(org_id, user_org_id, &start_date, &end_date, &mut conn)
|
||||
Event::find_by_org_and_member(&org_id, &member_id, &start_date, &end_date, &mut conn)
|
||||
.await
|
||||
.iter()
|
||||
.map(|e| e.to_json())
|
||||
@@ -152,8 +164,8 @@ struct EventCollection {
|
||||
date: String,
|
||||
|
||||
// Optional
|
||||
cipher_id: Option<String>,
|
||||
organization_id: Option<String>,
|
||||
cipher_id: Option<CipherId>,
|
||||
organization_id: Option<OrganizationId>,
|
||||
}
|
||||
|
||||
// Upstream:
|
||||
@@ -180,11 +192,11 @@ async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers,
|
||||
.await;
|
||||
}
|
||||
1600..=1699 => {
|
||||
if let Some(org_uuid) = &event.organization_id {
|
||||
if let Some(org_id) = &event.organization_id {
|
||||
_log_event(
|
||||
event.r#type,
|
||||
org_uuid,
|
||||
org_uuid,
|
||||
org_id,
|
||||
org_id,
|
||||
&headers.user.uuid,
|
||||
headers.device.atype,
|
||||
Some(event_date),
|
||||
@@ -197,11 +209,11 @@ async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers,
|
||||
_ => {
|
||||
if let Some(cipher_uuid) = &event.cipher_id {
|
||||
if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
|
||||
if let Some(org_uuid) = cipher.organization_uuid {
|
||||
if let Some(org_id) = cipher.organization_uuid {
|
||||
_log_event(
|
||||
event.r#type,
|
||||
cipher_uuid,
|
||||
&org_uuid,
|
||||
&org_id,
|
||||
&headers.user.uuid,
|
||||
headers.device.atype,
|
||||
Some(event_date),
|
||||
@@ -218,38 +230,39 @@ async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers,
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
|
||||
pub async fn log_user_event(event_type: i32, user_id: &UserId, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
|
||||
if !CONFIG.org_events_enabled() {
|
||||
return;
|
||||
}
|
||||
_log_user_event(event_type, user_uuid, device_type, None, ip, conn).await;
|
||||
_log_user_event(event_type, user_id, device_type, None, ip, conn).await;
|
||||
}
|
||||
|
||||
async fn _log_user_event(
|
||||
event_type: i32,
|
||||
user_uuid: &str,
|
||||
user_id: &UserId,
|
||||
device_type: i32,
|
||||
event_date: Option<NaiveDateTime>,
|
||||
ip: &IpAddr,
|
||||
conn: &mut DbConn,
|
||||
) {
|
||||
let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await;
|
||||
let mut events: Vec<Event> = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org
|
||||
let memberships = Membership::find_by_user(user_id, conn).await;
|
||||
let mut events: Vec<Event> = Vec::with_capacity(memberships.len() + 1); // We need an event per org and one without an org
|
||||
|
||||
// Upstream saves the event also without any org_uuid.
|
||||
// Upstream saves the event also without any org_id.
|
||||
let mut event = Event::new(event_type, event_date);
|
||||
event.user_uuid = Some(String::from(user_uuid));
|
||||
event.act_user_uuid = Some(String::from(user_uuid));
|
||||
event.user_uuid = Some(user_id.clone());
|
||||
event.act_user_uuid = Some(user_id.clone());
|
||||
event.device_type = Some(device_type);
|
||||
event.ip_address = Some(ip.to_string());
|
||||
events.push(event);
|
||||
|
||||
// For each org a user is a member of store these events per org
|
||||
for org_uuid in orgs {
|
||||
for membership in memberships {
|
||||
let mut event = Event::new(event_type, event_date);
|
||||
event.user_uuid = Some(String::from(user_uuid));
|
||||
event.org_uuid = Some(org_uuid);
|
||||
event.act_user_uuid = Some(String::from(user_uuid));
|
||||
event.user_uuid = Some(user_id.clone());
|
||||
event.org_uuid = Some(membership.org_uuid);
|
||||
event.org_user_uuid = Some(membership.uuid);
|
||||
event.act_user_uuid = Some(user_id.clone());
|
||||
event.device_type = Some(device_type);
|
||||
event.ip_address = Some(ip.to_string());
|
||||
events.push(event);
|
||||
@@ -261,8 +274,8 @@ async fn _log_user_event(
|
||||
pub async fn log_event(
|
||||
event_type: i32,
|
||||
source_uuid: &str,
|
||||
org_uuid: &str,
|
||||
act_user_uuid: &str,
|
||||
org_id: &OrganizationId,
|
||||
act_user_id: &UserId,
|
||||
device_type: i32,
|
||||
ip: &IpAddr,
|
||||
conn: &mut DbConn,
|
||||
@@ -270,15 +283,15 @@ pub async fn log_event(
|
||||
if !CONFIG.org_events_enabled() {
|
||||
return;
|
||||
}
|
||||
_log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await;
|
||||
_log_event(event_type, source_uuid, org_id, act_user_id, device_type, None, ip, conn).await;
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn _log_event(
|
||||
event_type: i32,
|
||||
source_uuid: &str,
|
||||
org_uuid: &str,
|
||||
act_user_uuid: &str,
|
||||
org_id: &OrganizationId,
|
||||
act_user_id: &UserId,
|
||||
device_type: i32,
|
||||
event_date: Option<NaiveDateTime>,
|
||||
ip: &IpAddr,
|
||||
@@ -290,31 +303,31 @@ async fn _log_event(
|
||||
// 1000..=1099 Are user events, they need to be logged via log_user_event()
|
||||
// Cipher Events
|
||||
1100..=1199 => {
|
||||
event.cipher_uuid = Some(String::from(source_uuid));
|
||||
event.cipher_uuid = Some(source_uuid.to_string().into());
|
||||
}
|
||||
// Collection Events
|
||||
1300..=1399 => {
|
||||
event.collection_uuid = Some(String::from(source_uuid));
|
||||
event.collection_uuid = Some(source_uuid.to_string().into());
|
||||
}
|
||||
// Group Events
|
||||
1400..=1499 => {
|
||||
event.group_uuid = Some(String::from(source_uuid));
|
||||
event.group_uuid = Some(source_uuid.to_string().into());
|
||||
}
|
||||
// Org User Events
|
||||
1500..=1599 => {
|
||||
event.org_user_uuid = Some(String::from(source_uuid));
|
||||
event.org_user_uuid = Some(source_uuid.to_string().into());
|
||||
}
|
||||
// 1600..=1699 Are organizational events, and they do not need the source_uuid
|
||||
// Policy Events
|
||||
1700..=1799 => {
|
||||
event.policy_uuid = Some(String::from(source_uuid));
|
||||
event.policy_uuid = Some(source_uuid.to_string().into());
|
||||
}
|
||||
// Ignore others
|
||||
_ => {}
|
||||
}
|
||||
|
||||
event.org_uuid = Some(String::from(org_uuid));
|
||||
event.act_user_uuid = Some(String::from(act_user_uuid));
|
||||
event.org_uuid = Some(org_id.clone());
|
||||
event.act_user_uuid = Some(act_user_id.clone());
|
||||
event.device_type = Some(device_type);
|
||||
event.ip_address = Some(ip.to_string());
|
||||
event.save(conn).await.unwrap_or(());
|
||||
|
||||
@@ -23,25 +23,19 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
||||
}))
|
||||
}
|
||||
|
||||
#[get("/folders/<uuid>")]
|
||||
async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
|
||||
Some(folder) => folder,
|
||||
_ => err!("Invalid folder"),
|
||||
};
|
||||
|
||||
if folder.user_uuid != headers.user.uuid {
|
||||
err!("Folder belongs to another user")
|
||||
#[get("/folders/<folder_id>")]
|
||||
async fn get_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
match Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await {
|
||||
Some(folder) => Ok(Json(folder.to_json())),
|
||||
_ => err!("Invalid folder", "Folder does not exist or belongs to another user"),
|
||||
}
|
||||
|
||||
Ok(Json(folder.to_json()))
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct FolderData {
|
||||
pub name: String,
|
||||
pub id: Option<String>,
|
||||
pub id: Option<FolderId>,
|
||||
}
|
||||
|
||||
#[post("/folders", data = "<data>")]
|
||||
@@ -56,14 +50,20 @@ async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn
|
||||
Ok(Json(folder.to_json()))
|
||||
}
|
||||
|
||||
#[post("/folders/<uuid>", data = "<data>")]
|
||||
async fn post_folder(uuid: &str, data: Json<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
put_folder(uuid, data, headers, conn, nt).await
|
||||
#[post("/folders/<folder_id>", data = "<data>")]
|
||||
async fn post_folder(
|
||||
folder_id: FolderId,
|
||||
data: Json<FolderData>,
|
||||
headers: Headers,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
put_folder(folder_id, data, headers, conn, nt).await
|
||||
}
|
||||
|
||||
#[put("/folders/<uuid>", data = "<data>")]
|
||||
#[put("/folders/<folder_id>", data = "<data>")]
|
||||
async fn put_folder(
|
||||
uuid: &str,
|
||||
folder_id: FolderId,
|
||||
data: Json<FolderData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
@@ -71,15 +71,10 @@ async fn put_folder(
|
||||
) -> JsonResult {
|
||||
let data: FolderData = data.into_inner();
|
||||
|
||||
let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
|
||||
Some(folder) => folder,
|
||||
_ => err!("Invalid folder"),
|
||||
let Some(mut folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else {
|
||||
err!("Invalid folder", "Folder does not exist or belongs to another user")
|
||||
};
|
||||
|
||||
if folder.user_uuid != headers.user.uuid {
|
||||
err!("Folder belongs to another user")
|
||||
}
|
||||
|
||||
folder.name = data.name;
|
||||
|
||||
folder.save(&mut conn).await?;
|
||||
@@ -88,22 +83,17 @@ async fn put_folder(
|
||||
Ok(Json(folder.to_json()))
|
||||
}
|
||||
|
||||
#[post("/folders/<uuid>/delete")]
|
||||
async fn delete_folder_post(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
delete_folder(uuid, headers, conn, nt).await
|
||||
#[post("/folders/<folder_id>/delete")]
|
||||
async fn delete_folder_post(folder_id: FolderId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
delete_folder(folder_id, headers, conn, nt).await
|
||||
}
|
||||
|
||||
#[delete("/folders/<uuid>")]
|
||||
async fn delete_folder(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
|
||||
Some(folder) => folder,
|
||||
_ => err!("Invalid folder"),
|
||||
#[delete("/folders/<folder_id>")]
|
||||
async fn delete_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let Some(folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else {
|
||||
err!("Invalid folder", "Folder does not exist or belongs to another user")
|
||||
};
|
||||
|
||||
if folder.user_uuid != headers.user.uuid {
|
||||
err!("Folder belongs to another user")
|
||||
}
|
||||
|
||||
// Delete the actual folder entry
|
||||
folder.delete(&mut conn).await?;
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ pub use sends::purge_sends;
|
||||
pub fn routes() -> Vec<Route> {
|
||||
let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
|
||||
let mut hibp_routes = routes![hibp_breach];
|
||||
let mut meta_routes = routes![alive, now, version, config];
|
||||
let mut meta_routes = routes![alive, now, version, config, get_api_webauthn];
|
||||
|
||||
let mut routes = Vec::new();
|
||||
routes.append(&mut accounts::routes());
|
||||
@@ -135,12 +135,13 @@ async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbC
|
||||
}
|
||||
|
||||
#[get("/hibp/breach?<username>")]
|
||||
async fn hibp_breach(username: &str) -> JsonResult {
|
||||
let url = format!(
|
||||
"https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
|
||||
);
|
||||
|
||||
async fn hibp_breach(username: &str, _headers: Headers) -> JsonResult {
|
||||
let username: String = url::form_urlencoded::byte_serialize(username.as_bytes()).collect();
|
||||
if let Some(api_key) = crate::CONFIG.hibp_api_key() {
|
||||
let url = format!(
|
||||
"https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
|
||||
);
|
||||
|
||||
let res = make_http_request(Method::GET, &url)?.header("hibp-api-key", api_key).send().await?;
|
||||
|
||||
// If we get a 404, return a 404, it means no breached accounts
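The rewritten hibp_breach now form-urlencodes the username before interpolating it into the HIBP URL. A small standalone example of what that call produces for a typical address (the input value here is an assumption, chosen to show the escaping):

fn main() {
    let username = "user+test@example.com";
    // url::form_urlencoded leaves alphanumerics and *-._ alone, turns spaces
    // into '+', and percent-encodes everything else.
    let encoded: String = url::form_urlencoded::byte_serialize(username.as_bytes()).collect();
    assert_eq!(encoded, "user%2Btest%40example.com");
}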
|
||||
@@ -183,6 +184,18 @@ fn version() -> Json<&'static str> {
|
||||
Json(crate::VERSION.unwrap_or_default())
|
||||
}
|
||||
|
||||
#[get("/webauthn")]
|
||||
fn get_api_webauthn(_headers: Headers) -> Json<Value> {
|
||||
// Prevent a 404 error, which also causes key-rotation issues
|
||||
// It looks like this is used when login with passkeys is enabled, which Vaultwarden does not (yet) support
|
||||
// An empty list/data also works fine
|
||||
Json(json!({
|
||||
"object": "list",
|
||||
"data": [],
|
||||
"continuationToken": null
|
||||
}))
|
||||
}
|
||||
|
||||
#[get("/config")]
|
||||
fn config() -> Json<Value> {
|
||||
let domain = crate::CONFIG.domain();
|
||||
@@ -197,8 +210,8 @@ fn config() -> Json<Value> {
|
||||
// This means they expect a version that closely matches the Bitwarden server version
|
||||
// We should make sure that we keep this updated when we support the new server features
|
||||
// Version history:
|
||||
// - Individual cipher key encryption: 2023.9.1
|
||||
"version": "2024.2.0",
|
||||
// - Individual cipher key encryption: 2024.2.0
|
||||
"version": "2025.1.0",
|
||||
"gitHash": option_env!("GIT_REV"),
|
||||
"server": {
|
||||
"name": "Vaultwarden",
|
||||
|
||||
File diff suppressed because it is too large
@@ -52,40 +52,36 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
|
||||
let data = data.into_inner();
|
||||
|
||||
for user_data in &data.members {
|
||||
let mut user_created: bool = false;
|
||||
if user_data.deleted {
|
||||
// If user is marked for deletion and it exists, revoke it
|
||||
if let Some(mut user_org) =
|
||||
UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
|
||||
{
|
||||
if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await {
|
||||
// Only revoke a user if it is not the last confirmed owner
|
||||
let revoked = if user_org.atype == UserOrgType::Owner
|
||||
&& user_org.status == UserOrgStatus::Confirmed as i32
|
||||
let revoked = if member.atype == MembershipType::Owner
|
||||
&& member.status == MembershipStatus::Confirmed as i32
|
||||
{
|
||||
if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn).await
|
||||
<= 1
|
||||
if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1
|
||||
{
|
||||
warn!("Can't revoke the last owner");
|
||||
false
|
||||
} else {
|
||||
user_org.revoke()
|
||||
member.revoke()
|
||||
}
|
||||
} else {
|
||||
user_org.revoke()
|
||||
member.revoke()
|
||||
};
|
||||
|
||||
let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
|
||||
let ext_modified = member.set_external_id(Some(user_data.external_id.clone()));
|
||||
if revoked || ext_modified {
|
||||
user_org.save(&mut conn).await?;
|
||||
member.save(&mut conn).await?;
|
||||
}
|
||||
}
|
||||
// If user is part of the organization, restore it
|
||||
} else if let Some(mut user_org) =
|
||||
UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
|
||||
{
|
||||
let restored = user_org.restore();
|
||||
let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
|
||||
} else if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await {
|
||||
let restored = member.restore();
|
||||
let ext_modified = member.set_external_id(Some(user_data.external_id.clone()));
|
||||
if restored || ext_modified {
|
||||
user_org.save(&mut conn).await?;
|
||||
member.save(&mut conn).await?;
|
||||
}
|
||||
} else {
|
||||
// If user is not part of the organization
|
||||
@@ -97,25 +93,25 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
|
||||
new_user.save(&mut conn).await?;
|
||||
|
||||
if !CONFIG.mail_enabled() {
|
||||
let invitation = Invitation::new(&new_user.email);
|
||||
invitation.save(&mut conn).await?;
|
||||
Invitation::new(&new_user.email).save(&mut conn).await?;
|
||||
}
|
||||
user_created = true;
|
||||
new_user
|
||||
}
|
||||
};
|
||||
let user_org_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() {
|
||||
UserOrgStatus::Invited as i32
|
||||
let member_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() {
|
||||
MembershipStatus::Invited as i32
|
||||
} else {
|
||||
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
|
||||
MembershipStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
|
||||
};
|
||||
|
||||
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
|
||||
new_org_user.set_external_id(Some(user_data.external_id.clone()));
|
||||
new_org_user.access_all = false;
|
||||
new_org_user.atype = UserOrgType::User as i32;
|
||||
new_org_user.status = user_org_status;
|
||||
let mut new_member = Membership::new(user.uuid.clone(), org_id.clone());
|
||||
new_member.set_external_id(Some(user_data.external_id.clone()));
|
||||
new_member.access_all = false;
|
||||
new_member.atype = MembershipType::User as i32;
|
||||
new_member.status = member_status;
|
||||
|
||||
new_org_user.save(&mut conn).await?;
|
||||
new_member.save(&mut conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await {
|
||||
@@ -123,8 +119,18 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
|
||||
None => err!("Error looking up organization"),
|
||||
};
|
||||
|
||||
mail::send_invite(&user, Some(org_id.clone()), Some(new_org_user.uuid), &org_name, Some(org_email))
|
||||
.await?;
|
||||
if let Err(e) =
|
||||
mail::send_invite(&user, org_id.clone(), new_member.uuid.clone(), &org_name, Some(org_email)).await
|
||||
{
|
||||
// Upon error delete the user, invite and org member records when needed
|
||||
if user_created {
|
||||
user.delete(&mut conn).await?;
|
||||
} else {
|
||||
new_member.delete(&mut conn).await?;
|
||||
}
|
||||
|
||||
err!(format!("Error sending invite: {e:?} "));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -149,9 +155,8 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
|
||||
GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;
|
||||
|
||||
for ext_id in &group_data.member_external_ids {
|
||||
if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
|
||||
{
|
||||
let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
|
||||
if let Some(member) = Membership::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await {
|
||||
let mut group_user = GroupUser::new(group_uuid.clone(), member.uuid.clone());
|
||||
group_user.save(&mut conn).await?;
|
||||
}
|
||||
}
|
||||
@@ -164,20 +169,19 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
|
||||
if data.overwrite_existing {
|
||||
// Generate a HashSet to quickly verify if a member is listed or not.
|
||||
let sync_members: HashSet<String> = data.members.into_iter().map(|m| m.external_id).collect();
|
||||
for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
|
||||
if let Some(ref user_external_id) = user_org.external_id {
|
||||
for member in Membership::find_by_org(&org_id, &mut conn).await {
|
||||
if let Some(ref user_external_id) = member.external_id {
|
||||
if !sync_members.contains(user_external_id) {
|
||||
if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 {
|
||||
if member.atype == MembershipType::Owner && member.status == MembershipStatus::Confirmed as i32 {
|
||||
// Removing owner, check that there is at least one other confirmed owner
|
||||
if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn)
|
||||
.await
|
||||
if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await
|
||||
<= 1
|
||||
{
|
||||
warn!("Can't delete the last owner");
|
||||
continue;
|
||||
}
|
||||
}
|
||||
user_org.delete(&mut conn).await?;
|
||||
member.delete(&mut conn).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -186,7 +190,7 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub struct PublicToken(String);
|
||||
pub struct PublicToken(OrganizationId);
|
||||
|
||||
#[rocket::async_trait]
|
||||
impl<'r> FromRequest<'r> for PublicToken {
|
||||
@@ -203,9 +207,8 @@ impl<'r> FromRequest<'r> for PublicToken {
|
||||
None => err_handler!("No access token provided"),
|
||||
};
|
||||
// Check JWT token is valid and get device and user from it
|
||||
let claims = match auth::decode_api_org(access_token) {
|
||||
Ok(claims) => claims,
|
||||
Err(_) => err_handler!("Invalid claim"),
|
||||
let Ok(claims) = auth::decode_api_org(access_token) else {
|
||||
err_handler!("Invalid claim")
|
||||
};
|
||||
// Check if time is between claims.nbf and claims.exp
|
||||
let time_now = Utc::now().timestamp();
|
||||
@@ -227,13 +230,12 @@ impl<'r> FromRequest<'r> for PublicToken {
|
||||
Outcome::Success(conn) => conn,
|
||||
_ => err_handler!("Error getting DB"),
|
||||
};
|
||||
let org_uuid = match claims.client_id.strip_prefix("organization.") {
|
||||
Some(uuid) => uuid,
|
||||
None => err_handler!("Malformed client_id"),
|
||||
let Some(org_id) = claims.client_id.strip_prefix("organization.") else {
|
||||
err_handler!("Malformed client_id")
|
||||
};
|
||||
let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, &conn).await {
|
||||
Some(org_api_key) => org_api_key,
|
||||
None => err_handler!("Invalid client_id"),
|
||||
let org_id: OrganizationId = org_id.to_string().into();
|
||||
let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(&org_id, &conn).await else {
|
||||
err_handler!("Invalid client_id")
|
||||
};
|
||||
if org_api_key.org_uuid != claims.client_sub {
|
||||
err_handler!("Token not issued for this org");
|
||||
|
||||
@@ -12,7 +12,7 @@ use crate::{
api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
auth::{ClientIp, Headers, Host},
db::{models::*, DbConn, DbPool},
util::{NumberOrString, SafeString},
util::NumberOrString,
CONFIG,
};

@@ -67,7 +67,7 @@ pub struct SendData {
file_length: Option<NumberOrString>,

// Used for key rotations
pub id: Option<String>,
pub id: Option<SendId>,
}

/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@@ -79,9 +79,9 @@ pub struct SendData {
/// There is also a Vaultwarden-specific `sends_allowed` config setting that
/// controls this policy globally.
async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> EmptyResult {
let user_uuid = &headers.user.uuid;
let user_id = &headers.user.uuid;
if !CONFIG.sends_allowed()
|| OrgPolicy::is_applicable_to_user(user_uuid, OrgPolicyType::DisableSend, None, conn).await
|| OrgPolicy::is_applicable_to_user(user_id, OrgPolicyType::DisableSend, None, conn).await
{
err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
}
@@ -95,9 +95,9 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em
///
/// Ref: https://bitwarden.com/help/article/policies/#send-options
async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
let user_uuid = &headers.user.uuid;
let user_id = &headers.user.uuid;
let hide_email = data.hide_email.unwrap_or(false);
if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
if hide_email && OrgPolicy::is_hide_email_disabled(user_id, conn).await {
err!(
"Due to an Enterprise Policy, you are not allowed to hide your email address \
from recipients when creating or editing a Send."
@@ -106,7 +106,7 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c
Ok(())
}

fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
fn create_send(data: SendData, user_id: UserId) -> ApiResult<Send> {
let data_val = if data.r#type == SendType::Text as i32 {
data.text
} else if data.r#type == SendType::File as i32 {
@@ -129,7 +129,7 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
}

let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc());
send.user_uuid = Some(user_uuid);
send.user_uuid = Some(user_id);
send.notes = data.notes;
send.max_access_count = match data.max_access_count {
Some(m) => Some(m.into_i32()?),
@@ -157,18 +157,12 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
}))
}

#[get("/sends/<uuid>")]
async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
let send = match Send::find_by_uuid(uuid, &mut conn).await {
Some(send) => send,
None => err!("Send not found"),
};

if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
#[get("/sends/<send_id>")]
async fn get_send(send_id: SendId, headers: Headers, mut conn: DbConn) -> JsonResult {
match Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await {
Some(send) => Ok(Json(send.to_json())),
None => err!("Send not found", "Invalid send uuid or does not belong to user"),
}

Ok(Json(send.to_json()))
}

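The `get_send` hunk above, like several handlers further down, folds the old two-step "fetch by uuid, then check `user_uuid`" flow into a single `find_by_uuid_and_user` lookup guarded by a `let … else` early return. A self-contained sketch of that idiom follows; the `Send` struct and the in-memory map are illustrative stand-ins, not the project's actual database model.

```rust
use std::collections::HashMap;

struct Send {
    user_uuid: String,
    name: String,
}

// Stand-in for Send::find_by_uuid_and_user: ownership is part of the lookup,
// so the caller cannot forget the "does this belong to the user?" check.
fn find_by_uuid_and_user<'a>(
    store: &'a HashMap<String, Send>,
    uuid: &str,
    user_uuid: &str,
) -> Option<&'a Send> {
    store.get(uuid).filter(|s| s.user_uuid == user_uuid)
}

fn get_send(store: &HashMap<String, Send>, send_id: &str, user_id: &str) -> Result<String, String> {
    // let-else keeps the happy path flat and makes the error return explicit.
    let Some(send) = find_by_uuid_and_user(store, send_id, user_id) else {
        return Err("Send not found".into());
    };
    Ok(send.name.clone())
}

fn main() {
    let mut store = HashMap::new();
    store.insert("send-1".to_string(), Send { user_uuid: "user-1".into(), name: "demo".into() });
    assert!(get_send(&store, "send-1", "user-1").is_ok());
    // A wrong owner is indistinguishable from a missing send, which avoids leaking existence.
    assert!(get_send(&store, "send-1", "user-2").is_err());
}
```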
#[post("/sends", data = "<data>")]
@@ -255,7 +249,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
err!("Send content is not a file");
}

let file_id = crate::crypto::generate_send_id();
let file_id = crate::crypto::generate_send_file_id();
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
let file_path = folder_path.join(&file_id);
tokio::fs::create_dir_all(&folder_path).await?;
@@ -330,7 +324,7 @@ async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbC

let mut send = create_send(data, headers.user.uuid)?;

let file_id = crate::crypto::generate_send_id();
let file_id = crate::crypto::generate_send_file_id();

let mut data_value: Value = serde_json::from_str(&send.data)?;
if let Some(o) = data_value.as_object_mut() {
@@ -352,16 +346,16 @@ async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbC
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendFileData {
id: String,
id: SendFileId,
size: u64,
fileName: String,
}

// https://github.com/bitwarden/server/blob/66f95d1c443490b653e5a15d32977e2f5a3f9e32/src/Api/Tools/Controllers/SendsController.cs#L250
#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
#[post("/sends/<send_id>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
async fn post_send_file_v2_data(
send_uuid: &str,
file_id: &str,
send_id: SendId,
file_id: SendFileId,
data: Form<UploadDataV2<'_>>,
headers: Headers,
mut conn: DbConn,
@@ -371,22 +365,14 @@ async fn post_send_file_v2_data(

let mut data = data.into_inner();

let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else {
err!("Send not found. Unable to save the file.")
let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
err!("Send not found. Unable to save the file.", "Invalid send uuid or does not belong to user.")
};

if send.atype != SendType::File as i32 {
err!("Send is not a file type send.");
}

let Some(send_user_id) = &send.user_uuid else {
err!("Sends are only supported for users at the moment.")
};

if send_user_id != &headers.user.uuid {
err!("Send doesn't belong to user.");
}

let Ok(send_data) = serde_json::from_str::<SendFileData>(&send.data) else {
err!("Unable to decode send data as json.")
};
@@ -416,7 +402,7 @@ async fn post_send_file_v2_data(
err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
}

let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid);
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_id);
let file_path = folder_path.join(file_id);

// Check if the file already exists, if that is the case do not overwrite it
@@ -456,9 +442,8 @@ async fn post_access(
|
||||
ip: ClientIp,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let mut send = match Send::find_by_access_id(access_id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err_code!(SEND_INACCESSIBLE_MSG, 404),
|
||||
let Some(mut send) = Send::find_by_access_id(access_id, &mut conn).await else {
|
||||
err_code!(SEND_INACCESSIBLE_MSG, 404)
|
||||
};
|
||||
|
||||
if let Some(max_access_count) = send.max_access_count {
|
||||
@@ -500,7 +485,7 @@ async fn post_access(
|
||||
UpdateType::SyncSendUpdate,
|
||||
&send,
|
||||
&send.update_users_revision(&mut conn).await,
|
||||
&String::from("00000000-0000-0000-0000-000000000000"),
|
||||
&String::from("00000000-0000-0000-0000-000000000000").into(),
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
@@ -510,16 +495,15 @@ async fn post_access(
|
||||
|
||||
#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
|
||||
async fn post_access_file(
|
||||
send_id: &str,
|
||||
file_id: &str,
|
||||
send_id: SendId,
|
||||
file_id: SendFileId,
|
||||
data: Json<SendAccessData>,
|
||||
host: Host,
|
||||
mut conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let mut send = match Send::find_by_uuid(send_id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err_code!(SEND_INACCESSIBLE_MSG, 404),
|
||||
let Some(mut send) = Send::find_by_uuid(&send_id, &mut conn).await else {
|
||||
err_code!(SEND_INACCESSIBLE_MSG, 404)
|
||||
};
|
||||
|
||||
if let Some(max_access_count) = send.max_access_count {
|
||||
@@ -558,12 +542,12 @@ async fn post_access_file(
|
||||
UpdateType::SyncSendUpdate,
|
||||
&send,
|
||||
&send.update_users_revision(&mut conn).await,
|
||||
&String::from("00000000-0000-0000-0000-000000000000"),
|
||||
&String::from("00000000-0000-0000-0000-000000000000").into(),
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
let token_claims = crate::auth::generate_send_claims(send_id, file_id);
|
||||
let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
|
||||
let token = crate::auth::encode_jwt(&token_claims);
|
||||
Ok(Json(json!({
|
||||
"object": "send-fileDownload",
|
||||
@@ -573,7 +557,7 @@ async fn post_access_file(
|
||||
}
|
||||
|
||||
#[get("/sends/<send_id>/<file_id>?<t>")]
|
||||
async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Option<NamedFile> {
|
||||
async fn download_send(send_id: SendId, file_id: SendFileId, t: &str) -> Option<NamedFile> {
|
||||
if let Ok(claims) = crate::auth::decode_send(t) {
|
||||
if claims.sub == format!("{send_id}/{file_id}") {
|
||||
return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok();
|
||||
@@ -582,16 +566,21 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Opt
|
||||
None
|
||||
}
|
||||
|
||||
#[put("/sends/<id>", data = "<data>")]
|
||||
async fn put_send(id: &str, data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
#[put("/sends/<send_id>", data = "<data>")]
|
||||
async fn put_send(
|
||||
send_id: SendId,
|
||||
data: Json<SendData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
enforce_disable_send_policy(&headers, &mut conn).await?;
|
||||
|
||||
let data: SendData = data.into_inner();
|
||||
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
|
||||
|
||||
let mut send = match Send::find_by_uuid(id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err!("Send not found"),
|
||||
let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
|
||||
err!("Send not found", "Send send_id is invalid or does not belong to user")
|
||||
};
|
||||
|
||||
update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
|
||||
@@ -657,17 +646,12 @@ pub async fn update_send_from_data(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[delete("/sends/<id>")]
|
||||
async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let send = match Send::find_by_uuid(id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err!("Send not found"),
|
||||
#[delete("/sends/<send_id>")]
|
||||
async fn delete_send(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
|
||||
err!("Send not found", "Invalid send uuid, or does not belong to user")
|
||||
};
|
||||
|
||||
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
|
||||
err!("Send is not owned by user")
|
||||
}
|
||||
|
||||
send.delete(&mut conn).await?;
|
||||
nt.send_send_update(
|
||||
UpdateType::SyncSendDelete,
|
||||
@@ -681,19 +665,14 @@ async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[put("/sends/<id>/remove-password")]
|
||||
async fn put_remove_password(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
#[put("/sends/<send_id>/remove-password")]
|
||||
async fn put_remove_password(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
enforce_disable_send_policy(&headers, &mut conn).await?;
|
||||
|
||||
let mut send = match Send::find_by_uuid(id, &mut conn).await {
|
||||
Some(s) => s,
|
||||
None => err!("Send not found"),
|
||||
let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
|
||||
err!("Send not found", "Invalid send uuid, or does not belong to user")
|
||||
};
|
||||
|
||||
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
|
||||
err!("Send is not owned by user")
|
||||
}
|
||||
|
||||
send.set_password(None);
|
||||
send.save(&mut conn).await?;
|
||||
nt.send_send_update(
|
||||
|
||||
@@ -7,7 +7,7 @@ use crate::{
|
||||
auth::{ClientIp, Headers},
|
||||
crypto,
|
||||
db::{
|
||||
models::{EventType, TwoFactor, TwoFactorType},
|
||||
models::{EventType, TwoFactor, TwoFactorType, UserId},
|
||||
DbConn,
|
||||
},
|
||||
util::NumberOrString,
|
||||
@@ -16,7 +16,7 @@ use crate::{
|
||||
pub use crate::config::CONFIG;
|
||||
|
||||
pub fn routes() -> Vec<Route> {
|
||||
routes![generate_authenticator, activate_authenticator, activate_authenticator_put,]
|
||||
routes![generate_authenticator, activate_authenticator, activate_authenticator_put, disable_authenticator]
|
||||
}
|
||||
|
||||
#[post("/two-factor/get-authenticator", data = "<data>")]
|
||||
@@ -95,7 +95,7 @@ async fn activate_authenticator_put(data: Json<EnableAuthenticatorData>, headers
|
||||
}
|
||||
|
||||
pub async fn validate_totp_code_str(
|
||||
user_uuid: &str,
|
||||
user_id: &UserId,
|
||||
totp_code: &str,
|
||||
secret: &str,
|
||||
ip: &ClientIp,
|
||||
@@ -105,11 +105,11 @@ pub async fn validate_totp_code_str(
|
||||
err!("TOTP code is not a number");
|
||||
}
|
||||
|
||||
validate_totp_code(user_uuid, totp_code, secret, ip, conn).await
|
||||
validate_totp_code(user_id, totp_code, secret, ip, conn).await
|
||||
}
|
||||
|
||||
pub async fn validate_totp_code(
|
||||
user_uuid: &str,
|
||||
user_id: &UserId,
|
||||
totp_code: &str,
|
||||
secret: &str,
|
||||
ip: &ClientIp,
|
||||
@@ -117,16 +117,15 @@ pub async fn validate_totp_code(
|
||||
) -> EmptyResult {
|
||||
use totp_lite::{totp_custom, Sha1};
|
||||
|
||||
let decoded_secret = match BASE32.decode(secret.as_bytes()) {
|
||||
Ok(s) => s,
|
||||
Err(_) => err!("Invalid TOTP secret"),
|
||||
let Ok(decoded_secret) = BASE32.decode(secret.as_bytes()) else {
|
||||
err!("Invalid TOTP secret")
|
||||
};
|
||||
|
||||
let mut twofactor =
|
||||
match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await {
|
||||
Some(tf) => tf,
|
||||
_ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
|
||||
};
|
||||
let mut twofactor = match TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Authenticator as i32, conn).await
|
||||
{
|
||||
Some(tf) => tf,
|
||||
_ => TwoFactor::new(user_id.clone(), TwoFactorType::Authenticator, secret.to_string()),
|
||||
};
|
||||
|
||||
// The amount of steps back and forward in time
|
||||
// Also check if we need to disable time drifted TOTP codes.
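The two comment lines above describe the drift window used when validating authenticator codes: the server checks not only the current 30-second step but neighbouring steps, while refusing any step that has already been used. As a rough illustration only (the actual implementation computes codes with the `totp_lite` crate and persists the last used step on the two-factor record), a windowed check could look like the sketch below, with `totp_at` as a hypothetical stand-in for the real code generator:

```rust
/// Accept a code if it matches the current step or one step to either side,
/// and reject any step at or before the last one already consumed (replay protection).
fn validate_with_drift(
    submitted: &str,
    current_time: u64,
    last_used_step: i64,
    totp_at: impl Fn(u64) -> String, // hypothetical: returns the code for a given unix time
) -> Result<i64, &'static str> {
    let current_step = (current_time / 30) as i64;
    for step in (current_step - 1)..=(current_step + 1) {
        if step <= last_used_step {
            continue; // this step was already used once; treat a match as a replay
        }
        if totp_at(step as u64 * 30) == submitted {
            return Ok(step); // caller persists this as the new last_used_step
        }
    }
    Err("invalid or previously used TOTP code")
}

fn main() {
    // Deterministic stand-in so the sketch runs without a real TOTP implementation.
    let fake_totp = |t: u64| format!("{:06}", (t / 30) % 1_000_000);
    let now = 1_700_000_000_u64;
    let code = fake_totp(now);
    assert_eq!(validate_with_drift(&code, now, -1, fake_totp), Ok((now / 30) as i64));
}
```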
|
||||
@@ -176,3 +175,47 @@ pub async fn validate_totp_code(
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct DisableAuthenticatorData {
|
||||
key: String,
|
||||
master_password_hash: String,
|
||||
r#type: NumberOrString,
|
||||
}
|
||||
|
||||
#[delete("/two-factor/authenticator", data = "<data>")]
|
||||
async fn disable_authenticator(data: Json<DisableAuthenticatorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let user = headers.user;
|
||||
let type_ = data.r#type.into_i32()?;
|
||||
|
||||
if !user.check_valid_password(&data.master_password_hash) {
|
||||
err!("Invalid password");
|
||||
}
|
||||
|
||||
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
|
||||
if twofactor.data == data.key {
|
||||
twofactor.delete(&mut conn).await?;
|
||||
log_user_event(
|
||||
EventType::UserDisabled2fa as i32,
|
||||
&user.uuid,
|
||||
headers.device.atype,
|
||||
&headers.ip.ip,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
err!(format!("TOTP key for user {} does not match recorded value, cannot deactivate", &user.email));
|
||||
}
|
||||
}
|
||||
|
||||
if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() {
|
||||
super::enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?;
|
||||
}
|
||||
|
||||
Ok(Json(json!({
|
||||
"enabled": false,
|
||||
"keys": type_,
|
||||
"object": "twoFactorProvider"
|
||||
})))
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ use crate::{
|
||||
auth::Headers,
|
||||
crypto,
|
||||
db::{
|
||||
models::{EventType, TwoFactor, TwoFactorType, User},
|
||||
models::{EventType, TwoFactor, TwoFactorType, User, UserId},
|
||||
DbConn,
|
||||
},
|
||||
error::MapResult,
|
||||
@@ -26,8 +26,8 @@ pub fn routes() -> Vec<Route> {
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct DuoData {
|
||||
host: String, // Duo API hostname
|
||||
ik: String, // integration key
|
||||
sk: String, // secret key
|
||||
ik: String, // client id
|
||||
sk: String, // client secret
|
||||
}
|
||||
|
||||
impl DuoData {
|
||||
@@ -111,8 +111,8 @@ async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbCo
|
||||
json!({
|
||||
"enabled": enabled,
|
||||
"host": data.host,
|
||||
"secretKey": data.sk,
|
||||
"integrationKey": data.ik,
|
||||
"clientSecret": data.sk,
|
||||
"clientId": data.ik,
|
||||
"object": "twoFactorDuo"
|
||||
})
|
||||
} else {
|
||||
@@ -129,8 +129,8 @@ async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbCo
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct EnableDuoData {
|
||||
host: String,
|
||||
secret_key: String,
|
||||
integration_key: String,
|
||||
client_secret: String,
|
||||
client_id: String,
|
||||
master_password_hash: Option<String>,
|
||||
otp: Option<String>,
|
||||
}
|
||||
@@ -139,8 +139,8 @@ impl From<EnableDuoData> for DuoData {
|
||||
fn from(d: EnableDuoData) -> Self {
|
||||
Self {
|
||||
host: d.host,
|
||||
ik: d.integration_key,
|
||||
sk: d.secret_key,
|
||||
ik: d.client_id,
|
||||
sk: d.client_secret,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -151,7 +151,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
|
||||
st.is_empty() || s == DISABLED_MESSAGE_DEFAULT
|
||||
}
|
||||
|
||||
!empty_or_default(&data.host) && !empty_or_default(&data.secret_key) && !empty_or_default(&data.integration_key)
|
||||
!empty_or_default(&data.host) && !empty_or_default(&data.client_secret) && !empty_or_default(&data.client_id)
|
||||
}
|
||||
|
||||
#[post("/two-factor/duo", data = "<data>")]
|
||||
@@ -186,8 +186,8 @@ async fn activate_duo(data: Json<EnableDuoData>, headers: Headers, mut conn: DbC
|
||||
Ok(Json(json!({
|
||||
"enabled": true,
|
||||
"host": data.host,
|
||||
"secretKey": data.sk,
|
||||
"integrationKey": data.ik,
|
||||
"clientSecret": data.sk,
|
||||
"clientId": data.ik,
|
||||
"object": "twoFactorDuo"
|
||||
})))
|
||||
}
|
||||
@@ -228,13 +228,12 @@ const AUTH_PREFIX: &str = "AUTH";
|
||||
const DUO_PREFIX: &str = "TX";
|
||||
const APP_PREFIX: &str = "APP";
|
||||
|
||||
async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus {
|
||||
async fn get_user_duo_data(user_id: &UserId, conn: &mut DbConn) -> DuoStatus {
|
||||
let type_ = TwoFactorType::Duo as i32;
|
||||
|
||||
// If the user doesn't have an entry, disabled
|
||||
let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await {
|
||||
Some(t) => t,
|
||||
None => return DuoStatus::Disabled(DuoData::global().is_some()),
|
||||
let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, type_, conn).await else {
|
||||
return DuoStatus::Disabled(DuoData::global().is_some());
|
||||
};
|
||||
|
||||
// If the user has the required values, we use those
|
||||
@@ -333,14 +332,12 @@ fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -
|
||||
err!("Prefixes don't match")
|
||||
}
|
||||
|
||||
let cookie_vec = match BASE64.decode(u_b64.as_bytes()) {
|
||||
Ok(c) => c,
|
||||
Err(_) => err!("Invalid Duo cookie encoding"),
|
||||
let Ok(cookie_vec) = BASE64.decode(u_b64.as_bytes()) else {
|
||||
err!("Invalid Duo cookie encoding")
|
||||
};
|
||||
|
||||
let cookie = match String::from_utf8(cookie_vec) {
|
||||
Ok(c) => c,
|
||||
Err(_) => err!("Invalid Duo cookie encoding"),
|
||||
let Ok(cookie) = String::from_utf8(cookie_vec) else {
|
||||
err!("Invalid Duo cookie encoding")
|
||||
};
|
||||
|
||||
let cookie_split: Vec<&str> = cookie.split('|').collect();
|
||||
|
||||
@@ -10,7 +10,7 @@ use crate::{
|
||||
api::{core::two_factor::duo::get_duo_keys_email, EmptyResult},
|
||||
crypto,
|
||||
db::{
|
||||
models::{EventType, TwoFactorDuoContext},
|
||||
models::{DeviceId, EventType, TwoFactorDuoContext},
|
||||
DbConn, DbPool,
|
||||
},
|
||||
error::Error,
|
||||
@@ -211,10 +211,7 @@ impl DuoClient {
|
||||
nonce,
|
||||
};
|
||||
|
||||
let token = match self.encode_duo_jwt(jwt_payload) {
|
||||
Ok(token) => token,
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
let token = self.encode_duo_jwt(jwt_payload)?;
|
||||
|
||||
let authz_endpoint = format!("https://{}/oauth/v1/authorize", self.api_host);
|
||||
let mut auth_url = match Url::parse(authz_endpoint.as_str()) {
|
||||
@@ -382,7 +379,7 @@ fn make_callback_url(client_name: &str) -> Result<String, Error> {
|
||||
pub async fn get_duo_auth_url(
|
||||
email: &str,
|
||||
client_id: &str,
|
||||
device_identifier: &String,
|
||||
device_identifier: &DeviceId,
|
||||
conn: &mut DbConn,
|
||||
) -> Result<String, Error> {
|
||||
let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;
|
||||
@@ -420,7 +417,7 @@ pub async fn validate_duo_login(
|
||||
email: &str,
|
||||
two_factor_token: &str,
|
||||
client_id: &str,
|
||||
device_identifier: &str,
|
||||
device_identifier: &DeviceId,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
// Result supplied to us by clients in the form "<authz code>|<state>"
|
||||
|
||||
@@ -10,7 +10,7 @@ use crate::{
|
||||
auth::Headers,
|
||||
crypto,
|
||||
db::{
|
||||
models::{EventType, TwoFactor, TwoFactorType, User},
|
||||
models::{EventType, TwoFactor, TwoFactorType, User, UserId},
|
||||
DbConn,
|
||||
},
|
||||
error::{Error, MapResult},
|
||||
@@ -40,9 +40,8 @@ async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> E
|
||||
use crate::db::models::User;
|
||||
|
||||
// Get the user
|
||||
let user = match User::find_by_mail(&data.email, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Username or password is incorrect. Try again."),
|
||||
let Some(user) = User::find_by_mail(&data.email, &mut conn).await else {
|
||||
err!("Username or password is incorrect. Try again.")
|
||||
};
|
||||
|
||||
// Check password
|
||||
@@ -60,10 +59,9 @@ async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> E
|
||||
}
|
||||
|
||||
/// Generate the token, save the data for later verification and send email to user
|
||||
pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn send_token(user_id: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
let type_ = TwoFactorType::Email as i32;
|
||||
let mut twofactor =
|
||||
TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await.map_res("Two factor not found")?;
|
||||
let mut twofactor = TwoFactor::find_by_user_and_type(user_id, type_, conn).await.map_res("Two factor not found")?;
|
||||
|
||||
let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
|
||||
|
||||
@@ -174,9 +172,8 @@ async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> Jso
|
||||
|
||||
let mut email_data = EmailTokenData::from_json(&twofactor.data)?;
|
||||
|
||||
let issued_token = match &email_data.last_token {
|
||||
Some(t) => t,
|
||||
_ => err!("No token available"),
|
||||
let Some(issued_token) = &email_data.last_token else {
|
||||
err!("No token available")
|
||||
};
|
||||
|
||||
if !crypto::ct_eq(issued_token, data.token) {
|
||||
@@ -200,19 +197,18 @@ async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> Jso
|
||||
}
|
||||
|
||||
/// Validate the email code when used as TwoFactor token mechanism
|
||||
pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn validate_email_code_str(user_id: &UserId, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
let mut email_data = EmailTokenData::from_json(data)?;
|
||||
let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn)
|
||||
let mut twofactor = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Email as i32, conn)
|
||||
.await
|
||||
.map_res("Two factor not found")?;
|
||||
let issued_token = match &email_data.last_token {
|
||||
Some(t) => t,
|
||||
_ => err!(
|
||||
let Some(issued_token) = &email_data.last_token else {
|
||||
err!(
|
||||
"No token available",
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn2fa
|
||||
}
|
||||
),
|
||||
)
|
||||
};
|
||||
|
||||
if !crypto::ct_eq(issued_token, token) {
|
||||
@@ -330,8 +326,8 @@ pub fn obscure_email(email: &str) -> String {
|
||||
format!("{}@{}", new_name, &domain)
|
||||
}
|
||||
|
||||
pub async fn find_and_activate_email_2fa(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
if let Some(user) = User::find_by_uuid(user_uuid, conn).await {
|
||||
pub async fn find_and_activate_email_2fa(user_id: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
if let Some(user) = User::find_by_uuid(user_id, conn).await {
|
||||
activate_email_2fa(&user, conn).await
|
||||
} else {
|
||||
err!("User not found!");
|
||||
|
||||
@@ -85,9 +85,8 @@ async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, mu
|
||||
use crate::db::models::User;
|
||||
|
||||
// Get the user
|
||||
let mut user = match User::find_by_mail(&data.email, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Username or password is incorrect. Try again."),
|
||||
let Some(mut user) = User::find_by_mail(&data.email, &mut conn).await else {
|
||||
err!("Username or password is incorrect. Try again.")
|
||||
};
|
||||
|
||||
// Check password
|
||||
@@ -174,17 +173,16 @@ async fn disable_twofactor_put(data: Json<DisableTwoFactorData>, headers: Header
|
||||
|
||||
pub async fn enforce_2fa_policy(
|
||||
user: &User,
|
||||
act_uuid: &str,
|
||||
act_user_id: &UserId,
|
||||
device_type: i32,
|
||||
ip: &std::net::IpAddr,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
for member in UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn)
|
||||
.await
|
||||
.into_iter()
|
||||
for member in
|
||||
Membership::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn).await.into_iter()
|
||||
{
|
||||
// Policy only applies to non-Owner/non-Admin members who have accepted joining the org
|
||||
if member.atype < UserOrgType::Admin {
|
||||
if member.atype < MembershipType::Admin {
|
||||
if CONFIG.mail_enabled() {
|
||||
let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap();
|
||||
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
|
||||
@@ -197,7 +195,7 @@ pub async fn enforce_2fa_policy(
|
||||
EventType::OrganizationUserRevoked as i32,
|
||||
&member.uuid,
|
||||
&member.org_uuid,
|
||||
act_uuid,
|
||||
act_user_id,
|
||||
device_type,
|
||||
ip,
|
||||
conn,
|
||||
@@ -210,16 +208,16 @@ pub async fn enforce_2fa_policy(
|
||||
}
|
||||
|
||||
pub async fn enforce_2fa_policy_for_org(
|
||||
org_uuid: &str,
|
||||
act_uuid: &str,
|
||||
org_id: &OrganizationId,
|
||||
act_user_id: &UserId,
|
||||
device_type: i32,
|
||||
ip: &std::net::IpAddr,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
let org = Organization::find_by_uuid(org_uuid, conn).await.unwrap();
|
||||
for member in UserOrganization::find_confirmed_by_org(org_uuid, conn).await.into_iter() {
|
||||
let org = Organization::find_by_uuid(org_id, conn).await.unwrap();
|
||||
for member in Membership::find_confirmed_by_org(org_id, conn).await.into_iter() {
|
||||
// Don't enforce the policy for Admins and Owners.
|
||||
if member.atype < UserOrgType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
|
||||
if member.atype < MembershipType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
|
||||
if CONFIG.mail_enabled() {
|
||||
let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap();
|
||||
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
|
||||
@@ -231,8 +229,8 @@ pub async fn enforce_2fa_policy_for_org(
|
||||
log_event(
|
||||
EventType::OrganizationUserRevoked as i32,
|
||||
&member.uuid,
|
||||
org_uuid,
|
||||
act_uuid,
|
||||
org_id,
|
||||
act_user_id,
|
||||
device_type,
|
||||
ip,
|
||||
conn,
|
||||
|
||||
@@ -6,7 +6,7 @@ use crate::{
|
||||
auth::Headers,
|
||||
crypto,
|
||||
db::{
|
||||
models::{TwoFactor, TwoFactorType},
|
||||
models::{TwoFactor, TwoFactorType, UserId},
|
||||
DbConn,
|
||||
},
|
||||
error::{Error, MapResult},
|
||||
@@ -104,11 +104,11 @@ async fn verify_otp(data: Json<ProtectedActionVerify>, headers: Headers, mut con
|
||||
|
||||
pub async fn validate_protected_action_otp(
|
||||
otp: &str,
|
||||
user_uuid: &str,
|
||||
user_id: &UserId,
|
||||
delete_if_valid: bool,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
let pa = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::ProtectedActions as i32, conn)
|
||||
let pa = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::ProtectedActions as i32, conn)
|
||||
.await
|
||||
.map_res("Protected action token not found, try sending the code again or restart the process")?;
|
||||
let mut pa_data = ProtectedActionData::from_json(&pa.data)?;
|
||||
|
||||
@@ -11,7 +11,7 @@ use crate::{
|
||||
},
|
||||
auth::Headers,
|
||||
db::{
|
||||
models::{EventType, TwoFactor, TwoFactorType},
|
||||
models::{EventType, TwoFactor, TwoFactorType, UserId},
|
||||
DbConn,
|
||||
},
|
||||
error::Error,
|
||||
@@ -148,7 +148,7 @@ async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Hea
|
||||
)?;
|
||||
|
||||
let type_ = TwoFactorType::WebauthnRegisterChallenge;
|
||||
TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;
|
||||
TwoFactor::new(user.uuid.clone(), type_, serde_json::to_string(&state)?).save(&mut conn).await?;
|
||||
|
||||
let mut challenge_value = serde_json::to_value(challenge.public_key)?;
|
||||
challenge_value["status"] = "ok".into();
|
||||
@@ -309,17 +309,16 @@ async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn:
|
||||
err!("Invalid password");
|
||||
}
|
||||
|
||||
let mut tf =
|
||||
match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await {
|
||||
Some(tf) => tf,
|
||||
None => err!("Webauthn data not found!"),
|
||||
};
|
||||
let Some(mut tf) =
|
||||
TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await
|
||||
else {
|
||||
err!("Webauthn data not found!")
|
||||
};
|
||||
|
||||
let mut data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?;
|
||||
|
||||
let item_pos = match data.iter().position(|r| r.id == id) {
|
||||
Some(p) => p,
|
||||
None => err!("Webauthn entry not found"),
|
||||
let Some(item_pos) = data.iter().position(|r| r.id == id) else {
|
||||
err!("Webauthn entry not found")
|
||||
};
|
||||
|
||||
let removed_item = data.remove(item_pos);
|
||||
@@ -353,20 +352,20 @@ async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn:
|
||||
}
|
||||
|
||||
pub async fn get_webauthn_registrations(
|
||||
user_uuid: &str,
|
||||
user_id: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
|
||||
let type_ = TwoFactorType::Webauthn as i32;
|
||||
match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
|
||||
match TwoFactor::find_by_user_and_type(user_id, type_, conn).await {
|
||||
Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)),
|
||||
None => Ok((false, Vec::new())), // If no data, return empty list
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> JsonResult {
|
||||
pub async fn generate_webauthn_login(user_id: &UserId, conn: &mut DbConn) -> JsonResult {
|
||||
// Load saved credentials
|
||||
let creds: Vec<Credential> =
|
||||
get_webauthn_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.credential).collect();
|
||||
get_webauthn_registrations(user_id, conn).await?.1.into_iter().map(|r| r.credential).collect();
|
||||
|
||||
if creds.is_empty() {
|
||||
err!("No Webauthn devices registered")
|
||||
@@ -377,7 +376,7 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> Json
|
||||
let (response, state) = WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?;
|
||||
|
||||
// Save the challenge state for later validation
|
||||
TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
|
||||
TwoFactor::new(user_id.clone(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
|
||||
.save(conn)
|
||||
.await?;
|
||||
|
||||
@@ -385,9 +384,9 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> Json
|
||||
Ok(Json(serde_json::to_value(response.public_key)?))
|
||||
}
|
||||
|
||||
pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
let type_ = TwoFactorType::WebauthnLoginChallenge as i32;
|
||||
let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
|
||||
let state = match TwoFactor::find_by_user_and_type(user_id, type_, conn).await {
|
||||
Some(tf) => {
|
||||
let state: AuthenticationState = serde_json::from_str(&tf.data)?;
|
||||
tf.delete(conn).await?;
|
||||
@@ -404,7 +403,7 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
|
||||
let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?;
|
||||
let rsp: PublicKeyCredential = rsp.into();
|
||||
|
||||
let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;
|
||||
let mut registrations = get_webauthn_registrations(user_id, conn).await?.1;
|
||||
|
||||
// If the credential we received is migrated from U2F, enable the U2F compatibility
|
||||
//let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
|
||||
@@ -414,7 +413,7 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
|
||||
if ®.credential.cred_id == cred_id {
|
||||
reg.credential.counter = auth_data.counter;
|
||||
|
||||
TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(®istrations)?)
|
||||
TwoFactor::new(user_id.clone(), TwoFactorType::Webauthn, serde_json::to_string(®istrations)?)
|
||||
.save(conn)
|
||||
.await?;
|
||||
return Ok(());
|
||||
|
||||
@@ -92,10 +92,10 @@ async fn generate_yubikey(data: Json<PasswordOrOtpData>, headers: Headers, mut c
|
||||
|
||||
data.validate(&user, false, &mut conn).await?;
|
||||
|
||||
let user_uuid = &user.uuid;
|
||||
let user_id = &user.uuid;
|
||||
let yubikey_type = TwoFactorType::YubiKey as i32;
|
||||
|
||||
let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &mut conn).await;
|
||||
let r = TwoFactor::find_by_user_and_type(user_id, yubikey_type, &mut conn).await;
|
||||
|
||||
if let Some(r) = r {
|
||||
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
|
||||
|
||||
@@ -19,7 +19,7 @@ use tokio::{
|
||||
io::{AsyncReadExt, AsyncWriteExt},
|
||||
};
|
||||
|
||||
use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};
|
||||
use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer};
|
||||
|
||||
use crate::{
|
||||
error::Error,
|
||||
@@ -261,11 +261,7 @@ impl Icon {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_favicons_node(
|
||||
dom: InfallibleTokenizer<StringReader<'_>, FaviconEmitter>,
|
||||
icons: &mut Vec<Icon>,
|
||||
url: &url::Url,
|
||||
) {
|
||||
fn get_favicons_node(dom: Tokenizer<StringReader<'_>, FaviconEmitter>, icons: &mut Vec<Icon>, url: &url::Url) {
|
||||
const TAG_LINK: &[u8] = b"link";
|
||||
const TAG_BASE: &[u8] = b"base";
|
||||
const TAG_HEAD: &[u8] = b"head";
|
||||
@@ -274,7 +270,7 @@ fn get_favicons_node(
|
||||
|
||||
let mut base_url = url.clone();
|
||||
let mut icon_tags: Vec<Tag> = Vec::new();
|
||||
for token in dom {
|
||||
for Ok(token) in dom {
|
||||
let tag_name: &[u8] = &token.tag.name;
|
||||
match tag_name {
|
||||
TAG_LINK => {
|
||||
@@ -295,9 +291,7 @@ fn get_favicons_node(
|
||||
TAG_HEAD if token.closing => {
|
||||
break;
|
||||
}
|
||||
_ => {
|
||||
continue;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -401,7 +395,7 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
|
||||
// 384KB should be more than enough for the HTML, though as we only really need the HTML header.
|
||||
let limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.to_vec();
|
||||
|
||||
let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default()).infallible();
|
||||
let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default());
|
||||
get_favicons_node(dom, &mut iconlist, &url);
|
||||
} else {
|
||||
// Add the default favicon.ico to the list with just the given domain
|
||||
@@ -662,7 +656,7 @@ impl reqwest::cookie::CookieStore for Jar {
|
||||
/// The FaviconEmitter is using an optimized version of the DefaultEmitter.
|
||||
/// This prevents emitting tags like comments, doctype and also strings between the tags.
|
||||
/// But it will also only emit the tags we need and only if they have the correct attributes
|
||||
/// Therefor parsing the HTML content is faster.
|
||||
/// Therefore parsing the HTML content is faster.
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
#[derive(Default)]
|
||||
|
||||
@@ -31,7 +31,7 @@ pub fn routes() -> Vec<Route> {
|
||||
async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn) -> JsonResult {
|
||||
let data: ConnectData = data.into_inner();
|
||||
|
||||
let mut user_uuid: Option<String> = None;
|
||||
let mut user_id: Option<UserId> = None;
|
||||
|
||||
let login_result = match data.grant_type.as_ref() {
|
||||
"refresh_token" => {
|
||||
@@ -48,7 +48,7 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
|
||||
_check_is_some(&data.device_name, "device_name cannot be blank")?;
|
||||
_check_is_some(&data.device_type, "device_type cannot be blank")?;
|
||||
|
||||
_password_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
|
||||
_password_login(data, &mut user_id, &mut conn, &client_header.ip).await
|
||||
}
|
||||
"client_credentials" => {
|
||||
_check_is_some(&data.client_id, "client_id cannot be blank")?;
|
||||
@@ -59,17 +59,17 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
|
||||
_check_is_some(&data.device_name, "device_name cannot be blank")?;
|
||||
_check_is_some(&data.device_type, "device_type cannot be blank")?;
|
||||
|
||||
_api_key_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
|
||||
_api_key_login(data, &mut user_id, &mut conn, &client_header.ip).await
|
||||
}
|
||||
t => err!("Invalid type", t),
|
||||
};
|
||||
|
||||
if let Some(user_uuid) = user_uuid {
|
||||
if let Some(user_id) = user_id {
|
||||
match &login_result {
|
||||
Ok(_) => {
|
||||
log_user_event(
|
||||
EventType::UserLoggedIn as i32,
|
||||
&user_uuid,
|
||||
&user_id,
|
||||
client_header.device_type,
|
||||
&client_header.ip.ip,
|
||||
&mut conn,
|
||||
@@ -80,7 +80,7 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
|
||||
if let Some(ev) = e.get_event() {
|
||||
log_user_event(
|
||||
ev.event as i32,
|
||||
&user_uuid,
|
||||
&user_id,
|
||||
client_header.device_type,
|
||||
&client_header.ip.ip,
|
||||
&mut conn,
|
||||
@@ -111,7 +111,7 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
|
||||
// Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
|
||||
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
|
||||
// ---
|
||||
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
// let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
|
||||
device.save(conn).await?;
|
||||
|
||||
@@ -141,7 +141,7 @@ struct MasterPasswordPolicy {
|
||||
|
||||
async fn _password_login(
|
||||
data: ConnectData,
|
||||
user_uuid: &mut Option<String>,
|
||||
user_id: &mut Option<UserId>,
|
||||
conn: &mut DbConn,
|
||||
ip: &ClientIp,
|
||||
) -> JsonResult {
|
||||
@@ -157,28 +157,29 @@ async fn _password_login(
|
||||
|
||||
// Get the user
|
||||
let username = data.username.as_ref().unwrap().trim();
|
||||
let mut user = match User::find_by_mail(username, conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
|
||||
let Some(mut user) = User::find_by_mail(username, conn).await else {
|
||||
err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username))
|
||||
};
|
||||
|
||||
// Set the user_uuid here to be passed back used for event logging.
|
||||
*user_uuid = Some(user.uuid.clone());
|
||||
// Set the user_id here to be passed back used for event logging.
|
||||
*user_id = Some(user.uuid.clone());
|
||||
|
||||
// Check password
|
||||
let password = data.password.as_ref().unwrap();
|
||||
if let Some(auth_request_uuid) = data.auth_request.clone() {
|
||||
if let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await {
|
||||
if !auth_request.check_access_code(password) {
|
||||
err!(
|
||||
"Username or access code is incorrect. Try again",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn,
|
||||
}
|
||||
)
|
||||
// Check if the user is disabled
|
||||
if !user.enabled {
|
||||
err!(
|
||||
"This user has been disabled",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn
|
||||
}
|
||||
} else {
|
||||
)
|
||||
}
|
||||
|
||||
let password = data.password.as_ref().unwrap();
|
||||
|
||||
// If we get an auth request, we don't check the user's password, but the access code of the auth request
|
||||
if let Some(ref auth_request_id) = data.auth_request {
|
||||
let Some(auth_request) = AuthRequest::find_by_uuid_and_user(auth_request_id, &user.uuid, conn).await else {
|
||||
err!(
|
||||
"Auth request not found. Try again.",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
@@ -186,6 +187,24 @@ async fn _password_login(
|
||||
event: EventType::UserFailedLogIn,
|
||||
}
|
||||
)
|
||||
};
|
||||
|
||||
let expiration_time = auth_request.creation_date + chrono::Duration::minutes(5);
|
||||
let request_expired = Utc::now().naive_utc() >= expiration_time;
|
||||
|
||||
if auth_request.user_uuid != user.uuid
|
||||
|| !auth_request.approved.unwrap_or(false)
|
||||
|| request_expired
|
||||
|| ip.ip.to_string() != auth_request.request_ip
|
||||
|| !auth_request.check_access_code(password)
|
||||
{
|
||||
err!(
|
||||
"Username or access code is incorrect. Try again",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn,
|
||||
}
|
||||
)
|
||||
}
|
||||
} else if !user.check_valid_password(password) {
|
||||
err!(
|
||||
@@ -197,8 +216,8 @@ async fn _password_login(
|
||||
)
|
||||
}
|
||||
|
||||
// Change the KDF Iterations
|
||||
if user.password_iterations != CONFIG.password_iterations() {
|
||||
// Change the KDF Iterations (only when not logging in with an auth request)
|
||||
if data.auth_request.is_none() && user.password_iterations != CONFIG.password_iterations() {
|
||||
user.password_iterations = CONFIG.password_iterations();
|
||||
user.set_password(password, None, false, None);
|
||||
|
||||
@@ -207,17 +226,6 @@ async fn _password_login(
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the user is disabled
|
||||
if !user.enabled {
|
||||
err!(
|
||||
"This user has been disabled",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
|
||||
@@ -282,7 +290,7 @@ async fn _password_login(
|
||||
// Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
|
||||
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
|
||||
// ---
|
||||
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
// let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
|
||||
device.save(conn).await?;
|
||||
|
||||
@@ -350,7 +358,7 @@ async fn _password_login(
|
||||
|
||||
async fn _api_key_login(
|
||||
data: ConnectData,
|
||||
user_uuid: &mut Option<String>,
|
||||
user_id: &mut Option<UserId>,
|
||||
conn: &mut DbConn,
|
||||
ip: &ClientIp,
|
||||
) -> JsonResult {
|
||||
@@ -359,7 +367,7 @@ async fn _api_key_login(
|
||||
|
||||
// Validate scope
|
||||
match data.scope.as_ref().unwrap().as_ref() {
|
||||
"api" => _user_api_key_login(data, user_uuid, conn, ip).await,
|
||||
"api" => _user_api_key_login(data, user_id, conn, ip).await,
|
||||
"api.organization" => _organization_api_key_login(data, conn, ip).await,
|
||||
_ => err!("Scope not supported"),
|
||||
}
|
||||
@@ -367,23 +375,22 @@ async fn _api_key_login(
|
||||
|
||||
async fn _user_api_key_login(
|
||||
data: ConnectData,
|
||||
user_uuid: &mut Option<String>,
|
||||
user_id: &mut Option<UserId>,
|
||||
conn: &mut DbConn,
|
||||
ip: &ClientIp,
|
||||
) -> JsonResult {
|
||||
// Get the user via the client_id
|
||||
let client_id = data.client_id.as_ref().unwrap();
|
||||
let client_user_uuid = match client_id.strip_prefix("user.") {
|
||||
Some(uuid) => uuid,
|
||||
None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
|
||||
let Some(client_user_id) = client_id.strip_prefix("user.") else {
|
||||
err!("Malformed client_id", format!("IP: {}.", ip.ip))
|
||||
};
|
||||
let user = match User::find_by_uuid(client_user_uuid, conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
|
||||
let client_user_id: UserId = client_user_id.into();
|
||||
let Some(user) = User::find_by_uuid(&client_user_id, conn).await else {
|
||||
err!("Invalid client_id", format!("IP: {}.", ip.ip))
|
||||
};
|
||||
|
||||
// Set the user_uuid here to be passed back used for event logging.
|
||||
*user_uuid = Some(user.uuid.clone());
|
||||
// Set the user_id here to be passed back used for event logging.
|
||||
*user_id = Some(user.uuid.clone());
|
||||
|
||||
// Check if the user is disabled
|
||||
if !user.enabled {
|
||||
@@ -433,7 +440,7 @@ async fn _user_api_key_login(
|
||||
// Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
|
||||
// See: https://github.com/dani-garcia/vaultwarden/issues/4156
|
||||
// ---
|
||||
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
// let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
|
||||
device.save(conn).await?;
|
||||
|
||||
@@ -462,13 +469,12 @@ async fn _user_api_key_login(
|
||||
async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> JsonResult {
|
||||
// Get the org via the client_id
|
||||
let client_id = data.client_id.as_ref().unwrap();
|
||||
let org_uuid = match client_id.strip_prefix("organization.") {
|
||||
Some(uuid) => uuid,
|
||||
None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
|
||||
let Some(org_id) = client_id.strip_prefix("organization.") else {
|
||||
err!("Malformed client_id", format!("IP: {}.", ip.ip))
|
||||
};
|
||||
let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, conn).await {
|
||||
Some(org_api_key) => org_api_key,
|
||||
None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
|
||||
let org_id: OrganizationId = org_id.to_string().into();
|
||||
let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(&org_id, conn).await else {
|
||||
err!("Invalid client_id", format!("IP: {}.", ip.ip))
|
||||
};
|
||||
|
||||
// Check API key.
|
||||
@@ -609,7 +615,7 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
|
||||
|
||||
async fn _json_err_twofactor(
|
||||
providers: &[i32],
|
||||
user_uuid: &str,
|
||||
user_id: &UserId,
|
||||
data: &ConnectData,
|
||||
conn: &mut DbConn,
|
||||
) -> ApiResult<Value> {
|
||||
@@ -630,12 +636,12 @@ async fn _json_err_twofactor(
|
||||
Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
|
||||
|
||||
Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
|
||||
let request = webauthn::generate_webauthn_login(user_uuid, conn).await?;
|
||||
let request = webauthn::generate_webauthn_login(user_id, conn).await?;
|
||||
result["TwoFactorProviders2"][provider.to_string()] = request.0;
|
||||
}
|
||||
|
||||
Some(TwoFactorType::Duo) => {
|
||||
let email = match User::find_by_uuid(user_uuid, conn).await {
|
||||
let email = match User::find_by_uuid(user_id, conn).await {
|
||||
Some(u) => u.email,
|
||||
None => err!("User does not exist"),
|
||||
};
|
||||
@@ -667,9 +673,8 @@ async fn _json_err_twofactor(
|
||||
}
|
||||
|
||||
Some(tf_type @ TwoFactorType::YubiKey) => {
|
||||
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
|
||||
Some(tf) => tf,
|
||||
None => err!("No YubiKey devices registered"),
|
||||
let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, tf_type as i32, conn).await else {
|
||||
err!("No YubiKey devices registered")
|
||||
};
|
||||
|
||||
let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
|
||||
@@ -680,14 +685,13 @@ async fn _json_err_twofactor(
|
||||
}
|
||||
|
||||
Some(tf_type @ TwoFactorType::Email) => {
|
||||
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
|
||||
Some(tf) => tf,
|
||||
None => err!("No twofactor email registered"),
|
||||
let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, tf_type as i32, conn).await else {
|
||||
err!("No twofactor email registered")
|
||||
};
|
||||
|
||||
// Send email immediately if email is the only 2FA option
|
||||
if providers.len() == 1 {
|
||||
email::send_token(user_uuid, conn).await?
|
||||
email::send_token(user_id, conn).await?
|
||||
}
|
||||
|
||||
let email_data = email::EmailTokenData::from_json(&twofactor.data)?;
|
||||
@@ -742,7 +746,7 @@ struct ConnectData {
|
||||
|
||||
#[field(name = uncased("device_identifier"))]
|
||||
#[field(name = uncased("deviceidentifier"))]
|
||||
device_identifier: Option<String>,
|
||||
device_identifier: Option<DeviceId>,
|
||||
#[field(name = uncased("device_name"))]
|
||||
#[field(name = uncased("devicename"))]
|
||||
device_name: Option<String>,
|
||||
@@ -765,7 +769,7 @@ struct ConnectData {
|
||||
#[field(name = uncased("twofactorremember"))]
|
||||
two_factor_remember: Option<i32>,
|
||||
#[field(name = uncased("authrequest"))]
|
||||
auth_request: Option<String>,
|
||||
auth_request: Option<AuthRequestId>,
|
||||
}
|
||||
|
||||
fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
|
||||
|
||||
@@ -10,7 +10,7 @@ use rocket_ws::{Message, WebSocket};
|
||||
use crate::{
|
||||
auth::{ClientIp, WsAccessTokenHeader},
|
||||
db::{
|
||||
models::{Cipher, Folder, Send as DbSend, User},
|
||||
models::{AuthRequestId, Cipher, CollectionId, DeviceId, Folder, Send as DbSend, User, UserId},
|
||||
DbConn,
|
||||
},
|
||||
Error, CONFIG,
|
||||
@@ -53,13 +53,13 @@ struct WsAccessToken {
|
||||
|
||||
struct WSEntryMapGuard {
|
||||
users: Arc<WebSocketUsers>,
|
||||
user_uuid: String,
|
||||
user_uuid: UserId,
|
||||
entry_uuid: uuid::Uuid,
|
||||
addr: IpAddr,
|
||||
}
|
||||
|
||||
impl WSEntryMapGuard {
|
||||
fn new(users: Arc<WebSocketUsers>, user_uuid: String, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self {
|
||||
fn new(users: Arc<WebSocketUsers>, user_uuid: UserId, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self {
|
||||
Self {
|
||||
users,
|
||||
user_uuid,
|
||||
@@ -72,7 +72,7 @@ impl WSEntryMapGuard {
|
||||
impl Drop for WSEntryMapGuard {
|
||||
fn drop(&mut self) {
|
||||
info!("Closing WS connection from {}", self.addr);
|
||||
if let Some(mut entry) = self.users.map.get_mut(&self.user_uuid) {
|
||||
if let Some(mut entry) = self.users.map.get_mut(self.user_uuid.as_ref()) {
|
||||
entry.retain(|(uuid, _)| uuid != &self.entry_uuid);
|
||||
}
|
||||
}
|
||||
@@ -101,6 +101,7 @@ impl Drop for WSAnonymousEntryMapGuard {
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(tail_expr_drop_order)]
|
||||
#[get("/hub?<data..>")]
|
||||
fn websockets_hub<'r>(
|
||||
ws: WebSocket,
|
||||
@@ -129,7 +130,7 @@ fn websockets_hub<'r>(
|
||||
// Add a channel to send messages to this client to the map
|
||||
let entry_uuid = uuid::Uuid::new_v4();
|
||||
let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
|
||||
users.map.entry(claims.sub.clone()).or_default().push((entry_uuid, tx));
|
||||
users.map.entry(claims.sub.to_string()).or_default().push((entry_uuid, tx));
|
||||
|
||||
// Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
|
||||
(rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid, addr))
|
||||
@@ -156,7 +157,6 @@ fn websockets_hub<'r>(
|
||||
|
||||
if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
|
||||
yield Message::binary(INITIAL_RESPONSE);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,6 +186,7 @@ fn websockets_hub<'r>(
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(tail_expr_drop_order)]
|
||||
#[get("/anonymous-hub?<token..>")]
|
||||
fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result<rocket_ws::Stream!['r], Error> {
|
||||
let addr = ip.ip;
|
||||
@@ -223,7 +224,6 @@ fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> R
|
||||
|
||||
if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
|
||||
yield Message::binary(INITIAL_RESPONSE);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -290,7 +290,7 @@ fn serialize(val: Value) -> Vec<u8> {
|
||||
fn serialize_date(date: NaiveDateTime) -> Value {
|
||||
let seconds: i64 = date.and_utc().timestamp();
|
||||
let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
|
||||
let timestamp = nanos << 34 | seconds;
|
||||
let timestamp = (nanos << 34) | seconds;
|
||||
|
||||
let bs = timestamp.to_be_bytes();
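In the hunk above, `(nanos << 34) | seconds` packs the revision date into the 64-bit MessagePack timestamp layout used for the WebSocket notification payloads: the low 34 bits carry the seconds and the upper 30 bits the nanoseconds, and the added parentheses only make the existing operator precedence explicit. A small round-trip sketch of that packing, independent of the project's actual serializer:

```rust
/// Pack seconds (34 bits) and nanoseconds (30 bits) the way the
/// MessagePack timestamp-64 extension lays them out, then unpack again.
fn pack_timestamp64(seconds: i64, nanos: u32) -> u64 {
    ((nanos as u64) << 34) | (seconds as u64 & 0x3_FFFF_FFFF)
}

fn unpack_timestamp64(packed: u64) -> (i64, u32) {
    let seconds = (packed & 0x3_FFFF_FFFF) as i64;
    let nanos = (packed >> 34) as u32;
    (seconds, nanos)
}

fn main() {
    let packed = pack_timestamp64(1_700_000_000, 123_456_789);
    assert_eq!(unpack_timestamp64(packed), (1_700_000_000, 123_456_789));
    // The packed value is what `to_be_bytes()` turns into the on-the-wire bytes.
    println!("{packed:016x}");
}
```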
|
||||
|
||||
@@ -328,8 +328,8 @@ pub struct WebSocketUsers {
|
||||
}
|
||||
|
||||
impl WebSocketUsers {
|
||||
async fn send_update(&self, user_uuid: &str, data: &[u8]) {
|
||||
if let Some(user) = self.map.get(user_uuid).map(|v| v.clone()) {
|
||||
async fn send_update(&self, user_id: &UserId, data: &[u8]) {
|
||||
if let Some(user) = self.map.get(user_id.as_ref()).map(|v| v.clone()) {
|
||||
for (_, sender) in user.iter() {
|
||||
if let Err(e) = sender.send(Message::binary(data)).await {
|
||||
error!("Error sending WS update {e}");
|
||||
@@ -345,7 +345,7 @@ impl WebSocketUsers {
|
||||
return;
|
||||
}
|
||||
let data = create_update(
|
||||
vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
|
||||
vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))],
|
||||
ut,
|
||||
None,
|
||||
);
|
||||
@@ -359,15 +359,15 @@ impl WebSocketUsers {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) {
|
||||
pub async fn send_logout(&self, user: &User, acting_device_id: Option<DeviceId>) {
|
||||
// Skip any processing if both WebSockets and Push are not active
|
||||
if *NOTIFICATIONS_DISABLED {
|
||||
return;
|
||||
}
|
||||
let data = create_update(
|
||||
vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
|
||||
vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))],
|
||||
UpdateType::LogOut,
|
||||
acting_device_uuid.clone(),
|
||||
acting_device_id.clone(),
|
||||
);
|
||||
|
||||
if CONFIG.enable_websocket() {
|
||||
@@ -375,7 +375,7 @@ impl WebSocketUsers {
|
||||
}
|
||||
|
||||
if CONFIG.push_enabled() {
|
||||
push_logout(user, acting_device_uuid);
|
||||
push_logout(user, acting_device_id.clone());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -383,7 +383,7 @@ impl WebSocketUsers {
|
||||
&self,
|
||||
ut: UpdateType,
|
||||
folder: &Folder,
|
||||
acting_device_uuid: &String,
|
||||
acting_device_id: &DeviceId,
|
||||
conn: &mut DbConn,
|
||||
) {
|
||||
// Skip any processing if both WebSockets and Push are not active
|
||||
@@ -392,12 +392,12 @@ impl WebSocketUsers {
|
||||
}
|
||||
let data = create_update(
|
||||
vec![
|
||||
("Id".into(), folder.uuid.clone().into()),
|
||||
("UserId".into(), folder.user_uuid.clone().into()),
|
||||
("Id".into(), folder.uuid.to_string().into()),
|
||||
("UserId".into(), folder.user_uuid.to_string().into()),
|
||||
("RevisionDate".into(), serialize_date(folder.updated_at)),
|
||||
],
|
||||
ut,
|
||||
Some(acting_device_uuid.into()),
|
||||
Some(acting_device_id.clone()),
|
||||
);
|
||||
|
||||
if CONFIG.enable_websocket() {
|
||||
@@ -405,7 +405,7 @@ impl WebSocketUsers {
|
||||
}
|
||||
|
||||
if CONFIG.push_enabled() {
|
||||
push_folder_update(ut, folder, acting_device_uuid, conn).await;
|
||||
push_folder_update(ut, folder, acting_device_id, conn).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -413,48 +413,48 @@ impl WebSocketUsers {
|
||||
&self,
|
||||
ut: UpdateType,
|
||||
cipher: &Cipher,
|
||||
user_uuids: &[String],
|
||||
acting_device_uuid: &String,
|
||||
collection_uuids: Option<Vec<String>>,
|
||||
user_ids: &[UserId],
|
||||
acting_device_id: &DeviceId,
|
||||
collection_uuids: Option<Vec<CollectionId>>,
|
||||
conn: &mut DbConn,
|
||||
) {
|
||||
// Skip any processing if both WebSockets and Push are not active
|
||||
if *NOTIFICATIONS_DISABLED {
|
||||
return;
|
||||
}
|
||||
let org_uuid = convert_option(cipher.organization_uuid.clone());
|
||||
let org_id = convert_option(cipher.organization_uuid.as_deref());
|
||||
// Depending if there are collections provided or not, we need to have different values for the following variables.
|
||||
// The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change.
|
||||
let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
|
||||
let (user_id, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
|
||||
(
|
||||
Value::Nil,
|
||||
Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<Value>>()),
|
||||
Value::Array(collection_uuids.into_iter().map(|v| v.to_string().into()).collect::<Vec<Value>>()),
|
||||
serialize_date(Utc::now().naive_utc()),
|
||||
)
|
||||
} else {
|
||||
(convert_option(cipher.user_uuid.clone()), Value::Nil, serialize_date(cipher.updated_at))
|
||||
(convert_option(cipher.user_uuid.as_deref()), Value::Nil, serialize_date(cipher.updated_at))
|
||||
};
|
||||
|
||||
let data = create_update(
|
||||
vec![
|
||||
("Id".into(), cipher.uuid.clone().into()),
|
||||
("UserId".into(), user_uuid),
|
||||
("OrganizationId".into(), org_uuid),
|
||||
("Id".into(), cipher.uuid.to_string().into()),
|
||||
("UserId".into(), user_id),
|
||||
("OrganizationId".into(), org_id),
|
||||
("CollectionIds".into(), collection_uuids),
|
||||
("RevisionDate".into(), revision_date),
|
||||
],
|
||||
ut,
|
||||
Some(acting_device_uuid.into()),
|
||||
Some(acting_device_id.clone()),
|
||||
);
|
||||
|
||||
if CONFIG.enable_websocket() {
|
||||
for uuid in user_uuids {
|
||||
for uuid in user_ids {
|
||||
self.send_update(uuid, &data).await;
|
||||
}
|
||||
}
|
||||
|
||||
if CONFIG.push_enabled() && user_uuids.len() == 1 {
|
||||
push_cipher_update(ut, cipher, acting_device_uuid, conn).await;
|
||||
if CONFIG.push_enabled() && user_ids.len() == 1 {
|
||||
push_cipher_update(ut, cipher, acting_device_id, conn).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -462,20 +462,20 @@ impl WebSocketUsers {
|
||||
&self,
|
||||
ut: UpdateType,
|
||||
send: &DbSend,
|
||||
user_uuids: &[String],
|
||||
acting_device_uuid: &String,
|
||||
user_ids: &[UserId],
|
||||
acting_device_id: &DeviceId,
|
||||
conn: &mut DbConn,
|
||||
) {
|
||||
// Skip any processing if both WebSockets and Push are not active
|
||||
if *NOTIFICATIONS_DISABLED {
|
||||
return;
|
||||
}
|
||||
let user_uuid = convert_option(send.user_uuid.clone());
|
||||
let user_id = convert_option(send.user_uuid.as_deref());
|
||||
|
||||
let data = create_update(
|
||||
vec![
|
||||
("Id".into(), send.uuid.clone().into()),
|
||||
("UserId".into(), user_uuid),
|
||||
("Id".into(), send.uuid.to_string().into()),
|
||||
("UserId".into(), user_id),
|
||||
("RevisionDate".into(), serialize_date(send.revision_date)),
|
||||
],
|
||||
ut,
|
||||
@@ -483,20 +483,20 @@ impl WebSocketUsers {
|
||||
);
|
||||
|
||||
if CONFIG.enable_websocket() {
|
||||
for uuid in user_uuids {
|
||||
for uuid in user_ids {
|
||||
self.send_update(uuid, &data).await;
|
||||
}
|
||||
}
|
||||
if CONFIG.push_enabled() && user_uuids.len() == 1 {
|
||||
push_send_update(ut, send, acting_device_uuid, conn).await;
|
||||
if CONFIG.push_enabled() && user_ids.len() == 1 {
|
||||
push_send_update(ut, send, acting_device_id, conn).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn send_auth_request(
|
||||
&self,
|
||||
user_uuid: &String,
|
||||
user_id: &UserId,
|
||||
auth_request_uuid: &String,
|
||||
acting_device_uuid: &String,
|
||||
acting_device_id: &DeviceId,
|
||||
conn: &mut DbConn,
|
||||
) {
|
||||
// Skip any processing if both WebSockets and Push are not active
|
||||
@@ -504,24 +504,24 @@ impl WebSocketUsers {
|
||||
return;
|
||||
}
|
||||
let data = create_update(
|
||||
vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())],
|
||||
vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_id.to_string().into())],
|
||||
UpdateType::AuthRequest,
|
||||
Some(acting_device_uuid.to_string()),
|
||||
Some(acting_device_id.clone()),
|
||||
);
|
||||
if CONFIG.enable_websocket() {
|
||||
self.send_update(user_uuid, &data).await;
|
||||
self.send_update(user_id, &data).await;
|
||||
}
|
||||
|
||||
if CONFIG.push_enabled() {
|
||||
push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await;
|
||||
push_auth_request(user_id.clone(), auth_request_uuid.to_string(), conn).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn send_auth_response(
|
||||
&self,
|
||||
user_uuid: &String,
|
||||
auth_response_uuid: &str,
|
||||
approving_device_uuid: String,
|
||||
user_id: &UserId,
|
||||
auth_request_id: &AuthRequestId,
|
||||
approving_device_id: &DeviceId,
|
||||
conn: &mut DbConn,
|
||||
) {
|
||||
// Skip any processing if both WebSockets and Push are not active
|
||||
@@ -529,17 +529,16 @@ impl WebSocketUsers {
|
||||
return;
|
||||
}
|
||||
let data = create_update(
|
||||
vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
|
||||
vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())],
|
||||
UpdateType::AuthRequestResponse,
|
||||
approving_device_uuid.clone().into(),
|
||||
Some(approving_device_id.clone()),
|
||||
);
|
||||
if CONFIG.enable_websocket() {
|
||||
self.send_update(auth_response_uuid, &data).await;
|
||||
self.send_update(user_id, &data).await;
|
||||
}
|
||||
|
||||
if CONFIG.push_enabled() {
|
||||
push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn)
|
||||
.await;
|
||||
push_auth_response(user_id, auth_request_id, approving_device_id, conn).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -558,16 +557,16 @@ impl AnonymousWebSocketSubscriptions {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) {
|
||||
pub async fn send_auth_response(&self, user_id: &UserId, auth_request_id: &AuthRequestId) {
|
||||
if !CONFIG.enable_websocket() {
|
||||
return;
|
||||
}
|
||||
let data = create_anonymous_update(
|
||||
vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
|
||||
vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())],
|
||||
UpdateType::AuthRequestResponse,
|
||||
user_uuid.to_string(),
|
||||
user_id.clone(),
|
||||
);
|
||||
self.send_update(auth_response_uuid, &data).await;
|
||||
self.send_update(auth_request_id, &data).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -579,14 +578,14 @@ impl AnonymousWebSocketSubscriptions {
|
||||
"ReceiveMessage", // Target
|
||||
[ // Arguments
|
||||
{
|
||||
"ContextId": acting_device_uuid || Nil,
|
||||
"ContextId": acting_device_id || Nil,
|
||||
"Type": ut as i32,
|
||||
"Payload": {}
|
||||
}
|
||||
]
|
||||
]
|
||||
*/
|
||||
fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uuid: Option<String>) -> Vec<u8> {
|
||||
fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_id: Option<DeviceId>) -> Vec<u8> {
|
||||
use rmpv::Value as V;
|
||||
|
||||
let value = V::Array(vec![
|
||||
@@ -595,7 +594,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui
|
||||
V::Nil,
|
||||
"ReceiveMessage".into(),
|
||||
V::Array(vec![V::Map(vec![
|
||||
("ContextId".into(), acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| V::Nil)),
|
||||
("ContextId".into(), acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| V::Nil)),
|
||||
("Type".into(), (ut as i32).into()),
|
||||
("Payload".into(), payload.into()),
|
||||
])]),
|
||||
@@ -604,7 +603,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui
|
||||
serialize(value)
|
||||
}
|
||||
|
||||
fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: String) -> Vec<u8> {
|
||||
fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: UserId) -> Vec<u8> {
|
||||
use rmpv::Value as V;
|
||||
|
||||
let value = V::Array(vec![
|
||||
@@ -615,7 +614,7 @@ fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id
|
||||
V::Array(vec![V::Map(vec![
|
||||
("Type".into(), (ut as i32).into()),
|
||||
("Payload".into(), payload.into()),
|
||||
("UserId".into(), user_id.into()),
|
||||
("UserId".into(), user_id.to_string().into()),
|
||||
])]),
|
||||
]);
|
||||
|
||||
|
||||
104 src/api/push.rs
@@ -7,8 +7,9 @@ use tokio::sync::RwLock;

use crate::{
api::{ApiResult, EmptyResult, UpdateType},
db::models::{Cipher, Device, Folder, Send, User},
db::models::{AuthRequestId, Cipher, Device, DeviceId, Folder, Send, User, UserId},
http_client::make_http_request,
util::format_date,
CONFIG,
};

@@ -125,15 +126,15 @@ pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbC
Ok(())
}

pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult {
if !CONFIG.push_enabled() || push_uuid.is_none() {
pub async fn unregister_push_device(push_id: Option<String>) -> EmptyResult {
if !CONFIG.push_enabled() || push_id.is_none() {
return Ok(());
}
let auth_push_token = get_auth_push_token().await?;

let auth_header = format!("Bearer {}", &auth_push_token);

match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_uuid.unwrap()))?
match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_id.unwrap()))?
.header(AUTHORIZATION, auth_header)
.send()
.await
@@ -147,51 +148,48 @@ pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult {
pub async fn push_cipher_update(
ut: UpdateType,
cipher: &Cipher,
acting_device_uuid: &String,
acting_device_id: &DeviceId,
conn: &mut crate::db::DbConn,
) {
// We shouldn't send a push notification on cipher update if the cipher belongs to an organization, this isn't implemented in the upstream server too.
if cipher.organization_uuid.is_some() {
return;
};
let user_uuid = match &cipher.user_uuid {
Some(c) => c,
None => {
debug!("Cipher has no uuid");
return;
}
let Some(user_id) = &cipher.user_uuid else {
debug!("Cipher has no uuid");
return;
};

if Device::check_user_has_push_device(user_uuid, conn).await {
if Device::check_user_has_push_device(user_id, conn).await {
send_to_push_relay(json!({
"userId": user_uuid,
"userId": user_id,
"organizationId": (),
"deviceId": acting_device_uuid,
"identifier": acting_device_uuid,
"deviceId": acting_device_id,
"identifier": acting_device_id,
"type": ut as i32,
"payload": {
"id": cipher.uuid,
"userId": cipher.user_uuid,
"organizationId": (),
"revisionDate": cipher.updated_at
"Id": cipher.uuid,
"UserId": cipher.user_uuid,
"OrganizationId": (),
"RevisionDate": format_date(&cipher.updated_at)
}
}))
.await;
}
}

pub fn push_logout(user: &User, acting_device_uuid: Option<String>) {
let acting_device_uuid: Value = acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| Value::Null);
pub fn push_logout(user: &User, acting_device_id: Option<DeviceId>) {
let acting_device_id: Value = acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| Value::Null);

tokio::task::spawn(send_to_push_relay(json!({
"userId": user.uuid,
"organizationId": (),
"deviceId": acting_device_uuid,
"identifier": acting_device_uuid,
"deviceId": acting_device_id,
"identifier": acting_device_id,
"type": UpdateType::LogOut as i32,
"payload": {
"userId": user.uuid,
"date": user.updated_at
"UserId": user.uuid,
"Date": format_date(&user.updated_at)
}
})));
}
@@ -204,8 +202,8 @@ pub fn push_user_update(ut: UpdateType, user: &User) {
"identifier": (),
"type": ut as i32,
"payload": {
"userId": user.uuid,
"date": user.updated_at
"UserId": user.uuid,
"Date": format_date(&user.updated_at)
}
})));
}
@@ -213,38 +211,38 @@ pub fn push_user_update(ut: UpdateType, user: &User) {
pub async fn push_folder_update(
ut: UpdateType,
folder: &Folder,
acting_device_uuid: &String,
acting_device_id: &DeviceId,
conn: &mut crate::db::DbConn,
) {
if Device::check_user_has_push_device(&folder.user_uuid, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": folder.user_uuid,
"organizationId": (),
"deviceId": acting_device_uuid,
"identifier": acting_device_uuid,
"deviceId": acting_device_id,
"identifier": acting_device_id,
"type": ut as i32,
"payload": {
"id": folder.uuid,
"userId": folder.user_uuid,
"revisionDate": folder.updated_at
"Id": folder.uuid,
"UserId": folder.user_uuid,
"RevisionDate": format_date(&folder.updated_at)
}
})));
}
}

pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_uuid: &String, conn: &mut crate::db::DbConn) {
pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_id: &DeviceId, conn: &mut crate::db::DbConn) {
if let Some(s) = &send.user_uuid {
if Device::check_user_has_push_device(s, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": send.user_uuid,
"organizationId": (),
"deviceId": acting_device_uuid,
"identifier": acting_device_uuid,
"deviceId": acting_device_id,
"identifier": acting_device_id,
"type": ut as i32,
"payload": {
"id": send.uuid,
"userId": send.user_uuid,
"revisionDate": send.revision_date
"Id": send.uuid,
"UserId": send.user_uuid,
"RevisionDate": format_date(&send.revision_date)
}
})));
}
@@ -286,38 +284,38 @@ async fn send_to_push_relay(notification_data: Value) {
};
}

pub async fn push_auth_request(user_uuid: String, auth_request_uuid: String, conn: &mut crate::db::DbConn) {
if Device::check_user_has_push_device(user_uuid.as_str(), conn).await {
pub async fn push_auth_request(user_id: UserId, auth_request_id: String, conn: &mut crate::db::DbConn) {
if Device::check_user_has_push_device(&user_id, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": user_uuid,
"userId": user_id,
"organizationId": (),
"deviceId": null,
"identifier": null,
"type": UpdateType::AuthRequest as i32,
"payload": {
"id": auth_request_uuid,
"userId": user_uuid,
"Id": auth_request_id,
"UserId": user_id,
}
})));
}
}

pub async fn push_auth_response(
user_uuid: String,
auth_request_uuid: String,
approving_device_uuid: String,
user_id: &UserId,
auth_request_id: &AuthRequestId,
approving_device_id: &DeviceId,
conn: &mut crate::db::DbConn,
) {
if Device::check_user_has_push_device(user_uuid.as_str(), conn).await {
if Device::check_user_has_push_device(user_id, conn).await {
tokio::task::spawn(send_to_push_relay(json!({
"userId": user_uuid,
"userId": user_id,
"organizationId": (),
"deviceId": approving_device_uuid,
"identifier": approving_device_uuid,
"deviceId": approving_device_id,
"identifier": approving_device_id,
"type": UpdateType::AuthRequestResponse as i32,
"payload": {
"id": auth_request_uuid,
"userId": user_uuid,
"Id": auth_request_id,
"UserId": user_id,
}
})));
}

@@ -1,13 +1,20 @@
use std::path::{Path, PathBuf};

use rocket::{fs::NamedFile, http::ContentType, response::content::RawHtml as Html, serde::json::Json, Catcher, Route};
use rocket::{
fs::NamedFile,
http::ContentType,
response::{content::RawCss as Css, content::RawHtml as Html, Redirect},
serde::json::Json,
Catcher, Route,
};
use serde_json::Value;

use crate::{
api::{core::now, ApiResult, EmptyResult},
auth::decode_file_download,
db::models::{AttachmentId, CipherId},
error::Error,
util::{Cached, SafeString},
util::Cached,
CONFIG,
};

@@ -16,7 +23,7 @@ pub fn routes() -> Vec<Route> {
// crate::utils::LOGGED_ROUTES to make sure they appear in the log
let mut routes = routes![attachments, alive, alive_head, static_files];
if CONFIG.web_vault_enabled() {
routes.append(&mut routes![web_index, web_index_head, app_id, web_files]);
routes.append(&mut routes![web_index, web_index_direct, web_index_head, app_id, web_files, vaultwarden_css]);
}

#[cfg(debug_assertions)]
@@ -45,11 +52,65 @@ fn not_found() -> ApiResult<Html<String>> {
Ok(Html(text))
}

#[get("/css/vaultwarden.css")]
fn vaultwarden_css() -> Cached<Css<String>> {
let css_options = json!({
"signup_disabled": !CONFIG.signups_allowed() && CONFIG.signups_domains_whitelist().is_empty(),
"mail_enabled": CONFIG.mail_enabled(),
"yubico_enabled": CONFIG._enable_yubico() && (CONFIG.yubico_client_id().is_some() == CONFIG.yubico_secret_key().is_some()),
"emergency_access_allowed": CONFIG.emergency_access_allowed(),
"sends_allowed": CONFIG.sends_allowed(),
"load_user_scss": true,
});

let scss = match CONFIG.render_template("scss/vaultwarden.scss", &css_options) {
Ok(t) => t,
Err(e) => {
// Something went wrong loading the template. Use the fallback
warn!("Loading scss/vaultwarden.scss.hbs or scss/user.vaultwarden.scss.hbs failed. {e}");
CONFIG
.render_fallback_template("scss/vaultwarden.scss", &css_options)
.expect("Fallback scss/vaultwarden.scss.hbs to render")
}
};

let css = match grass_compiler::from_string(
scss,
&grass_compiler::Options::default().style(grass_compiler::OutputStyle::Compressed),
) {
Ok(css) => css,
Err(e) => {
// Something went wrong compiling the scss. Use the fallback
warn!("Compiling the Vaultwarden SCSS styles failed. {e}");
let mut css_options = css_options;
css_options["load_user_scss"] = json!(false);
let scss = CONFIG
.render_fallback_template("scss/vaultwarden.scss", &css_options)
.expect("Fallback scss/vaultwarden.scss.hbs to render");
grass_compiler::from_string(
scss,
&grass_compiler::Options::default().style(grass_compiler::OutputStyle::Compressed),
)
.expect("SCSS to compile")
}
};

// Cache for one day should be enough and not too much
Cached::ttl(Css(css), 86_400, false)
}

#[get("/")]
async fn web_index() -> Cached<Option<NamedFile>> {
Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).await.ok(), false)
}

// Make sure that `/index.html` redirect to actual domain path.
// If not, this might cause issues with the web-vault
#[get("/index.html")]
fn web_index_direct() -> Redirect {
Redirect::to(format!("{}/", CONFIG.domain_path()))
}

#[head("/")]
fn web_index_head() -> EmptyResult {
// Add an explicit HEAD route to prevent uptime monitoring services from
@@ -98,16 +159,16 @@ async fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true)
}

#[get("/attachments/<uuid>/<file_id>?<token>")]
async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Option<NamedFile> {
#[get("/attachments/<cipher_id>/<file_id>?<token>")]
async fn attachments(cipher_id: CipherId, file_id: AttachmentId, token: String) -> Option<NamedFile> {
let Ok(claims) = decode_file_download(&token) else {
return None;
};
if claims.sub != *uuid || claims.file_id != *file_id {
if claims.sub != cipher_id || claims.file_id != file_id {
return None;
}

NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).await.ok()
NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(cipher_id.as_ref()).join(file_id.as_ref())).await.ok()
}

// We use DbConn here to let the alive healthcheck also verify the database connection.
246 src/auth.rs
@@ -14,6 +14,10 @@ use std::{
net::IpAddr,
};

use crate::db::models::{
AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
SendFileId, SendId, UserId,
};
use crate::{error::Error, CONFIG};

const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
@@ -150,7 +154,7 @@ pub struct LoginJwtClaims {
// Issuer
pub iss: String,
// Subject
pub sub: String,
pub sub: UserId,

pub premium: bool,
pub name: String,
@@ -171,7 +175,7 @@ pub struct LoginJwtClaims {
// user security_stamp
pub sstamp: String,
// device uuid
pub device: String,
pub device: DeviceId,
// [ "api", "offline_access" ]
pub scope: Vec<String>,
// [ "Application" ]
@@ -187,19 +191,19 @@ pub struct InviteJwtClaims {
// Issuer
pub iss: String,
// Subject
pub sub: String,
pub sub: UserId,

pub email: String,
pub org_id: Option<String>,
pub user_org_id: Option<String>,
pub org_id: OrganizationId,
pub member_id: MembershipId,
pub invited_by_email: Option<String>,
}

pub fn generate_invite_claims(
uuid: String,
user_id: UserId,
email: String,
org_id: Option<String>,
user_org_id: Option<String>,
org_id: OrganizationId,
member_id: MembershipId,
invited_by_email: Option<String>,
) -> InviteJwtClaims {
let time_now = Utc::now();
@@ -208,10 +212,10 @@ pub fn generate_invite_claims(
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
iss: JWT_INVITE_ISSUER.to_string(),
sub: uuid,
sub: user_id,
email,
org_id,
user_org_id,
member_id,
invited_by_email,
}
}
@@ -225,18 +229,18 @@ pub struct EmergencyAccessInviteJwtClaims {
// Issuer
pub iss: String,
// Subject
pub sub: String,
pub sub: UserId,

pub email: String,
pub emer_id: String,
pub emer_id: EmergencyAccessId,
pub grantor_name: String,
pub grantor_email: String,
}

pub fn generate_emergency_access_invite_claims(
uuid: String,
user_id: UserId,
email: String,
emer_id: String,
emer_id: EmergencyAccessId,
grantor_name: String,
grantor_email: String,
) -> EmergencyAccessInviteJwtClaims {
@@ -246,7 +250,7 @@ pub fn generate_emergency_access_invite_claims(
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
sub: uuid,
sub: user_id,
email,
emer_id,
grantor_name,
@@ -263,21 +267,24 @@ pub struct OrgApiKeyLoginJwtClaims {
// Issuer
pub iss: String,
// Subject
pub sub: String,
pub sub: OrgApiKeyId,

pub client_id: String,
pub client_sub: String,
pub client_sub: OrganizationId,
pub scope: Vec<String>,
}

pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims {
pub fn generate_organization_api_key_login_claims(
org_api_key_uuid: OrgApiKeyId,
org_id: OrganizationId,
) -> OrgApiKeyLoginJwtClaims {
let time_now = Utc::now();
OrgApiKeyLoginJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(),
iss: JWT_ORG_API_KEY_ISSUER.to_string(),
sub: uuid,
client_id: format!("organization.{org_id}"),
sub: org_api_key_uuid,
client_id: format!("organization.{}", org_id),
client_sub: org_id,
scope: vec!["api.organization".into()],
}
@@ -292,18 +299,18 @@ pub struct FileDownloadClaims {
// Issuer
pub iss: String,
// Subject
pub sub: String,
pub sub: CipherId,

pub file_id: String,
pub file_id: AttachmentId,
}

pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims {
pub fn generate_file_download_claims(cipher_id: CipherId, file_id: AttachmentId) -> FileDownloadClaims {
let time_now = Utc::now();
FileDownloadClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(),
iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(),
sub: uuid,
sub: cipher_id,
file_id,
}
}
@@ -331,14 +338,14 @@ pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
}
}

pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
pub fn generate_verify_email_claims(user_id: UserId) -> BasicJwtClaims {
let time_now = Utc::now();
let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
BasicJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
sub: uuid,
sub: user_id.to_string(),
}
}

@@ -352,7 +359,7 @@ pub fn generate_admin_claims() -> BasicJwtClaims {
}
}

pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
pub fn generate_send_claims(send_id: &SendId, file_id: &SendFileId) -> BasicJwtClaims {
let time_now = Utc::now();
BasicJwtClaims {
nbf: time_now.timestamp(),
@@ -371,7 +378,7 @@ use rocket::{
};

use crate::db::{
models::{Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
models::{Collection, Device, Membership, MembershipStatus, MembershipType, User, UserStampException},
DbConn,
};

@@ -471,36 +478,32 @@ impl<'r> FromRequest<'r> for Headers {
};

// Check JWT token is valid and get device and user from it
let claims = match decode_login(access_token) {
Ok(claims) => claims,
Err(_) => err_handler!("Invalid claim"),
let Ok(claims) = decode_login(access_token) else {
err_handler!("Invalid claim")
};

let device_uuid = claims.device;
let user_uuid = claims.sub;
let device_id = claims.device;
let user_id = claims.sub;

let mut conn = match DbConn::from_request(request).await {
Outcome::Success(conn) => conn,
_ => err_handler!("Error getting DB"),
};

let device = match Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &mut conn).await {
Some(device) => device,
None => err_handler!("Invalid device id"),
let Some(device) = Device::find_by_uuid_and_user(&device_id, &user_id, &mut conn).await else {
err_handler!("Invalid device id")
};

let user = match User::find_by_uuid(&user_uuid, &mut conn).await {
Some(user) => user,
None => err_handler!("Device has no user associated"),
let Some(user) = User::find_by_uuid(&user_id, &mut conn).await else {
err_handler!("Device has no user associated")
};

if user.security_stamp != claims.sstamp {
if let Some(stamp_exception) =
user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
{
let current_route = match request.route().and_then(|r| r.name.as_deref()) {
Some(name) => name,
_ => err_handler!("Error getting current route for stamp exception"),
let Some(current_route) = request.route().and_then(|r| r.name.as_deref()) else {
err_handler!("Error getting current route for stamp exception")
};

// Check if the stamp exception has expired first.
@@ -538,11 +541,30 @@ pub struct OrgHeaders {
pub host: String,
pub device: Device,
pub user: User,
pub org_user_type: UserOrgType,
pub org_user: UserOrganization,
pub membership_type: MembershipType,
pub membership_status: MembershipStatus,
pub membership: Membership,
pub ip: ClientIp,
}

impl OrgHeaders {
fn is_member(&self) -> bool {
// NOTE: we don't care about MembershipStatus at the moment because this is only used
// where an invited, accepted or confirmed user is expected if this ever changes or
// if from_i32 is changed to return Some(Revoked) this check needs to be changed accordingly
self.membership_type >= MembershipType::User
}
fn is_confirmed_and_admin(&self) -> bool {
self.membership_status == MembershipStatus::Confirmed && self.membership_type >= MembershipType::Admin
}
fn is_confirmed_and_manager(&self) -> bool {
self.membership_status == MembershipStatus::Confirmed && self.membership_type >= MembershipType::Manager
}
fn is_confirmed_and_owner(&self) -> bool {
self.membership_status == MembershipStatus::Confirmed && self.membership_type == MembershipType::Owner
}
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgHeaders {
type Error = &'static str;
@@ -553,55 +575,50 @@ impl<'r> FromRequest<'r> for OrgHeaders {
// org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
let url_org_id: Option<&str> = {
let mut url_org_id = None;
if let Some(Ok(org_id)) = request.param::<&str>(1) {
if uuid::Uuid::parse_str(org_id).is_ok() {
url_org_id = Some(org_id);
}
let url_org_id: Option<OrganizationId> = {
if let Some(Ok(org_id)) = request.param::<OrganizationId>(1) {
Some(org_id.clone())
} else if let Some(Ok(org_id)) = request.query_value::<OrganizationId>("organizationId") {
Some(org_id.clone())
} else {
None
}

if let Some(Ok(org_id)) = request.query_value::<&str>("organizationId") {
if uuid::Uuid::parse_str(org_id).is_ok() {
url_org_id = Some(org_id);
}
}

url_org_id
};

match url_org_id {
Some(org_id) => {
Some(org_id) if uuid::Uuid::parse_str(&org_id).is_ok() => {
let mut conn = match DbConn::from_request(request).await {
Outcome::Success(conn) => conn,
_ => err_handler!("Error getting DB"),
};

let user = headers.user;
let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn).await {
Some(user) => {
if user.status == UserOrgStatus::Confirmed as i32 {
user
} else {
err_handler!("The current user isn't confirmed member of the organization")
}
}
None => err_handler!("The current user isn't member of the organization"),
let Some(membership) = Membership::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await else {
err_handler!("The current user isn't member of the organization");
};

Outcome::Success(Self {
host: headers.host,
device: headers.device,
user,
org_user_type: {
if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
org_usr_type
membership_type: {
if let Some(member_type) = MembershipType::from_i32(membership.atype) {
member_type
} else {
// This should only happen if the DB is corrupted
err_handler!("Unknown user type in the database")
}
},
org_user,
membership_status: {
if let Some(member_status) = MembershipStatus::from_i32(membership.status) {
// NOTE: add additional check for revoked if from_i32 is ever changed
// to return Revoked status.
member_status
} else {
err_handler!("User status is either revoked or invalid.")
}
},
membership,
ip: headers.ip,
})
}
@@ -614,9 +631,9 @@ pub struct AdminHeaders {
pub host: String,
pub device: Device,
pub user: User,
pub org_user_type: UserOrgType,
pub client_version: Option<String>,
pub membership_type: MembershipType,
pub ip: ClientIp,
pub org_id: OrganizationId,
}

#[rocket::async_trait]
@@ -625,15 +642,14 @@ impl<'r> FromRequest<'r> for AdminHeaders {

async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
let client_version = request.headers().get_one("Bitwarden-Client-Version").map(String::from);
if headers.org_user_type >= UserOrgType::Admin {
if headers.is_confirmed_and_admin() {
Outcome::Success(Self {
host: headers.host,
device: headers.device,
user: headers.user,
org_user_type: headers.org_user_type,
client_version,
membership_type: headers.membership_type,
ip: headers.ip,
org_id: headers.membership.org_uuid,
})
} else {
err_handler!("You need to be Admin or Owner to call this endpoint")
@@ -655,16 +671,16 @@ impl From<AdminHeaders> for Headers {
// col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"),
// but there could be cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
fn get_col_id(request: &Request<'_>) -> Option<String> {
fn get_col_id(request: &Request<'_>) -> Option<CollectionId> {
if let Some(Ok(col_id)) = request.param::<String>(3) {
if uuid::Uuid::parse_str(&col_id).is_ok() {
return Some(col_id);
return Some(col_id.into());
}
}

if let Some(Ok(col_id)) = request.query_value::<String>("collectionId") {
if uuid::Uuid::parse_str(&col_id).is_ok() {
return Some(col_id);
return Some(col_id.into());
}
}

@@ -679,6 +695,7 @@ pub struct ManagerHeaders {
pub device: Device,
pub user: User,
pub ip: ClientIp,
pub org_id: OrganizationId,
}

#[rocket::async_trait]
@@ -687,7 +704,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {

async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type >= UserOrgType::Manager {
if headers.is_confirmed_and_manager() {
match get_col_id(request) {
Some(col_id) => {
let mut conn = match DbConn::from_request(request).await {
@@ -695,7 +712,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
_ => err_handler!("Error getting DB"),
};

if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
if !Collection::can_access_collection(&headers.membership, &col_id, &mut conn).await {
err_handler!("The current user isn't a manager for this collection")
}
}
@@ -707,6 +724,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
device: headers.device,
user: headers.user,
ip: headers.ip,
org_id: headers.membership.org_uuid,
})
} else {
err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
@@ -731,7 +749,7 @@ pub struct ManagerHeadersLoose {
pub host: String,
pub device: Device,
pub user: User,
pub org_user: UserOrganization,
pub membership: Membership,
pub ip: ClientIp,
}

@@ -741,12 +759,12 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {

async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type >= UserOrgType::Manager {
if headers.is_confirmed_and_manager() {
Outcome::Success(Self {
host: headers.host,
device: headers.device,
user: headers.user,
org_user: headers.org_user,
membership: headers.membership,
ip: headers.ip,
})
} else {
@@ -769,14 +787,14 @@ impl From<ManagerHeadersLoose> for Headers {
impl ManagerHeaders {
pub async fn from_loose(
h: ManagerHeadersLoose,
collections: &Vec<String>,
collections: &Vec<CollectionId>,
conn: &mut DbConn,
) -> Result<ManagerHeaders, Error> {
for col_id in collections {
if uuid::Uuid::parse_str(col_id).is_err() {
if uuid::Uuid::parse_str(col_id.as_ref()).is_err() {
err!("Collection Id is malformed!");
}
if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
if !Collection::can_access_collection(&h.membership, col_id, conn).await {
err!("You don't have access to all collections!");
}
}
@@ -786,6 +804,7 @@ impl ManagerHeaders {
device: h.device,
user: h.user,
ip: h.ip,
org_id: h.membership.org_uuid,
})
}
}
@@ -794,6 +813,7 @@ pub struct OwnerHeaders {
pub device: Device,
pub user: User,
pub ip: ClientIp,
pub org_id: OrganizationId,
}

#[rocket::async_trait]
@@ -802,11 +822,12 @@ impl<'r> FromRequest<'r> for OwnerHeaders {

async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type == UserOrgType::Owner {
if headers.is_confirmed_and_owner() {
Outcome::Success(Self {
device: headers.device,
user: headers.user,
ip: headers.ip,
org_id: headers.membership.org_uuid,
})
} else {
err_handler!("You need to be Owner to call this endpoint")
@@ -814,6 +835,30 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
}
}

pub struct OrgMemberHeaders {
pub host: String,
pub user: User,
pub org_id: OrganizationId,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgMemberHeaders {
type Error = &'static str;

async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.is_member() {
Outcome::Success(Self {
host: headers.host,
user: headers.user,
org_id: headers.membership.org_uuid,
})
} else {
err_handler!("You need to be a Member of the Organization to call this endpoint")
}
}
}

//
// Client IP address detection
//
@@ -900,3 +945,24 @@ impl<'r> FromRequest<'r> for WsAccessTokenHeader {
})
}
}

pub struct ClientVersion(pub semver::Version);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for ClientVersion {
type Error = &'static str;

async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = request.headers();

let Some(version) = headers.get_one("Bitwarden-Client-Version") else {
err_handler!("No Bitwarden-Client-Version header provided")
};

let Ok(version) = semver::Version::parse(version) else {
err_handler!("Invalid Bitwarden-Client-Version header provided")
};

Outcome::Success(ClientVersion(version))
}
}
141 src/config.rs
@@ -1,8 +1,10 @@
|
||||
use std::env::consts::EXE_SUFFIX;
|
||||
use std::process::exit;
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
RwLock,
|
||||
use std::{
|
||||
env::consts::EXE_SUFFIX,
|
||||
process::exit,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
RwLock,
|
||||
},
|
||||
};
|
||||
|
||||
use job_scheduler_ng::Schedule;
|
||||
@@ -12,7 +14,7 @@ use reqwest::Url;
|
||||
use crate::{
|
||||
db::DbConnType,
|
||||
error::Error,
|
||||
util::{get_env, get_env_bool, parse_experimental_client_feature_flags},
|
||||
util::{get_env, get_env_bool, get_web_vault_version, is_valid_email, parse_experimental_client_feature_flags},
|
||||
};
|
||||
|
||||
static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
|
||||
@@ -114,6 +116,14 @@ macro_rules! make_config {
|
||||
serde_json::from_str(&config_str).map_err(Into::into)
|
||||
}
|
||||
|
||||
fn clear_non_editable(&mut self) {
|
||||
$($(
|
||||
if !$editable {
|
||||
self.$name = None;
|
||||
}
|
||||
)+)+
|
||||
}
|
||||
|
||||
/// Merges the values of both builders into a new builder.
|
||||
/// If both have the same element, `other` wins.
|
||||
fn merge(&self, other: &Self, show_overrides: bool, overrides: &mut Vec<String>) -> Self {
|
||||
@@ -238,6 +248,7 @@ macro_rules! make_config {
|
||||
// Besides Pass, only String types will be masked via _privacy_mask.
|
||||
const PRIVACY_CONFIG: &[&str] = &[
|
||||
"allowed_iframe_ancestors",
|
||||
"allowed_connect_src",
|
||||
"database_url",
|
||||
"domain_origin",
|
||||
"domain_path",
|
||||
@@ -248,6 +259,7 @@ macro_rules! make_config {
|
||||
"smtp_from",
|
||||
"smtp_host",
|
||||
"smtp_username",
|
||||
"_smtp_img_src",
|
||||
];
|
||||
|
||||
let cfg = {
|
||||
@@ -497,11 +509,11 @@ make_config! {
|
||||
/// Password iterations |> Number of server-side passwords hashing iterations for the password hash.
|
||||
/// The default for new users. If changed, it will be updated during login for existing users.
|
||||
password_iterations: i32, true, def, 600_000;
|
||||
/// Allow password hints |> Controls whether users can set password hints. This setting applies globally to all users.
|
||||
/// Allow password hints |> Controls whether users can set or show password hints. This setting applies globally to all users.
|
||||
password_hints_allowed: bool, true, def, true;
|
||||
/// Show password hint |> Controls whether a password hint should be shown directly in the web page
|
||||
/// if SMTP service is not configured. Not recommended for publicly-accessible instances as this
|
||||
/// provides unauthenticated access to potentially sensitive data.
|
||||
/// Show password hint (Know the risks!) |> Controls whether a password hint should be shown directly in the web page
|
||||
/// if SMTP service is not configured and password hints are allowed. Not recommended for publicly-accessible instances
|
||||
/// because this provides unauthenticated access to potentially sensitive data.
|
||||
show_password_hint: bool, true, def, false;
|
||||
|
||||
/// Admin token/Argon2 PHC |> The plain text token or Argon2 PHC string used to authenticate in this very same page. Changing it here will not deauthorize the current session!
|
||||
@@ -609,6 +621,9 @@ make_config! {
|
||||
/// Allowed iframe ancestors (Know the risks!) |> Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
|
||||
allowed_iframe_ancestors: String, true, def, String::new();
|
||||
|
||||
/// Allowed connect-src (Know the risks!) |> Allows other domains to URLs which can be loaded using script interfaces like the Forwarded email alias feature
|
||||
allowed_connect_src: String, true, def, String::new();
|
||||
|
||||
/// Seconds between login requests |> Number of seconds, on average, between login and 2FA requests from the same IP address before rate limiting kicks in
|
||||
login_ratelimit_seconds: u64, false, def, 60;
|
||||
/// Max burst size for login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `login_ratelimit_seconds`. Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2
|
||||
@@ -655,9 +670,9 @@ make_config! {
|
||||
_enable_duo: bool, true, def, true;
|
||||
/// Attempt to use deprecated iframe-based Traditional Prompt (Duo WebSDK 2)
|
||||
duo_use_iframe: bool, false, def, false;
|
||||
/// Integration Key
|
||||
/// Client Id
|
||||
duo_ikey: String, true, option;
|
||||
/// Secret Key
|
||||
/// Client Secret
|
||||
duo_skey: Pass, true, option;
|
||||
/// Host
|
||||
duo_host: String, true, option;
|
||||
@@ -672,7 +687,7 @@ make_config! {
|
||||
/// Use Sendmail |> Whether to send mail via the `sendmail` command
|
||||
use_sendmail: bool, true, def, false;
|
||||
/// Sendmail Command |> Which sendmail command to use. The one found in the $PATH is used if not specified.
|
||||
sendmail_command: String, true, option;
|
||||
sendmail_command: String, false, option;
|
||||
/// Host
|
||||
smtp_host: String, true, option;
|
||||
/// DEPRECATED smtp_ssl |> DEPRECATED - Please use SMTP_SECURITY
|
||||
@@ -760,6 +775,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
||||
);
|
||||
}
|
||||
|
||||
let connect_src = cfg.allowed_connect_src.to_lowercase();
|
||||
for url in connect_src.split_whitespace() {
|
||||
if !url.starts_with("https://") || Url::parse(url).is_err() {
|
||||
err!("ALLOWED_CONNECT_SRC variable contains one or more invalid URLs. Only FQDN's starting with https are allowed");
|
||||
}
|
||||
}
|
||||
|
||||
let whitelist = &cfg.signups_domains_whitelist;
|
||||
if !whitelist.is_empty() && whitelist.split(',').any(|d| d.trim().is_empty()) {
|
||||
err!("`SIGNUPS_DOMAINS_WHITELIST` contains empty tokens");
|
||||
@@ -811,8 +833,16 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
||||
}
|
||||
|
||||
// TODO: deal with deprecated flags so they can be removed from this list, cf. #4263
|
||||
const KNOWN_FLAGS: &[&str] =
|
||||
&["autofill-overlay", "autofill-v2", "browser-fileless-import", "extension-refresh", "fido2-vault-credentials"];
|
||||
const KNOWN_FLAGS: &[&str] = &[
|
||||
"autofill-overlay",
|
||||
"autofill-v2",
|
||||
"browser-fileless-import",
|
||||
"extension-refresh",
|
||||
"fido2-vault-credentials",
|
||||
"inline-menu-positioning-improvements",
|
||||
"ssh-key-vault-item",
|
||||
"ssh-agent",
|
||||
];
|
||||
let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags);
|
||||
let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect();
|
||||
if !invalid_flags.is_empty() {
|
||||
@@ -873,12 +903,12 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
||||
let command = cfg.sendmail_command.clone().unwrap_or_else(|| format!("sendmail{EXE_SUFFIX}"));
|
||||
|
||||
let mut path = std::path::PathBuf::from(&command);
|
||||
|
||||
// Check if we can find the sendmail command to execute when no absolute path is given
|
||||
if !path.is_absolute() {
|
||||
match which::which(&command) {
|
||||
Ok(result) => path = result,
|
||||
Err(_) => err!(format!("sendmail command {command:?} not found in $PATH")),
|
||||
}
|
||||
let Ok(which_path) = which::which(&command) else {
|
||||
err!(format!("sendmail command {command} not found in $PATH"))
|
||||
};
|
||||
path = which_path;
|
||||
}
|
||||
|
||||
match path.metadata() {
|
||||
@@ -912,8 +942,8 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
||||
}
|
||||
}
|
||||
|
||||
if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !cfg.smtp_from.contains('@') {
|
||||
err!("SMTP_FROM does not contain a mandatory @ sign")
|
||||
if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !is_valid_email(&cfg.smtp_from) {
|
||||
err!(format!("SMTP_FROM '{}' is not a valid email address", cfg.smtp_from))
|
||||
}
|
||||
|
||||
if cfg._enable_email_2fa && cfg.email_token_size < 6 {
|
||||
@@ -1126,12 +1156,17 @@ impl Config {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn update_config(&self, other: ConfigBuilder) -> Result<(), Error> {
|
||||
pub fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
|
||||
// Remove default values
|
||||
//let builder = other.remove(&self.inner.read().unwrap()._env);
|
||||
|
||||
// TODO: Remove values that are defaults, above only checks those set by env and not the defaults
|
||||
let builder = other;
|
||||
let mut builder = other;
|
||||
|
||||
// Remove values that are not editable
|
||||
if ignore_non_editable {
|
||||
builder.clear_non_editable();
|
||||
}
|
||||
|
||||
// Serialize now before we consume the builder
|
||||
let config_str = serde_json::to_string_pretty(&builder)?;
|
||||
@@ -1166,7 +1201,7 @@ impl Config {
|
||||
let mut _overrides = Vec::new();
|
||||
usr.merge(&other, false, &mut _overrides)
|
||||
};
|
||||
self.update_config(builder)
|
||||
self.update_config(builder, false)
|
||||
}
|
||||
|
||||
/// Tests whether an email's domain is allowed. A domain is allowed if it
|
||||
@@ -1269,11 +1304,16 @@ impl Config {
let hb = load_templates(CONFIG.templates_folder());
hb.render(name, data).map_err(Into::into)
} else {
let hb = &CONFIG.inner.read().unwrap().templates;
let hb = &self.inner.read().unwrap().templates;
hb.render(name, data).map_err(Into::into)
}
}

pub fn render_fallback_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
let hb = &self.inner.read().unwrap().templates;
hb.render(&format!("fallback_{name}"), data).map_err(Into::into)
}

pub fn set_rocket_shutdown_handle(&self, handle: rocket::Shutdown) {
self.inner.write().unwrap().rocket_shutdown_handle = Some(handle);
}
@@ -1302,6 +1342,8 @@ where
// Register helpers
hb.register_helper("case", Box::new(case_helper));
hb.register_helper("to_json", Box::new(to_json));
hb.register_helper("webver", Box::new(webver));
hb.register_helper("vwver", Box::new(vwver));

macro_rules! reg {
($name:expr) => {{
@@ -1312,6 +1354,11 @@ where
reg!($name);
reg!(concat!($name, $ext));
}};
(@withfallback $name:expr) => {{
let template = include_str!(concat!("static/templates/", $name, ".hbs"));
hb.register_template_string($name, template).unwrap();
hb.register_template_string(concat!("fallback_", $name), template).unwrap();
}};
}

// First register default templates here
@@ -1355,6 +1402,9 @@ where

reg!("404");

reg!(@withfallback "scss/vaultwarden.scss");
reg!("scss/user.vaultwarden.scss");

// And then load user templates to overwrite the defaults
// Use .hbs extension for the files
// Templates get registered with their relative name
@@ -1397,3 +1447,42 @@ fn to_json<'reg, 'rc>(
out.write(&json)?;
Ok(())
}

// Configure the web-vault version as an integer so it can be used as a comparison smaller or greater then.
// The default is based upon the version since this feature is added.
static WEB_VAULT_VERSION: Lazy<semver::Version> = Lazy::new(|| {
let vault_version = get_web_vault_version();
// Use a single regex capture to extract version components
let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap();
re.captures(&vault_version)
.and_then(|c| {
(c.len() == 4).then(|| {
format!("{}.{}.{}", c.get(1).unwrap().as_str(), c.get(2).unwrap().as_str(), c.get(3).unwrap().as_str())
})
})
.and_then(|v| semver::Version::parse(&v).ok())
.unwrap_or_else(|| semver::Version::parse("2024.6.2").unwrap())
});

// Configure the Vaultwarden version as an integer so it can be used as a comparison smaller or greater then.
// The default is based upon the version since this feature is added.
static VW_VERSION: Lazy<semver::Version> = Lazy::new(|| {
let vw_version = crate::VERSION.unwrap_or("1.32.5");
// Use a single regex capture to extract version components
let re = regex::Regex::new(r"(\d{1})\.(\d{1,2})\.(\d{1,2})").unwrap();
re.captures(vw_version)
.and_then(|c| {
(c.len() == 4).then(|| {
format!("{}.{}.{}", c.get(1).unwrap().as_str(), c.get(2).unwrap().as_str(), c.get(3).unwrap().as_str())
})
})
.and_then(|v| semver::Version::parse(&v).ok())
.unwrap_or_else(|| semver::Version::parse("1.32.5").unwrap())
});

handlebars::handlebars_helper!(webver: | web_vault_version: String |
    semver::VersionReq::parse(&web_vault_version).expect("Invalid web-vault version compare string").matches(&WEB_VAULT_VERSION)
);
handlebars::handlebars_helper!(vwver: | vw_version: String |
    semver::VersionReq::parse(&vw_version).expect("Invalid Vaultwarden version compare string").matches(&VW_VERSION)
);
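The new `webver`/`vwver` helpers simply match a semver requirement string against the parsed web-vault or Vaultwarden version. A minimal sketch of the same comparison outside Handlebars, assuming the `semver` crate API used above (the version strings are illustrative):

    // Sketch only: what a template expression like {{#if (webver ">=2024.6.2")}} ... {{/if}} evaluates.
    fn version_matches(current: &str, requirement: &str) -> bool {
        let current = semver::Version::parse(current).expect("invalid version");
        let requirement = semver::VersionReq::parse(requirement).expect("invalid version requirement");
        requirement.matches(&current)
    }

    // version_matches("2024.6.2", ">=2024.5.0") == true
    // version_matches("2024.1.2", ">=2024.5.0") == false
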
@@ -6,7 +6,7 @@ use std::num::NonZeroU32;
use data_encoding::{Encoding, HEXLOWER};
use ring::{digest, hmac, pbkdf2};

static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
const DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;

pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
@@ -56,11 +56,11 @@ pub fn encode_random_bytes<const N: usize>(e: Encoding) -> String {
pub fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
// Ref: https://rust-lang-nursery.github.io/rust-cookbook/algorithms/randomness.html
use rand::Rng;
let mut rng = rand::thread_rng();
let mut rng = rand::rng();

(0..num_chars)
.map(|_| {
let i = rng.gen_range(0..alphabet.len());
let i = rng.random_range(0..alphabet.len());
alphabet[i] as char
})
.collect()
@@ -84,14 +84,15 @@ pub fn generate_id<const N: usize>() -> String {
encode_random_bytes::<N>(HEXLOWER)
}

pub fn generate_send_id() -> String {
// Send IDs are globally scoped, so make them longer to avoid collisions.
pub fn generate_send_file_id() -> String {
// Send File IDs are globally scoped, so make them longer to avoid collisions.
generate_id::<32>() // 256 bits
}

pub fn generate_attachment_id() -> String {
use crate::db::models::AttachmentId;
pub fn generate_attachment_id() -> AttachmentId {
// Attachment IDs are scoped to a cipher, so they can be smaller.
generate_id::<10>() // 80 bits
AttachmentId(generate_id::<10>()) // 80 bits
}

/// Generates a numeric token for email-based verifications.

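The switch from `rand::thread_rng()`/`gen_range` to `rand::rng()`/`random_range` tracks the renamed API in newer `rand` releases (0.9). A minimal sketch of the new call pattern, assuming rand 0.9 (the alphabet and length below are illustrative, not the project's values):

    use rand::Rng;

    fn sample_token() -> String {
        let alphabet = b"ABCDEFGHJKLMNPQRSTUVWXYZ23456789"; // illustrative alphabet
        let mut rng = rand::rng(); // rand 0.9: replaces rand::thread_rng()
        (0..8)
            .map(|_| alphabet[rng.random_range(0..alphabet.len())] as char) // replaces gen_range
            .collect()
    }
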
@@ -373,24 +373,18 @@ pub async fn backup_database(conn: &mut DbConn) -> Result<String, Error> {
err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
}
sqlite {
backup_sqlite_database(conn)
let db_url = CONFIG.database_url();
let db_path = std::path::Path::new(&db_url).parent().unwrap();
let backup_file = db_path
.join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S")))
.to_string_lossy()
.into_owned();
diesel::sql_query(format!("VACUUM INTO '{backup_file}'")).execute(conn)?;
Ok(backup_file)
}
}
}

#[cfg(sqlite)]
pub fn backup_sqlite_database(conn: &mut diesel::sqlite::SqliteConnection) -> Result<String, Error> {
use diesel::RunQueryDsl;
let db_url = CONFIG.database_url();
let db_path = std::path::Path::new(&db_url).parent().unwrap();
let backup_file = db_path
.join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S")))
.to_string_lossy()
.into_owned();
diesel::sql_query(format!("VACUUM INTO '{backup_file}'")).execute(conn)?;
Ok(backup_file)
}

/// Get the SQL Server version
pub async fn get_sql_server_version(conn: &mut DbConn) -> String {
db_run! {@raw conn:

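The refactor pulls the `VACUUM INTO` call out of the `db_run!` macro into a standalone `backup_sqlite_database` helper so it can be reused with a plain SQLite connection. For context, a minimal sketch of the same idea; the real code goes through Diesel's `sql_query` as shown above, this sketch assumes the `rusqlite` crate purely for illustration and the target path is invented:

    // Sketch only: SQLite's VACUUM INTO writes a consistent copy of the live database to a new file.
    use rusqlite::Connection;

    fn backup(db: &Connection, target: &str) -> rusqlite::Result<()> {
        // e.g. target = "data/db_20250101_120000.sqlite3" (illustrative path)
        db.execute_batch(&format!("VACUUM INTO '{target}'"))
    }
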
@@ -1,9 +1,12 @@
use std::io::ErrorKind;

use bigdecimal::{BigDecimal, ToPrimitive};
use derive_more::{AsRef, Deref, Display};
use serde_json::Value;

use super::{CipherId, OrganizationId, UserId};
use crate::CONFIG;
use macros::IdFromParam;

db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -11,8 +14,8 @@ db_object! {
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(id))]
pub struct Attachment {
pub id: String,
pub cipher_uuid: String,
pub id: AttachmentId,
pub cipher_uuid: CipherId,
pub file_name: String, // encrypted
pub file_size: i64,
pub akey: Option<String>,
@@ -21,7 +24,13 @@ db_object! {

/// Local methods
impl Attachment {
pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i64, akey: Option<String>) -> Self {
pub const fn new(
id: AttachmentId,
cipher_uuid: CipherId,
file_name: String,
file_size: i64,
akey: Option<String>,
) -> Self {
Self {
id,
cipher_uuid,
@@ -117,14 +126,14 @@ impl Attachment {
}}
}

pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await {
attachment.delete(conn).await?;
}
Ok(())
}

pub async fn find_by_id(id: &str, conn: &mut DbConn) -> Option<Self> {
pub async fn find_by_id(id: &AttachmentId, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
attachments::table
.filter(attachments::id.eq(id.to_lowercase()))
@@ -134,7 +143,7 @@ impl Attachment {
}}
}

pub async fn find_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
pub async fn find_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
attachments::table
.filter(attachments::cipher_uuid.eq(cipher_uuid))
@@ -144,7 +153,7 @@ impl Attachment {
}}
}

pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 {
db_run! { conn: {
let result: Option<BigDecimal> = attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -161,7 +170,7 @@ impl Attachment {
}}
}

pub async fn count_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
pub async fn count_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 {
db_run! { conn: {
attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -172,7 +181,7 @@ impl Attachment {
}}
}

pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
pub async fn size_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
db_run! { conn: {
let result: Option<BigDecimal> = attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -189,7 +198,7 @@ impl Attachment {
}}
}

pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
db_run! { conn: {
attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -203,7 +212,11 @@ impl Attachment {
// This will return all attachments linked to the user or org
// There is no filtering done here if the user actually has access!
// It is used to speed up the sync process, and the matching is done in a different part.
pub async fn find_all_by_user_and_orgs(user_uuid: &str, org_uuids: &Vec<String>, conn: &mut DbConn) -> Vec<Self> {
pub async fn find_all_by_user_and_orgs(
user_uuid: &UserId,
org_uuids: &Vec<OrganizationId>,
conn: &mut DbConn,
) -> Vec<Self> {
db_run! { conn: {
attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -216,3 +229,20 @@ impl Attachment {
}}
}
}

#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
IdFromParam,
)]
pub struct AttachmentId(pub String);

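The switch from bare `String` keys to dedicated newtypes (`AttachmentId`, `CipherId`, `UserId`, ...) lets the compiler reject mixed-up identifiers instead of relying on parameter names. A minimal, dependency-free sketch of the pattern; the derives in the diff above come from `derive_more`, Diesel and the project's own `macros` crate, and the values here are invented:

    // Sketch only: typed ID wrappers; mixing AttachmentId and CipherId no longer compiles.
    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    pub struct AttachmentId(pub String);

    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    pub struct CipherId(pub String);

    fn find_attachment(_id: &AttachmentId) { /* ... */ }

    fn demo() {
        let attachment = AttachmentId("a1b2c3".into()); // illustrative value
        let cipher = CipherId("d4e5f6".into());
        find_attachment(&attachment); // ok
        // find_attachment(&cipher);  // compile error: expected `&AttachmentId`, found `&CipherId`
    }
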
@@ -1,5 +1,9 @@
use crate::crypto::ct_eq;
use super::{DeviceId, OrganizationId, UserId};
use crate::{crypto::ct_eq, util::format_date};
use chrono::{NaiveDateTime, Utc};
use derive_more::{AsRef, Deref, Display, From};
use macros::UuidFromParam;
use serde_json::Value;

db_object! {
#[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)]
@@ -7,15 +11,15 @@ db_object! {
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))]
pub struct AuthRequest {
pub uuid: String,
pub user_uuid: String,
pub organization_uuid: Option<String>,
pub uuid: AuthRequestId,
pub user_uuid: UserId,
pub organization_uuid: Option<OrganizationId>,

pub request_device_identifier: String,
pub request_device_identifier: DeviceId,
pub device_type: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs

pub request_ip: String,
pub response_device_id: Option<String>,
pub response_device_id: Option<DeviceId>,

pub access_code: String,
pub public_key: String,
@@ -33,8 +37,8 @@ db_object! {

impl AuthRequest {
pub fn new(
user_uuid: String,
request_device_identifier: String,
user_uuid: UserId,
request_device_identifier: DeviceId,
device_type: i32,
request_ip: String,
access_code: String,
@@ -43,7 +47,7 @@ impl AuthRequest {
let now = Utc::now().naive_utc();

Self {
uuid: crate::util::get_uuid(),
uuid: AuthRequestId(crate::util::get_uuid()),
user_uuid,
organization_uuid: None,

@@ -61,6 +65,13 @@ impl AuthRequest {
authentication_date: None,
}
}

pub fn to_json_for_pending_device(&self) -> Value {
json!({
"id": self.uuid,
"creationDate": format_date(&self.creation_date),
})
}
}

use crate::db::DbConn;
@@ -101,7 +112,7 @@ impl AuthRequest {
}
}

pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
pub async fn find_by_uuid(uuid: &AuthRequestId, conn: &mut DbConn) -> Option<Self> {
db_run! {conn: {
auth_requests::table
.filter(auth_requests::uuid.eq(uuid))
@@ -111,7 +122,18 @@ impl AuthRequest {
}}
}

pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
pub async fn find_by_uuid_and_user(uuid: &AuthRequestId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
db_run! {conn: {
auth_requests::table
.filter(auth_requests::uuid.eq(uuid))
.filter(auth_requests::user_uuid.eq(user_uuid))
.first::<AuthRequestDb>(conn)
.ok()
.from_db()
}}
}

pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! {conn: {
auth_requests::table
.filter(auth_requests::user_uuid.eq(user_uuid))
@@ -119,6 +141,21 @@ impl AuthRequest {
}}
}

pub async fn find_by_user_and_requested_device(
user_uuid: &UserId,
device_uuid: &DeviceId,
conn: &mut DbConn,
) -> Option<Self> {
db_run! {conn: {
auth_requests::table
.filter(auth_requests::user_uuid.eq(user_uuid))
.filter(auth_requests::request_device_identifier.eq(device_uuid))
.filter(auth_requests::approved.is_null())
.order_by(auth_requests::creation_date.desc())
.first::<AuthRequestDb>(conn).ok().from_db()
}}
}

pub async fn find_created_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec<Self> {
db_run! {conn: {
auth_requests::table
@@ -146,3 +183,21 @@ impl AuthRequest {
}
}
}

#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
From,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
UuidFromParam,
)]
pub struct AuthRequestId(String);

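The new `find_by_uuid_and_user` lookup lets a caller fetch an auth request and verify ownership in a single query, instead of fetching by UUID and comparing the user afterwards. A hedged sketch of how a route handler might use it; the handler name, return type and error message are illustrative, only the `find_by_uuid_and_user` signature comes from the diff above:

    // Sketch only: scoping the lookup to the requesting user replaces a separate ownership check.
    async fn get_auth_request(uuid: AuthRequestId, user_id: UserId, conn: &mut DbConn) -> Result<AuthRequest, Error> {
        match AuthRequest::find_by_uuid_and_user(&uuid, &user_id, conn).await {
            Some(req) => Ok(req),
            None => err!("Auth request not found or not owned by this user"), // illustrative message
        }
    }
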
@@ -1,13 +1,15 @@
|
||||
use crate::util::LowerCase;
|
||||
use crate::CONFIG;
|
||||
use chrono::{DateTime, NaiveDateTime, TimeDelta, Utc};
|
||||
use chrono::{NaiveDateTime, TimeDelta, Utc};
|
||||
use derive_more::{AsRef, Deref, Display, From};
|
||||
use serde_json::Value;
|
||||
|
||||
use super::{
|
||||
Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization,
|
||||
Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus,
|
||||
MembershipType, OrganizationId, User, UserId,
|
||||
};
|
||||
|
||||
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};
|
||||
use macros::UuidFromParam;
|
||||
|
||||
use std::borrow::Cow;
|
||||
|
||||
@@ -17,12 +19,12 @@ db_object! {
|
||||
#[diesel(treat_none_as_null = true)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct Cipher {
|
||||
pub uuid: String,
|
||||
pub uuid: CipherId,
|
||||
pub created_at: NaiveDateTime,
|
||||
pub updated_at: NaiveDateTime,
|
||||
|
||||
pub user_uuid: Option<String>,
|
||||
pub organization_uuid: Option<String>,
|
||||
pub user_uuid: Option<UserId>,
|
||||
pub organization_uuid: Option<OrganizationId>,
|
||||
|
||||
pub key: Option<String>,
|
||||
|
||||
@@ -30,7 +32,8 @@ db_object! {
|
||||
Login = 1,
|
||||
SecureNote = 2,
|
||||
Card = 3,
|
||||
Identity = 4
|
||||
Identity = 4,
|
||||
SshKey = 5
|
||||
*/
|
||||
pub atype: i32,
|
||||
pub name: String,
|
||||
@@ -45,10 +48,9 @@ db_object! {
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub enum RepromptType {
|
||||
None = 0,
|
||||
Password = 1, // not currently used in server
|
||||
Password = 1,
|
||||
}
|
||||
|
||||
/// Local methods
|
||||
@@ -57,7 +59,7 @@ impl Cipher {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
uuid: CipherId(crate::util::get_uuid()),
|
||||
created_at: now,
|
||||
updated_at: now,
|
||||
|
||||
@@ -135,12 +137,12 @@ impl Cipher {
|
||||
pub async fn to_json(
|
||||
&self,
|
||||
host: &str,
|
||||
user_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
cipher_sync_data: Option<&CipherSyncData>,
|
||||
sync_type: CipherSyncType,
|
||||
conn: &mut DbConn,
|
||||
) -> Value {
|
||||
use crate::util::format_date;
|
||||
use crate::util::{format_date, validate_and_format_date};
|
||||
|
||||
let mut attachments_json: Value = Value::Null;
|
||||
if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
@@ -156,16 +158,16 @@ impl Cipher {
|
||||
|
||||
// We don't need these values at all for Organizational syncs
|
||||
// Skip any other database calls if this is the case and just return false.
|
||||
let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
|
||||
let (read_only, hide_passwords, _) = if sync_type == CipherSyncType::User {
|
||||
match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await {
|
||||
Some((ro, hp)) => (ro, hp),
|
||||
Some((ro, hp, mn)) => (ro, hp, mn),
|
||||
None => {
|
||||
error!("Cipher ownership assertion failure");
|
||||
(true, true)
|
||||
(true, true, false)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
(false, false)
|
||||
(false, false, false)
|
||||
};
|
||||
|
||||
let fields_json: Vec<_> = self
|
||||
@@ -216,11 +218,13 @@ impl Cipher {
|
||||
Some(p) if p.is_string() => Some(d.data),
|
||||
_ => None,
|
||||
})
|
||||
.map(|d| match d.get("lastUsedDate").and_then(|l| l.as_str()) {
|
||||
Some(l) if DateTime::parse_from_rfc3339(l).is_ok() => d,
|
||||
.map(|mut d| match d.get("lastUsedDate").and_then(|l| l.as_str()) {
|
||||
Some(l) => {
|
||||
d["lastUsedDate"] = json!(validate_and_format_date(l));
|
||||
d
|
||||
}
|
||||
_ => {
|
||||
let mut d = d;
|
||||
d["lastUsedDate"] = json!("1970-01-01T00:00:00.000Z");
|
||||
d["lastUsedDate"] = json!("1970-01-01T00:00:00.000000Z");
|
||||
d
|
||||
}
|
||||
})
|
||||
@@ -239,12 +243,28 @@ impl Cipher {
|
||||
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
|
||||
// Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
|
||||
if self.atype == 1 {
|
||||
if type_data_json["uris"].is_array() {
|
||||
let uri = type_data_json["uris"][0]["uri"].clone();
|
||||
type_data_json["uri"] = uri;
|
||||
} else {
|
||||
// Upstream always has an Uri key/value
|
||||
type_data_json["uri"] = Value::Null;
|
||||
// Upstream always has an `uri` key/value
|
||||
type_data_json["uri"] = Value::Null;
|
||||
if let Some(uris) = type_data_json["uris"].as_array_mut() {
|
||||
if !uris.is_empty() {
|
||||
// Fix uri match values first, they are only allowed to be a number or null
|
||||
// If it is a string, convert it to an int or null if that fails
|
||||
for uri in &mut *uris {
|
||||
if uri["match"].is_string() {
|
||||
let match_value = match uri["match"].as_str().unwrap_or_default().parse::<u8>() {
|
||||
Ok(n) => json!(n),
|
||||
_ => Value::Null,
|
||||
};
|
||||
uri["match"] = match_value;
|
||||
}
|
||||
}
|
||||
type_data_json["uri"] = uris[0]["uri"].clone();
|
||||
}
|
||||
}
|
||||
|
||||
// Check if `passwordRevisionDate` is a valid date, else convert it
|
||||
if let Some(pw_revision) = type_data_json["passwordRevisionDate"].as_str() {
|
||||
type_data_json["passwordRevisionDate"] = json!(validate_and_format_date(pw_revision));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -259,6 +279,19 @@ impl Cipher {
|
||||
}
|
||||
}
|
||||
|
||||
// Fix invalid SSH Entries
|
||||
// This breaks at least the native mobile client if invalid
|
||||
// The only way to fix this is by setting type_data_json to `null`
|
||||
// Opening this ssh-key in the mobile client will probably crash the client, but you can edit, save and afterwards delete it
|
||||
if self.atype == 5
|
||||
&& (type_data_json["keyFingerprint"].as_str().is_none_or(|v| v.is_empty())
|
||||
|| type_data_json["privateKey"].as_str().is_none_or(|v| v.is_empty())
|
||||
|| type_data_json["publicKey"].as_str().is_none_or(|v| v.is_empty()))
|
||||
{
|
||||
warn!("Error parsing ssh-key, mandatory fields are invalid for {}", self.uuid);
|
||||
type_data_json = Value::Null;
|
||||
}
|
||||
|
||||
// Clone the type_data and add some default value.
|
||||
let mut data_json = type_data_json.clone();
|
||||
|
||||
@@ -276,7 +309,7 @@ impl Cipher {
|
||||
Cow::from(Vec::with_capacity(0))
|
||||
}
|
||||
} else {
|
||||
Cow::from(self.get_admin_collections(user_uuid.to_string(), conn).await)
|
||||
Cow::from(self.get_admin_collections(user_uuid.clone(), conn).await)
|
||||
};
|
||||
|
||||
// There are three types of cipher response models in upstream
|
||||
@@ -293,7 +326,7 @@ impl Cipher {
|
||||
"creationDate": format_date(&self.created_at),
|
||||
"revisionDate": format_date(&self.updated_at),
|
||||
"deletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
|
||||
"reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
|
||||
"reprompt": self.reprompt.filter(|r| *r == RepromptType::None as i32 || *r == RepromptType::Password as i32).unwrap_or(RepromptType::None as i32),
|
||||
"organizationId": self.organization_uuid,
|
||||
"key": self.key,
|
||||
"attachments": attachments_json,
|
||||
@@ -317,6 +350,7 @@ impl Cipher {
|
||||
"secureNote": null,
|
||||
"card": null,
|
||||
"identity": null,
|
||||
"sshKey": null,
|
||||
});
|
||||
|
||||
// These values are only needed for user/default syncs
|
||||
@@ -324,7 +358,7 @@ impl Cipher {
|
||||
// Skip adding these fields in that case
|
||||
if sync_type == CipherSyncType::User {
|
||||
json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string())
|
||||
cipher_sync_data.cipher_folders.get(&self.uuid).cloned()
|
||||
} else {
|
||||
self.get_folder_uuid(user_uuid, conn).await
|
||||
});
|
||||
@@ -345,6 +379,7 @@ impl Cipher {
|
||||
2 => "secureNote",
|
||||
3 => "card",
|
||||
4 => "identity",
|
||||
5 => "sshKey",
|
||||
_ => panic!("Wrong type"),
|
||||
};
|
||||
|
||||
@@ -352,7 +387,7 @@ impl Cipher {
|
||||
json_object
|
||||
}
|
||||
|
||||
pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<String> {
|
||||
pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {
|
||||
let mut user_uuids = Vec::new();
|
||||
match self.user_uuid {
|
||||
Some(ref user_uuid) => {
|
||||
@@ -363,17 +398,16 @@ impl Cipher {
|
||||
// Belongs to Organization, need to update affected users
|
||||
if let Some(ref org_uuid) = self.organization_uuid {
|
||||
// users having access to the collection
|
||||
let mut collection_users =
|
||||
UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await;
|
||||
let mut collection_users = Membership::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await;
|
||||
if CONFIG.org_groups_enabled() {
|
||||
// members of a group having access to the collection
|
||||
let group_users =
|
||||
UserOrganization::find_by_cipher_and_org_with_group(&self.uuid, org_uuid, conn).await;
|
||||
Membership::find_by_cipher_and_org_with_group(&self.uuid, org_uuid, conn).await;
|
||||
collection_users.extend(group_users);
|
||||
}
|
||||
for user_org in collection_users {
|
||||
User::update_uuid_revision(&user_org.user_uuid, conn).await;
|
||||
user_uuids.push(user_org.user_uuid.clone())
|
||||
for member in collection_users {
|
||||
User::update_uuid_revision(&member.user_uuid, conn).await;
|
||||
user_uuids.push(member.user_uuid.clone())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -431,7 +465,7 @@ impl Cipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
|
||||
// TODO: Optimize this by executing a DELETE directly on the database, instead of first fetching.
|
||||
for cipher in Self::find_by_org(org_uuid, conn).await {
|
||||
cipher.delete(conn).await?;
|
||||
@@ -439,7 +473,7 @@ impl Cipher {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
for cipher in Self::find_owned_by_user(user_uuid, conn).await {
|
||||
cipher.delete(conn).await?;
|
||||
}
|
||||
@@ -457,52 +491,59 @@ impl Cipher {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn move_to_folder(
|
||||
&self,
|
||||
folder_uuid: Option<FolderId>,
|
||||
user_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
User::update_uuid_revision(user_uuid, conn).await;
|
||||
|
||||
match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) {
|
||||
// No changes
|
||||
(None, None) => Ok(()),
|
||||
(Some(ref old), Some(ref new)) if old == new => Ok(()),
|
||||
(Some(ref old_folder), Some(ref new_folder)) if old_folder == new_folder => Ok(()),
|
||||
|
||||
// Add to folder
|
||||
(None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn).await,
|
||||
(None, Some(new_folder)) => FolderCipher::new(new_folder, self.uuid.clone()).save(conn).await,
|
||||
|
||||
// Remove from folder
|
||||
(Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await {
|
||||
Some(old) => old.delete(conn).await,
|
||||
None => err!("Couldn't move from previous folder"),
|
||||
},
|
||||
(Some(old_folder), None) => {
|
||||
match FolderCipher::find_by_folder_and_cipher(&old_folder, &self.uuid, conn).await {
|
||||
Some(old_folder) => old_folder.delete(conn).await,
|
||||
None => err!("Couldn't move from previous folder"),
|
||||
}
|
||||
}
|
||||
|
||||
// Move to another folder
|
||||
(Some(old), Some(new)) => {
|
||||
if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await {
|
||||
old.delete(conn).await?;
|
||||
(Some(old_folder), Some(new_folder)) => {
|
||||
if let Some(old_folder) = FolderCipher::find_by_folder_and_cipher(&old_folder, &self.uuid, conn).await {
|
||||
old_folder.delete(conn).await?;
|
||||
}
|
||||
FolderCipher::new(&new, &self.uuid).save(conn).await
|
||||
FolderCipher::new(new_folder, self.uuid.clone()).save(conn).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether this cipher is directly owned by the user.
|
||||
pub fn is_owned_by_user(&self, user_uuid: &str) -> bool {
|
||||
pub fn is_owned_by_user(&self, user_uuid: &UserId) -> bool {
|
||||
self.user_uuid.is_some() && self.user_uuid.as_ref().unwrap() == user_uuid
|
||||
}
|
||||
|
||||
/// Returns whether this cipher is owned by an org in which the user has full access.
|
||||
async fn is_in_full_access_org(
|
||||
&self,
|
||||
user_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
cipher_sync_data: Option<&CipherSyncData>,
|
||||
conn: &mut DbConn,
|
||||
) -> bool {
|
||||
if let Some(ref org_uuid) = self.organization_uuid {
|
||||
if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
if let Some(cached_user_org) = cipher_sync_data.user_organizations.get(org_uuid) {
|
||||
return cached_user_org.has_full_access();
|
||||
if let Some(cached_member) = cipher_sync_data.members.get(org_uuid) {
|
||||
return cached_member.has_full_access();
|
||||
}
|
||||
} else if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await {
|
||||
return user_org.has_full_access();
|
||||
} else if let Some(member) = Membership::find_by_user_and_org(user_uuid, org_uuid, conn).await {
|
||||
return member.has_full_access();
|
||||
}
|
||||
}
|
||||
false
|
||||
@@ -511,7 +552,7 @@ impl Cipher {
|
||||
/// Returns whether this cipher is owned by an group in which the user has full access.
|
||||
async fn is_in_full_access_group(
|
||||
&self,
|
||||
user_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
cipher_sync_data: Option<&CipherSyncData>,
|
||||
conn: &mut DbConn,
|
||||
) -> bool {
|
||||
@@ -531,14 +572,14 @@ impl Cipher {
|
||||
/// Returns the user's access restrictions to this cipher. A return value
|
||||
/// of None means that this cipher does not belong to the user, and is
|
||||
/// not in any collection the user has access to. Otherwise, the user has
|
||||
/// access to this cipher, and Some(read_only, hide_passwords) represents
|
||||
/// access to this cipher, and Some(read_only, hide_passwords, manage) represents
|
||||
/// the access restrictions.
|
||||
pub async fn get_access_restrictions(
|
||||
&self,
|
||||
user_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
cipher_sync_data: Option<&CipherSyncData>,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<(bool, bool)> {
|
||||
) -> Option<(bool, bool, bool)> {
|
||||
// Check whether this cipher is directly owned by the user, or is in
|
||||
// a collection that the user has full access to. If so, there are no
|
||||
// access restrictions.
|
||||
@@ -546,21 +587,21 @@ impl Cipher {
|
||||
|| self.is_in_full_access_org(user_uuid, cipher_sync_data, conn).await
|
||||
|| self.is_in_full_access_group(user_uuid, cipher_sync_data, conn).await
|
||||
{
|
||||
return Some((false, false));
|
||||
return Some((false, false, true));
|
||||
}
|
||||
|
||||
let rows = if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
let mut rows: Vec<(bool, bool)> = Vec::new();
|
||||
let mut rows: Vec<(bool, bool, bool)> = Vec::new();
|
||||
if let Some(collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
|
||||
for collection in collections {
|
||||
//User permissions
|
||||
if let Some(uc) = cipher_sync_data.user_collections.get(collection) {
|
||||
rows.push((uc.read_only, uc.hide_passwords));
|
||||
if let Some(cu) = cipher_sync_data.user_collections.get(collection) {
|
||||
rows.push((cu.read_only, cu.hide_passwords, cu.manage));
|
||||
}
|
||||
|
||||
//Group permissions
|
||||
if let Some(cg) = cipher_sync_data.user_collections_groups.get(collection) {
|
||||
rows.push((cg.read_only, cg.hide_passwords));
|
||||
rows.push((cg.read_only, cg.hide_passwords, cg.manage));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -587,15 +628,21 @@ impl Cipher {
|
||||
// booleans and this behavior isn't portable anyway.
|
||||
let mut read_only = true;
|
||||
let mut hide_passwords = true;
|
||||
for (ro, hp) in rows.iter() {
|
||||
let mut manage = false;
|
||||
for (ro, hp, mn) in rows.iter() {
|
||||
read_only &= ro;
|
||||
hide_passwords &= hp;
|
||||
manage &= mn;
|
||||
}
|
||||
|
||||
Some((read_only, hide_passwords))
|
||||
Some((read_only, hide_passwords, manage))
|
||||
}
|
||||
|
||||
async fn get_user_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> {
|
||||
async fn get_user_collections_access_flags(
|
||||
&self,
|
||||
user_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<(bool, bool, bool)> {
|
||||
db_run! {conn: {
|
||||
// Check whether this cipher is in any collections accessible to the
|
||||
// user. If so, retrieve the access flags for each collection.
|
||||
@@ -606,13 +653,17 @@ impl Cipher {
|
||||
.inner_join(users_collections::table.on(
|
||||
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
|
||||
.and(users_collections::user_uuid.eq(user_uuid))))
|
||||
.select((users_collections::read_only, users_collections::hide_passwords))
|
||||
.load::<(bool, bool)>(conn)
|
||||
.select((users_collections::read_only, users_collections::hide_passwords, users_collections::manage))
|
||||
.load::<(bool, bool, bool)>(conn)
|
||||
.expect("Error getting user access restrictions")
|
||||
}}
|
||||
}
|
||||
|
||||
async fn get_group_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> {
|
||||
async fn get_group_collections_access_flags(
|
||||
&self,
|
||||
user_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<(bool, bool, bool)> {
|
||||
if !CONFIG.org_groups_enabled() {
|
||||
return Vec::new();
|
||||
}
|
||||
@@ -632,49 +683,49 @@ impl Cipher {
|
||||
users_organizations::uuid.eq(groups_users::users_organizations_uuid)
|
||||
))
|
||||
.filter(users_organizations::user_uuid.eq(user_uuid))
|
||||
.select((collections_groups::read_only, collections_groups::hide_passwords))
|
||||
.load::<(bool, bool)>(conn)
|
||||
.select((collections_groups::read_only, collections_groups::hide_passwords, collections_groups::manage))
|
||||
.load::<(bool, bool, bool)>(conn)
|
||||
.expect("Error getting group access restrictions")
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn is_write_accessible_to_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||
match self.get_access_restrictions(user_uuid, None, conn).await {
|
||||
Some((read_only, _hide_passwords)) => !read_only,
|
||||
Some((read_only, _hide_passwords, manage)) => !read_only || manage,
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn is_accessible_to_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||
self.get_access_restrictions(user_uuid, None, conn).await.is_some()
|
||||
}
|
||||
|
||||
// Returns whether this cipher is a favorite of the specified user.
|
||||
pub async fn is_favorite(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn is_favorite(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||
Favorite::is_favorite(&self.uuid, user_uuid, conn).await
|
||||
}
|
||||
|
||||
// Sets whether this cipher is a favorite of the specified user.
|
||||
pub async fn set_favorite(&self, favorite: Option<bool>, user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn set_favorite(&self, favorite: Option<bool>, user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
match favorite {
|
||||
None => Ok(()), // No change requested.
|
||||
Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn).await,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_folder_uuid(&self, user_uuid: &str, conn: &mut DbConn) -> Option<String> {
|
||||
pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &mut DbConn) -> Option<FolderId> {
|
||||
db_run! {conn: {
|
||||
folders_ciphers::table
|
||||
.inner_join(folders::table)
|
||||
.filter(folders::user_uuid.eq(&user_uuid))
|
||||
.filter(folders_ciphers::cipher_uuid.eq(&self.uuid))
|
||||
.select(folders_ciphers::folder_uuid)
|
||||
.first::<String>(conn)
|
||||
.first::<FolderId>(conn)
|
||||
.ok()
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid(uuid: &CipherId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(ciphers::uuid.eq(uuid))
|
||||
@@ -684,7 +735,11 @@ impl Cipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid_and_org(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid_and_org(
|
||||
cipher_uuid: &CipherId,
|
||||
org_uuid: &OrganizationId,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(ciphers::uuid.eq(cipher_uuid))
|
||||
@@ -707,7 +762,7 @@ impl Cipher {
|
||||
// true, then the non-interesting ciphers will not be returned. As a
|
||||
// result, those ciphers will not appear in "My Vault" for the org
|
||||
// owner/admin, but they can still be accessed via the org vault view.
|
||||
pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_user(user_uuid: &UserId, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
|
||||
if CONFIG.org_groups_enabled() {
|
||||
db_run! {conn: {
|
||||
let mut query = ciphers::table
|
||||
@@ -717,7 +772,7 @@ impl Cipher {
|
||||
.left_join(users_organizations::table.on(
|
||||
ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
|
||||
.and(users_organizations::user_uuid.eq(user_uuid))
|
||||
.and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
|
||||
.and(users_organizations::status.eq(MembershipStatus::Confirmed as i32))
|
||||
))
|
||||
.left_join(users_collections::table.on(
|
||||
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
|
||||
@@ -744,7 +799,7 @@ impl Cipher {
|
||||
|
||||
if !visible_only {
|
||||
query = query.or_filter(
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
|
||||
users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner
|
||||
);
|
||||
}
|
||||
|
||||
@@ -762,7 +817,7 @@ impl Cipher {
|
||||
.left_join(users_organizations::table.on(
|
||||
ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
|
||||
.and(users_organizations::user_uuid.eq(user_uuid))
|
||||
.and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
|
||||
.and(users_organizations::status.eq(MembershipStatus::Confirmed as i32))
|
||||
))
|
||||
.left_join(users_collections::table.on(
|
||||
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
|
||||
@@ -776,7 +831,7 @@ impl Cipher {
|
||||
|
||||
if !visible_only {
|
||||
query = query.or_filter(
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
|
||||
users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner
|
||||
);
|
||||
}
|
||||
|
||||
@@ -789,12 +844,12 @@ impl Cipher {
|
||||
}
|
||||
|
||||
// Find all ciphers visible to the specified user.
|
||||
pub async fn find_by_user_visible(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_user_visible(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
Self::find_by_user(user_uuid, true, conn).await
|
||||
}
|
||||
|
||||
// Find all ciphers directly owned by the specified user.
|
||||
pub async fn find_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_owned_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(
|
||||
@@ -805,7 +860,7 @@ impl Cipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn count_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
|
||||
pub async fn count_owned_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 {
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(ciphers::user_uuid.eq(user_uuid))
|
||||
@@ -816,7 +871,7 @@ impl Cipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(ciphers::organization_uuid.eq(org_uuid))
|
||||
@@ -824,7 +879,7 @@ impl Cipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
|
||||
pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
|
||||
db_run! {conn: {
|
||||
ciphers::table
|
||||
.filter(ciphers::organization_uuid.eq(org_uuid))
|
||||
@@ -835,7 +890,7 @@ impl Cipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! {conn: {
|
||||
folders_ciphers::table.inner_join(ciphers::table)
|
||||
.filter(folders_ciphers::folder_uuid.eq(folder_uuid))
|
||||
@@ -853,7 +908,7 @@ impl Cipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn get_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
|
||||
pub async fn get_collections(&self, user_uuid: UserId, conn: &mut DbConn) -> Vec<CollectionId> {
|
||||
if CONFIG.org_groups_enabled() {
|
||||
db_run! {conn: {
|
||||
ciphers_collections::table
|
||||
@@ -863,11 +918,11 @@ impl Cipher {
|
||||
))
|
||||
.left_join(users_organizations::table.on(
|
||||
users_organizations::org_uuid.eq(collections::org_uuid)
|
||||
.and(users_organizations::user_uuid.eq(user_id.clone()))
|
||||
.and(users_organizations::user_uuid.eq(user_uuid.clone()))
|
||||
))
|
||||
.left_join(users_collections::table.on(
|
||||
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
|
||||
.and(users_collections::user_uuid.eq(user_id.clone()))
|
||||
.and(users_collections::user_uuid.eq(user_uuid.clone()))
|
||||
))
|
||||
.left_join(groups_users::table.on(
|
||||
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
|
||||
@@ -878,14 +933,14 @@ impl Cipher {
|
||||
.and(collections_groups::groups_uuid.eq(groups::uuid))
|
||||
))
|
||||
.filter(users_organizations::access_all.eq(true) // User has access all
|
||||
.or(users_collections::user_uuid.eq(user_id) // User has access to collection
|
||||
.or(users_collections::user_uuid.eq(user_uuid) // User has access to collection
|
||||
.and(users_collections::read_only.eq(false)))
|
||||
.or(groups::access_all.eq(true)) // Access via groups
|
||||
.or(collections_groups::collections_uuid.is_not_null() // Access via groups
|
||||
.and(collections_groups::read_only.eq(false)))
|
||||
)
|
||||
.select(ciphers_collections::collection_uuid)
|
||||
.load::<String>(conn).unwrap_or_default()
|
||||
.load::<CollectionId>(conn).unwrap_or_default()
|
||||
}}
|
||||
} else {
|
||||
db_run! {conn: {
|
||||
@@ -896,23 +951,23 @@ impl Cipher {
|
||||
))
|
||||
.inner_join(users_organizations::table.on(
|
||||
users_organizations::org_uuid.eq(collections::org_uuid)
|
||||
.and(users_organizations::user_uuid.eq(user_id.clone()))
|
||||
.and(users_organizations::user_uuid.eq(user_uuid.clone()))
|
||||
))
|
||||
.left_join(users_collections::table.on(
|
||||
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
|
||||
.and(users_collections::user_uuid.eq(user_id.clone()))
|
||||
.and(users_collections::user_uuid.eq(user_uuid.clone()))
|
||||
))
|
||||
.filter(users_organizations::access_all.eq(true) // User has access all
|
||||
.or(users_collections::user_uuid.eq(user_id) // User has access to collection
|
||||
.or(users_collections::user_uuid.eq(user_uuid) // User has access to collection
|
||||
.and(users_collections::read_only.eq(false)))
|
||||
)
|
||||
.select(ciphers_collections::collection_uuid)
|
||||
.load::<String>(conn).unwrap_or_default()
|
||||
.load::<CollectionId>(conn).unwrap_or_default()
|
||||
}}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_admin_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
|
||||
pub async fn get_admin_collections(&self, user_uuid: UserId, conn: &mut DbConn) -> Vec<CollectionId> {
|
||||
if CONFIG.org_groups_enabled() {
|
||||
db_run! {conn: {
|
||||
ciphers_collections::table
|
||||
@@ -922,11 +977,11 @@ impl Cipher {
|
||||
))
|
||||
.left_join(users_organizations::table.on(
|
||||
users_organizations::org_uuid.eq(collections::org_uuid)
|
||||
.and(users_organizations::user_uuid.eq(user_id.clone()))
|
||||
.and(users_organizations::user_uuid.eq(user_uuid.clone()))
|
||||
))
|
||||
.left_join(users_collections::table.on(
|
||||
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
|
||||
.and(users_collections::user_uuid.eq(user_id.clone()))
|
||||
.and(users_collections::user_uuid.eq(user_uuid.clone()))
|
||||
))
|
||||
.left_join(groups_users::table.on(
|
||||
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
|
||||
@@ -937,15 +992,15 @@ impl Cipher {
|
||||
.and(collections_groups::groups_uuid.eq(groups::uuid))
|
||||
))
|
||||
.filter(users_organizations::access_all.eq(true) // User has access all
|
||||
.or(users_collections::user_uuid.eq(user_id) // User has access to collection
|
||||
.or(users_collections::user_uuid.eq(user_uuid) // User has access to collection
|
||||
.and(users_collections::read_only.eq(false)))
|
||||
.or(groups::access_all.eq(true)) // Access via groups
|
||||
.or(collections_groups::collections_uuid.is_not_null() // Access via groups
|
||||
.and(collections_groups::read_only.eq(false)))
|
||||
.or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
|
||||
.or(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner
|
||||
)
|
||||
.select(ciphers_collections::collection_uuid)
|
||||
.load::<String>(conn).unwrap_or_default()
|
||||
.load::<CollectionId>(conn).unwrap_or_default()
|
||||
}}
|
||||
} else {
|
||||
db_run! {conn: {
|
||||
@@ -956,26 +1011,29 @@ impl Cipher {
|
||||
))
|
||||
.inner_join(users_organizations::table.on(
|
||||
users_organizations::org_uuid.eq(collections::org_uuid)
|
||||
.and(users_organizations::user_uuid.eq(user_id.clone()))
|
||||
.and(users_organizations::user_uuid.eq(user_uuid.clone()))
|
||||
))
|
||||
.left_join(users_collections::table.on(
|
||||
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
|
||||
.and(users_collections::user_uuid.eq(user_id.clone()))
|
||||
.and(users_collections::user_uuid.eq(user_uuid.clone()))
|
||||
))
|
||||
.filter(users_organizations::access_all.eq(true) // User has access all
|
||||
.or(users_collections::user_uuid.eq(user_id) // User has access to collection
|
||||
.or(users_collections::user_uuid.eq(user_uuid) // User has access to collection
|
||||
.and(users_collections::read_only.eq(false)))
|
||||
.or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
|
||||
.or(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner
|
||||
)
|
||||
.select(ciphers_collections::collection_uuid)
|
||||
.load::<String>(conn).unwrap_or_default()
|
||||
.load::<CollectionId>(conn).unwrap_or_default()
|
||||
}}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a Vec with (cipher_uuid, collection_uuid)
|
||||
/// This is used during a full sync so we only need one query for all collections accessible.
|
||||
pub async fn get_collections_with_cipher_by_user(user_id: String, conn: &mut DbConn) -> Vec<(String, String)> {
|
||||
pub async fn get_collections_with_cipher_by_user(
|
||||
user_uuid: UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<(CipherId, CollectionId)> {
|
||||
db_run! {conn: {
|
||||
ciphers_collections::table
|
||||
.inner_join(collections::table.on(
|
||||
@@ -983,12 +1041,12 @@ impl Cipher {
|
||||
))
|
||||
.inner_join(users_organizations::table.on(
|
||||
users_organizations::org_uuid.eq(collections::org_uuid).and(
|
||||
users_organizations::user_uuid.eq(user_id.clone())
|
||||
users_organizations::user_uuid.eq(user_uuid.clone())
|
||||
)
|
||||
))
|
||||
.left_join(users_collections::table.on(
|
||||
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
|
||||
users_collections::user_uuid.eq(user_id.clone())
|
||||
users_collections::user_uuid.eq(user_uuid.clone())
|
||||
)
|
||||
))
|
||||
.left_join(groups_users::table.on(
|
||||
@@ -1002,14 +1060,32 @@ impl Cipher {
|
||||
collections_groups::groups_uuid.eq(groups::uuid)
|
||||
)
|
||||
))
|
||||
.or_filter(users_collections::user_uuid.eq(user_id)) // User has access to collection
|
||||
.or_filter(users_collections::user_uuid.eq(user_uuid)) // User has access to collection
|
||||
.or_filter(users_organizations::access_all.eq(true)) // User has access all
|
||||
.or_filter(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
|
||||
.or_filter(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner
|
||||
.or_filter(groups::access_all.eq(true)) //Access via group
|
||||
.or_filter(collections_groups::collections_uuid.is_not_null()) //Access via group
|
||||
.select(ciphers_collections::all_columns)
|
||||
.distinct()
|
||||
.load::<(String, String)>(conn).unwrap_or_default()
|
||||
.load::<(CipherId, CollectionId)>(conn).unwrap_or_default()
|
||||
}}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(
|
||||
Clone,
|
||||
Debug,
|
||||
AsRef,
|
||||
Deref,
|
||||
DieselNewType,
|
||||
Display,
|
||||
From,
|
||||
FromForm,
|
||||
Hash,
|
||||
PartialEq,
|
||||
Eq,
|
||||
Serialize,
|
||||
Deserialize,
|
||||
UuidFromParam,
|
||||
)]
|
||||
pub struct CipherId(String);
|
||||
|
||||
@@ -1,15 +1,20 @@
|
||||
use derive_more::{AsRef, Deref, Display, From};
|
||||
use serde_json::Value;
|
||||
|
||||
use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
|
||||
use super::{
|
||||
CipherId, CollectionGroup, GroupUser, Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId,
|
||||
User, UserId,
|
||||
};
|
||||
use crate::CONFIG;
|
||||
use macros::UuidFromParam;
|
||||
|
||||
db_object! {
|
||||
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||
#[diesel(table_name = collections)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct Collection {
|
||||
pub uuid: String,
|
||||
pub org_uuid: String,
|
||||
pub uuid: CollectionId,
|
||||
pub org_uuid: OrganizationId,
|
||||
pub name: String,
|
||||
pub external_id: Option<String>,
|
||||
}
|
||||
@@ -18,26 +23,27 @@ db_object! {
|
||||
#[diesel(table_name = users_collections)]
|
||||
#[diesel(primary_key(user_uuid, collection_uuid))]
|
||||
pub struct CollectionUser {
|
||||
pub user_uuid: String,
|
||||
pub collection_uuid: String,
|
||||
pub user_uuid: UserId,
|
||||
pub collection_uuid: CollectionId,
|
||||
pub read_only: bool,
|
||||
pub hide_passwords: bool,
|
||||
pub manage: bool,
|
||||
}
|
||||
|
||||
#[derive(Identifiable, Queryable, Insertable)]
|
||||
#[diesel(table_name = ciphers_collections)]
|
||||
#[diesel(primary_key(cipher_uuid, collection_uuid))]
|
||||
pub struct CollectionCipher {
|
||||
pub cipher_uuid: String,
|
||||
pub collection_uuid: String,
|
||||
pub cipher_uuid: CipherId,
|
||||
pub collection_uuid: CollectionId,
|
||||
}
|
||||
}
|
||||
|
||||
/// Local methods
|
||||
impl Collection {
|
||||
pub fn new(org_uuid: String, name: String, external_id: Option<String>) -> Self {
|
||||
pub fn new(org_uuid: OrganizationId, name: String, external_id: Option<String>) -> Self {
|
||||
let mut new_model = Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
uuid: CollectionId(crate::util::get_uuid()),
|
||||
org_uuid,
|
||||
name,
|
||||
external_id: None,
|
||||
@@ -74,22 +80,30 @@ impl Collection {
|
||||
|
||||
pub async fn to_json_details(
|
||||
&self,
|
||||
user_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
|
||||
conn: &mut DbConn,
|
||||
) -> Value {
|
||||
let (read_only, hide_passwords, can_manage) = if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
match cipher_sync_data.user_organizations.get(&self.org_uuid) {
|
||||
// Only for Manager types Bitwarden returns true for the can_manage option
|
||||
// Owners and Admins always have true
|
||||
Some(uo) if uo.has_full_access() => (false, false, uo.atype >= UserOrgType::Manager),
|
||||
Some(uo) => {
|
||||
let (read_only, hide_passwords, manage) = if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
match cipher_sync_data.members.get(&self.org_uuid) {
|
||||
// Only for Manager types Bitwarden returns true for the manage option
|
||||
// Owners and Admins always have true. Users are not able to have full access
|
||||
Some(m) if m.has_full_access() => (false, false, m.atype >= MembershipType::Manager),
|
||||
Some(m) => {
|
||||
// Only let a manager manage collections when the have full read/write access
|
||||
let is_manager = uo.atype == UserOrgType::Manager;
|
||||
if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
|
||||
(uc.read_only, uc.hide_passwords, is_manager && !uc.read_only && !uc.hide_passwords)
|
||||
let is_manager = m.atype == MembershipType::Manager;
|
||||
if let Some(cu) = cipher_sync_data.user_collections.get(&self.uuid) {
|
||||
(
|
||||
cu.read_only,
|
||||
cu.hide_passwords,
|
||||
cu.manage || (is_manager && !cu.read_only && !cu.hide_passwords),
|
||||
)
|
||||
} else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
|
||||
(cg.read_only, cg.hide_passwords, is_manager && !cg.read_only && !cg.hide_passwords)
|
||||
(
|
||||
cg.read_only,
|
||||
cg.hide_passwords,
|
||||
cg.manage || (is_manager && !cg.read_only && !cg.hide_passwords),
|
||||
)
|
||||
} else {
|
||||
(false, false, false)
|
||||
}
|
||||
@@ -97,19 +111,16 @@ impl Collection {
|
||||
_ => (true, true, false),
|
||||
}
|
||||
} else {
|
||||
match UserOrganization::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
|
||||
Some(ou) if ou.has_full_access() => (false, false, ou.atype >= UserOrgType::Manager),
|
||||
Some(ou) => {
|
||||
let is_manager = ou.atype == UserOrgType::Manager;
|
||||
match Membership::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
|
||||
Some(m) if m.has_full_access() => (false, false, m.atype >= MembershipType::Manager),
|
||||
Some(_) if self.is_manageable_by_user(user_uuid, conn).await => (false, false, true),
|
||||
Some(m) => {
|
||||
let is_manager = m.atype == MembershipType::Manager;
|
||||
let read_only = !self.is_writable_by_user(user_uuid, conn).await;
|
||||
let hide_passwords = self.hide_passwords_for_user(user_uuid, conn).await;
|
||||
(read_only, hide_passwords, is_manager && !read_only && !hide_passwords)
|
||||
}
|
||||
_ => (
|
||||
!self.is_writable_by_user(user_uuid, conn).await,
|
||||
self.hide_passwords_for_user(user_uuid, conn).await,
|
||||
false,
|
||||
),
|
||||
_ => (true, true, false),
|
||||
}
|
||||
};
|
||||
|
||||
@@ -117,17 +128,17 @@ impl Collection {
|
||||
json_object["object"] = json!("collectionDetails");
|
||||
json_object["readOnly"] = json!(read_only);
|
||||
json_object["hidePasswords"] = json!(hide_passwords);
|
||||
json_object["manage"] = json!(can_manage);
|
||||
json_object["manage"] = json!(manage);
|
||||
json_object
|
||||
}
|
||||
|
||||
pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
|
||||
org_user.has_status(UserOrgStatus::Confirmed)
|
||||
&& (org_user.has_full_access()
|
||||
|| CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
|
||||
pub async fn can_access_collection(member: &Membership, col_id: &CollectionId, conn: &mut DbConn) -> bool {
|
||||
member.has_status(MembershipStatus::Confirmed)
|
||||
&& (member.has_full_access()
|
||||
|| CollectionUser::has_access_to_collection_by_user(col_id, &member.user_uuid, conn).await
|
||||
|| (CONFIG.org_groups_enabled()
|
||||
&& (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
|
||||
|| GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
|
||||
&& (GroupUser::has_full_access_by_member(&member.org_uuid, &member.uuid, conn).await
|
||||
|| GroupUser::has_access_to_collection_by_member(col_id, &member.uuid, conn).await)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -185,7 +196,7 @@ impl Collection {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
|
||||
for collection in Self::find_by_organization(org_uuid, conn).await {
|
||||
collection.delete(conn).await?;
|
||||
}
|
||||
@@ -193,12 +204,12 @@ impl Collection {
|
||||
}
|
||||
|
||||
pub async fn update_users_revision(&self, conn: &mut DbConn) {
|
||||
for user_org in UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() {
|
||||
User::update_uuid_revision(&user_org.user_uuid, conn).await;
|
||||
for member in Membership::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() {
|
||||
User::update_uuid_revision(&member.user_uuid, conn).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid(uuid: &CollectionId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
collections::table
|
||||
.filter(collections::uuid.eq(uuid))
|
||||
@@ -208,7 +219,7 @@ impl Collection {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user_uuid(user_uuid: String, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_user_uuid(user_uuid: UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
if CONFIG.org_groups_enabled() {
|
||||
db_run! { conn: {
|
||||
collections::table
|
||||
@@ -234,7 +245,7 @@ impl Collection {
|
||||
)
|
||||
))
|
||||
.filter(
|
||||
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
|
||||
users_organizations::status.eq(MembershipStatus::Confirmed as i32)
|
||||
)
|
||||
.filter(
|
||||
users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
|
||||
@@ -265,7 +276,7 @@ impl Collection {
|
||||
)
|
||||
))
|
||||
.filter(
|
||||
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
|
||||
users_organizations::status.eq(MembershipStatus::Confirmed as i32)
|
||||
)
|
||||
.filter(
|
||||
users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
|
||||
@@ -279,15 +290,19 @@ impl Collection {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_organization_and_user_uuid(
|
||||
org_uuid: &OrganizationId,
|
||||
user_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<Self> {
|
||||
Self::find_by_user_uuid(user_uuid.to_owned(), conn)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter(|c| c.org_uuid == org_uuid)
|
||||
.filter(|c| &c.org_uuid == org_uuid)
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
collections::table
|
||||
.filter(collections::org_uuid.eq(org_uuid))
|
||||
@@ -297,7 +312,7 @@ impl Collection {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
|
||||
pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
|
||||
db_run! { conn: {
|
||||
collections::table
|
||||
.filter(collections::org_uuid.eq(org_uuid))
|
||||
@@ -308,7 +323,11 @@ impl Collection {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid_and_org(
|
||||
uuid: &CollectionId,
|
||||
org_uuid: &OrganizationId,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
collections::table
|
||||
.filter(collections::uuid.eq(uuid))
|
||||
@@ -320,7 +339,7 @@ impl Collection {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid_and_user(uuid: &CollectionId, user_uuid: UserId, conn: &mut DbConn) -> Option<Self> {
|
||||
if CONFIG.org_groups_enabled() {
|
||||
db_run! { conn: {
|
||||
collections::table
|
||||
@@ -349,7 +368,7 @@ impl Collection {
|
||||
.filter(
|
||||
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
|
||||
users_organizations::access_all.eq(true).or( // access_all in Organization
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
||||
users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
|
||||
)).or(
|
||||
groups::access_all.eq(true) // access_all in groups
|
||||
).or( // access via groups
|
||||
@@ -378,7 +397,7 @@ impl Collection {
|
||||
.filter(
|
||||
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
|
||||
users_organizations::access_all.eq(true).or( // access_all in Organization
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
||||
users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
|
||||
))
|
||||
).select(collections::all_columns)
|
||||
.first::<CollectionDb>(conn).ok()
|
||||
@@ -387,7 +406,7 @@ impl Collection {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn is_writable_by_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||
let user_uuid = user_uuid.to_string();
|
||||
if CONFIG.org_groups_enabled() {
|
||||
db_run! { conn: {
|
||||
@@ -411,7 +430,7 @@ impl Collection {
|
||||
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
|
||||
.and(collections_groups::collections_uuid.eq(collections::uuid))
|
||||
))
|
||||
.filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
||||
.filter(users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
|
||||
.or(users_organizations::access_all.eq(true)) // access_all via membership
|
||||
.or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
|
||||
.and(users_collections::read_only.eq(false)))
|
||||
@@ -436,7 +455,7 @@ impl Collection {
|
||||
users_collections::collection_uuid.eq(collections::uuid)
|
||||
.and(users_collections::user_uuid.eq(user_uuid))
|
||||
))
|
||||
.filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
||||
.filter(users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
|
||||
.or(users_organizations::access_all.eq(true)) // access_all via membership
|
||||
.or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
|
||||
.and(users_collections::read_only.eq(false)))
|
||||
@@ -449,7 +468,7 @@ impl Collection {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn hide_passwords_for_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||
let user_uuid = user_uuid.to_string();
|
||||
db_run! { conn: {
|
||||
collections::table
|
||||
@@ -478,7 +497,7 @@ impl Collection {
|
||||
.filter(
|
||||
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::hide_passwords.eq(true)).or(// Directly accessed collection
|
||||
users_organizations::access_all.eq(true).or( // access_all in Organization
|
||||
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
||||
users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
|
||||
)).or(
|
||||
groups::access_all.eq(true) // access_all in groups
|
||||
).or( // access via groups
|
||||
@@ -494,11 +513,61 @@ impl Collection {
|
||||
.unwrap_or(0) != 0
|
||||
}}
|
||||
}
|
||||
|
||||
    pub async fn is_manageable_by_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
        let user_uuid = user_uuid.to_string();
        db_run! { conn: {
            collections::table
            .left_join(users_collections::table.on(
                users_collections::collection_uuid.eq(collections::uuid).and(
                    users_collections::user_uuid.eq(user_uuid.clone())
                )
            ))
            .left_join(users_organizations::table.on(
                collections::org_uuid.eq(users_organizations::org_uuid).and(
                    users_organizations::user_uuid.eq(user_uuid)
                )
            ))
            .left_join(groups_users::table.on(
                groups_users::users_organizations_uuid.eq(users_organizations::uuid)
            ))
            .left_join(groups::table.on(
                groups::uuid.eq(groups_users::groups_uuid)
            ))
            .left_join(collections_groups::table.on(
                collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
                    collections_groups::collections_uuid.eq(collections::uuid)
                )
            ))
            .filter(collections::uuid.eq(&self.uuid))
            .filter(
                users_collections::collection_uuid.eq(&self.uuid).and(users_collections::manage.eq(true)).or( // Directly accessed collection
                    users_organizations::access_all.eq(true).or( // access_all in Organization
                        users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
                )).or(
                    groups::access_all.eq(true) // access_all in groups
                ).or( // access via groups
                    groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
                        collections_groups::collections_uuid.is_not_null().and(
                            collections_groups::manage.eq(true))
                    )
                )
            )
            .count()
            .first::<i64>(conn)
            .ok()
            .unwrap_or(0) != 0
        }}
    }
}

/// Database methods
|
||||
impl CollectionUser {
|
||||
pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_organization_and_user_uuid(
|
||||
org_uuid: &OrganizationId,
|
||||
user_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
users_collections::table
|
||||
.filter(users_collections::user_uuid.eq(user_uuid))
|
||||
@@ -511,24 +580,30 @@ impl CollectionUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
pub async fn find_by_organization_swap_user_uuid_with_member_uuid(
|
||||
org_uuid: &OrganizationId,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<CollectionMembership> {
|
||||
let col_users = db_run! { conn: {
|
||||
users_collections::table
|
||||
.inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid)))
|
||||
.filter(collections::org_uuid.eq(org_uuid))
|
||||
.inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
|
||||
.select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords))
|
||||
.filter(users_organizations::org_uuid.eq(org_uuid))
|
||||
.select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords, users_collections::manage))
|
||||
.load::<CollectionUserDb>(conn)
|
||||
.expect("Error loading users_collections")
|
||||
.from_db()
|
||||
}}
|
||||
}};
|
||||
col_users.into_iter().map(|c| c.into()).collect()
|
||||
}
|
||||
|
||||
pub async fn save(
|
||||
user_uuid: &str,
|
||||
collection_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
collection_uuid: &CollectionId,
|
||||
read_only: bool,
|
||||
hide_passwords: bool,
|
||||
manage: bool,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
User::update_uuid_revision(user_uuid, conn).await;
|
||||
@@ -541,6 +616,7 @@ impl CollectionUser {
|
||||
users_collections::collection_uuid.eq(collection_uuid),
|
||||
users_collections::read_only.eq(read_only),
|
||||
users_collections::hide_passwords.eq(hide_passwords),
|
||||
users_collections::manage.eq(manage),
|
||||
))
|
||||
.execute(conn)
|
||||
{
|
||||
@@ -555,6 +631,7 @@ impl CollectionUser {
|
||||
users_collections::collection_uuid.eq(collection_uuid),
|
||||
users_collections::read_only.eq(read_only),
|
||||
users_collections::hide_passwords.eq(hide_passwords),
|
||||
users_collections::manage.eq(manage),
|
||||
))
|
||||
.execute(conn)
|
||||
.map_res("Error adding user to collection")
|
||||
@@ -569,12 +646,14 @@ impl CollectionUser {
|
||||
users_collections::collection_uuid.eq(collection_uuid),
|
||||
users_collections::read_only.eq(read_only),
|
||||
users_collections::hide_passwords.eq(hide_passwords),
|
||||
users_collections::manage.eq(manage),
|
||||
))
|
||||
.on_conflict((users_collections::user_uuid, users_collections::collection_uuid))
|
||||
.do_update()
|
||||
.set((
|
||||
users_collections::read_only.eq(read_only),
|
||||
users_collections::hide_passwords.eq(hide_passwords),
|
||||
users_collections::manage.eq(manage),
|
||||
))
|
||||
.execute(conn)
|
||||
.map_res("Error adding user to collection")
|
||||
@@ -596,7 +675,7 @@ impl CollectionUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
users_collections::table
|
||||
.filter(users_collections::collection_uuid.eq(collection_uuid))
|
||||
@@ -607,24 +686,27 @@ impl CollectionUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_collection_swap_user_uuid_with_org_user_uuid(
|
||||
collection_uuid: &str,
|
||||
pub async fn find_by_org_and_coll_swap_user_uuid_with_member_uuid(
|
||||
org_uuid: &OrganizationId,
|
||||
collection_uuid: &CollectionId,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
) -> Vec<CollectionMembership> {
|
||||
let col_users = db_run! { conn: {
|
||||
users_collections::table
|
||||
.filter(users_collections::collection_uuid.eq(collection_uuid))
|
||||
.filter(users_organizations::org_uuid.eq(org_uuid))
|
||||
.inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
|
||||
.select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords))
|
||||
.select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords, users_collections::manage))
|
||||
.load::<CollectionUserDb>(conn)
|
||||
.expect("Error loading users_collections")
|
||||
.from_db()
|
||||
}}
|
||||
}};
|
||||
col_users.into_iter().map(|c| c.into()).collect()
|
||||
}
|
||||
|
||||
pub async fn find_by_collection_and_user(
|
||||
collection_uuid: &str,
|
||||
user_uuid: &str,
|
||||
collection_uuid: &CollectionId,
|
||||
user_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
@@ -638,7 +720,7 @@ impl CollectionUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
users_collections::table
|
||||
.filter(users_collections::user_uuid.eq(user_uuid))
|
||||
@@ -649,7 +731,7 @@ impl CollectionUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
|
||||
for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() {
|
||||
User::update_uuid_revision(&collection.user_uuid, conn).await;
|
||||
}
|
||||
@@ -661,7 +743,11 @@ impl CollectionUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_user_and_org(
|
||||
user_uuid: &UserId,
|
||||
org_uuid: &OrganizationId,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;
|
||||
|
||||
db_run! { conn: {
|
||||
@@ -677,14 +763,18 @@ impl CollectionUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn has_access_to_collection_by_user(
|
||||
col_id: &CollectionId,
|
||||
user_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> bool {
|
||||
Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
|
||||
}
|
||||
}
|
||||
|
||||
/// Database methods
|
||||
impl CollectionCipher {
|
||||
pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn save(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
|
||||
Self::update_users_revision(collection_uuid, conn).await;
|
||||
|
||||
db_run! { conn:
|
||||
@@ -714,7 +804,7 @@ impl CollectionCipher {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
|
||||
Self::update_users_revision(collection_uuid, conn).await;
|
||||
|
||||
db_run! { conn: {
|
||||
@@ -728,7 +818,7 @@ impl CollectionCipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid)))
|
||||
.execute(conn)
|
||||
@@ -736,7 +826,7 @@ impl CollectionCipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid)))
|
||||
.execute(conn)
|
||||
@@ -744,9 +834,63 @@ impl CollectionCipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn update_users_revision(collection_uuid: &str, conn: &mut DbConn) {
|
||||
pub async fn update_users_revision(collection_uuid: &CollectionId, conn: &mut DbConn) {
|
||||
if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await {
|
||||
collection.update_users_revision(conn).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Added in case we need the membership_uuid instead of the user_uuid
pub struct CollectionMembership {
    pub membership_uuid: MembershipId,
    pub collection_uuid: CollectionId,
    pub read_only: bool,
    pub hide_passwords: bool,
    pub manage: bool,
}

impl CollectionMembership {
    pub fn to_json_details_for_member(&self, membership_type: i32) -> Value {
        json!({
            "id": self.membership_uuid,
            "readOnly": self.read_only,
            "hidePasswords": self.hide_passwords,
            "manage": membership_type >= MembershipType::Admin
                || self.manage
                || (membership_type == MembershipType::Manager
                    && !self.read_only
                    && !self.hide_passwords),
        })
    }
}

impl From<CollectionUser> for CollectionMembership {
    fn from(c: CollectionUser) -> Self {
        Self {
            membership_uuid: c.user_uuid.to_string().into(),
            collection_uuid: c.collection_uuid,
            read_only: c.read_only,
            hide_passwords: c.hide_passwords,
            manage: c.manage,
        }
    }
}

#[derive(
    Clone,
    Debug,
    AsRef,
    Deref,
    DieselNewType,
    Display,
    From,
    FromForm,
    Hash,
    PartialEq,
    Eq,
    Serialize,
    Deserialize,
    UuidFromParam,
)]
pub struct CollectionId(String);

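
A standalone sketch of the manage rule encoded in to_json_details_for_member above, using a simplified enum whose declaration order stands in for the crate's real atype integers and custom ordering impls (an assumption for illustration only): admins and owners always manage, an explicit per-collection manage flag wins, and a Manager only manages collections that are neither read-only nor hiding passwords.

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum MembershipType {
    // Declaration order gives the derived ordering User < Manager < Admin < Owner;
    // the real crate encodes this differently (integer atype values plus its own comparison impls).
    User,
    Manager,
    Admin,
    Owner,
}

fn effective_manage(membership_type: MembershipType, manage: bool, read_only: bool, hide_passwords: bool) -> bool {
    membership_type >= MembershipType::Admin
        || manage
        || (membership_type == MembershipType::Manager && !read_only && !hide_passwords)
}

fn main() {
    assert!(effective_manage(MembershipType::Owner, false, true, true));
    assert!(effective_manage(MembershipType::Manager, false, false, false));
    assert!(!effective_manage(MembershipType::Manager, false, true, false));
    assert!(effective_manage(MembershipType::User, true, true, true));
    assert!(!effective_manage(MembershipType::User, false, false, false));
}
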
@@ -1,7 +1,10 @@
|
||||
use chrono::{NaiveDateTime, Utc};
|
||||
use derive_more::{Display, From};
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::{crypto, CONFIG};
|
||||
use core::fmt;
|
||||
use super::{AuthRequest, UserId};
|
||||
use crate::{crypto, util::format_date, CONFIG};
|
||||
use macros::IdFromParam;
|
||||
|
||||
db_object! {
|
||||
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||
@@ -9,11 +12,11 @@ db_object! {
|
||||
#[diesel(treat_none_as_null = true)]
|
||||
#[diesel(primary_key(uuid, user_uuid))]
|
||||
pub struct Device {
|
||||
pub uuid: String,
|
||||
pub uuid: DeviceId,
|
||||
pub created_at: NaiveDateTime,
|
||||
pub updated_at: NaiveDateTime,
|
||||
|
||||
pub user_uuid: String,
|
||||
pub user_uuid: UserId,
|
||||
|
||||
pub name: String,
|
||||
pub atype: i32, // https://github.com/bitwarden/server/blob/dcc199bcce4aa2d5621f6fab80f1b49d8b143418/src/Core/Enums/DeviceType.cs
|
||||
@@ -21,14 +24,13 @@ db_object! {
|
||||
pub push_token: Option<String>,
|
||||
|
||||
pub refresh_token: String,
|
||||
|
||||
pub twofactor_remember: Option<String>,
|
||||
}
|
||||
}
|
||||
|
||||
/// Local methods
|
||||
impl Device {
|
||||
pub fn new(uuid: String, user_uuid: String, name: String, atype: i32) -> Self {
|
||||
pub fn new(uuid: DeviceId, user_uuid: UserId, name: String, atype: i32) -> Self {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
Self {
|
||||
@@ -47,6 +49,18 @@ impl Device {
|
||||
}
|
||||
}
|
||||
|
||||
    pub fn to_json(&self) -> Value {
        json!({
            "id": self.uuid,
            "name": self.name,
            "type": self.atype,
            "identifier": self.push_uuid,
            "creationDate": format_date(&self.created_at),
            "isTrusted": false,
            "object": "device"
        })
    }

pub fn refresh_twofactor_remember(&mut self) -> String {
|
||||
use data_encoding::BASE64;
|
||||
let twofactor_remember = crypto::encode_random_bytes::<180>(BASE64);
|
||||
@@ -75,12 +89,12 @@ impl Device {
|
||||
// Also, these key/value pairs are not used anywhere by either Vaultwarden or the Bitwarden clients
// Because they might get used in the future, and they are added by the Bitwarden server, let's keep them, but commented out
|
||||
// ---
|
||||
// fn arg: orgs: Vec<super::UserOrganization>,
|
||||
// fn arg: members: Vec<super::Membership>,
|
||||
// ---
|
||||
// let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
|
||||
// let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
|
||||
// let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
|
||||
// let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();
|
||||
// let orgowner: Vec<_> = members.iter().filter(|m| m.atype == 0).map(|o| o.org_uuid.clone()).collect();
|
||||
// let orgadmin: Vec<_> = members.iter().filter(|m| m.atype == 1).map(|o| o.org_uuid.clone()).collect();
|
||||
// let orguser: Vec<_> = members.iter().filter(|m| m.atype == 2).map(|o| o.org_uuid.clone()).collect();
|
||||
// let orgmanager: Vec<_> = members.iter().filter(|m| m.atype == 3).map(|o| o.org_uuid.clone()).collect();
|
||||
|
||||
// Create the JWT claims struct, to send to the client
|
||||
use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
|
||||
@@ -123,6 +137,36 @@ impl Device {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DeviceWithAuthRequest {
    pub device: Device,
    pub pending_auth_request: Option<AuthRequest>,
}

impl DeviceWithAuthRequest {
    pub fn to_json(&self) -> Value {
        let auth_request = match &self.pending_auth_request {
            Some(auth_request) => auth_request.to_json_for_pending_device(),
            None => Value::Null,
        };
        json!({
            "id": self.device.uuid,
            "name": self.device.name,
            "type": self.device.atype,
            "identifier": self.device.push_uuid,
            "creationDate": format_date(&self.device.created_at),
            "devicePendingAuthRequest": auth_request,
            "isTrusted": false,
            "object": "device",
        })
    }

    pub fn from(c: Device, a: Option<AuthRequest>) -> Self {
        Self {
            device: c,
            pending_auth_request: a,
        }
    }
}
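
For reference, a hedged sketch of the JSON shape the to_json above produces when no auth request is pending; the field names mirror the json! block, while the concrete values and the date string here are invented placeholders.

use serde_json::{json, Value};

fn device_json(id: &str, name: &str, atype: i32, pending_auth_request: Option<Value>) -> Value {
    json!({
        "id": id,
        "name": name,
        "type": atype,
        "identifier": Value::Null, // push_uuid not set in this example
        "creationDate": "2024-12-01T00:00:00.000000Z", // placeholder; the real code uses util::format_date
        "devicePendingAuthRequest": pending_auth_request.unwrap_or(Value::Null),
        "isTrusted": false,
        "object": "device",
    })
}

fn main() {
    let v = device_json("7ad14e80-0000-4000-8000-000000000000", "Firefox", 3, None);
    assert!(v["devicePendingAuthRequest"].is_null());
    assert_eq!(v["object"], "device");
    println!("{v}");
}
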
use crate::db::DbConn;
|
||||
|
||||
use crate::api::EmptyResult;
|
||||
@@ -150,7 +194,7 @@ impl Device {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(devices::table.filter(devices::user_uuid.eq(user_uuid)))
|
||||
.execute(conn)
|
||||
@@ -158,7 +202,7 @@ impl Device {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid_and_user(uuid: &DeviceId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
devices::table
|
||||
.filter(devices::uuid.eq(uuid))
|
||||
@@ -169,7 +213,17 @@ impl Device {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_with_auth_request_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<DeviceWithAuthRequest> {
|
||||
let devices = Self::find_by_user(user_uuid, conn).await;
|
||||
let mut result = Vec::new();
|
||||
for device in devices {
|
||||
let auth_request = AuthRequest::find_by_user_and_requested_device(user_uuid, &device.uuid, conn).await;
|
||||
result.push(DeviceWithAuthRequest::from(device, auth_request));
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
devices::table
|
||||
.filter(devices::user_uuid.eq(user_uuid))
|
||||
@@ -179,7 +233,7 @@ impl Device {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
devices::table
|
||||
.filter(devices::uuid.eq(uuid))
|
||||
@@ -189,7 +243,7 @@ impl Device {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn clear_push_token_by_uuid(uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn clear_push_token_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::update(devices::table)
|
||||
.filter(devices::uuid.eq(uuid))
|
||||
@@ -208,7 +262,7 @@ impl Device {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_latest_active_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_latest_active_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
devices::table
|
||||
.filter(devices::user_uuid.eq(user_uuid))
|
||||
@@ -219,7 +273,7 @@ impl Device {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_push_devices_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_push_devices_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
devices::table
|
||||
.filter(devices::user_uuid.eq(user_uuid))
|
||||
@@ -230,7 +284,7 @@ impl Device {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn check_user_has_push_device(user_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn check_user_has_push_device(user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||
db_run! { conn: {
|
||||
devices::table
|
||||
.filter(devices::user_uuid.eq(user_uuid))
|
||||
@@ -243,68 +297,62 @@ impl Device {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Display)]
|
||||
pub enum DeviceType {
|
||||
#[display("Android")]
|
||||
Android = 0,
|
||||
#[display("iOS")]
|
||||
Ios = 1,
|
||||
#[display("Chrome Extension")]
|
||||
ChromeExtension = 2,
|
||||
#[display("Firefox Extension")]
|
||||
FirefoxExtension = 3,
|
||||
#[display("Opera Extension")]
|
||||
OperaExtension = 4,
|
||||
#[display("Edge Extension")]
|
||||
EdgeExtension = 5,
|
||||
#[display("Windows")]
|
||||
WindowsDesktop = 6,
|
||||
#[display("macOS")]
|
||||
MacOsDesktop = 7,
|
||||
#[display("Linux")]
|
||||
LinuxDesktop = 8,
|
||||
#[display("Chrome")]
|
||||
ChromeBrowser = 9,
|
||||
#[display("Firefox")]
|
||||
FirefoxBrowser = 10,
|
||||
#[display("Opera")]
|
||||
OperaBrowser = 11,
|
||||
#[display("Edge")]
|
||||
EdgeBrowser = 12,
|
||||
#[display("Internet Explorer")]
|
||||
IEBrowser = 13,
|
||||
#[display("Unknown Browser")]
|
||||
UnknownBrowser = 14,
|
||||
#[display("Android")]
|
||||
AndroidAmazon = 15,
|
||||
#[display("UWP")]
|
||||
Uwp = 16,
|
||||
#[display("Safari")]
|
||||
SafariBrowser = 17,
|
||||
#[display("Vivaldi")]
|
||||
VivaldiBrowser = 18,
|
||||
#[display("Vivaldi Extension")]
|
||||
VivaldiExtension = 19,
|
||||
#[display("Safari Extension")]
|
||||
SafariExtension = 20,
|
||||
#[display("SDK")]
|
||||
Sdk = 21,
|
||||
#[display("Server")]
|
||||
Server = 22,
|
||||
#[display("Windows CLI")]
|
||||
WindowsCLI = 23,
|
||||
#[display("macOS CLI")]
|
||||
MacOsCLI = 24,
|
||||
#[display("Linux CLI")]
|
||||
LinuxCLI = 25,
|
||||
}
|
||||
|
||||
impl fmt::Display for DeviceType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
DeviceType::Android => write!(f, "Android"),
|
||||
DeviceType::Ios => write!(f, "iOS"),
|
||||
DeviceType::ChromeExtension => write!(f, "Chrome Extension"),
|
||||
DeviceType::FirefoxExtension => write!(f, "Firefox Extension"),
|
||||
DeviceType::OperaExtension => write!(f, "Opera Extension"),
|
||||
DeviceType::EdgeExtension => write!(f, "Edge Extension"),
|
||||
DeviceType::WindowsDesktop => write!(f, "Windows"),
|
||||
DeviceType::MacOsDesktop => write!(f, "macOS"),
|
||||
DeviceType::LinuxDesktop => write!(f, "Linux"),
|
||||
DeviceType::ChromeBrowser => write!(f, "Chrome"),
|
||||
DeviceType::FirefoxBrowser => write!(f, "Firefox"),
|
||||
DeviceType::OperaBrowser => write!(f, "Opera"),
|
||||
DeviceType::EdgeBrowser => write!(f, "Edge"),
|
||||
DeviceType::IEBrowser => write!(f, "Internet Explorer"),
|
||||
DeviceType::UnknownBrowser => write!(f, "Unknown Browser"),
|
||||
DeviceType::AndroidAmazon => write!(f, "Android"),
|
||||
DeviceType::Uwp => write!(f, "UWP"),
|
||||
DeviceType::SafariBrowser => write!(f, "Safari"),
|
||||
DeviceType::VivaldiBrowser => write!(f, "Vivaldi"),
|
||||
DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"),
|
||||
DeviceType::SafariExtension => write!(f, "Safari Extension"),
|
||||
DeviceType::Sdk => write!(f, "SDK"),
|
||||
DeviceType::Server => write!(f, "Server"),
|
||||
DeviceType::WindowsCLI => write!(f, "Windows CLI"),
|
||||
DeviceType::MacOsCLI => write!(f, "macOS CLI"),
|
||||
DeviceType::LinuxCLI => write!(f, "Linux CLI"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DeviceType {
|
||||
pub fn from_i32(value: i32) -> DeviceType {
|
||||
match value {
|
||||
@@ -338,3 +386,8 @@ impl DeviceType {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(
    Clone, Debug, DieselNewType, Display, From, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize, IdFromParam,
)]
pub struct DeviceId(String);

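
Note that DeviceId derives IdFromParam while CollectionId, FolderId and friends derive UuidFromParam; both macros come from the repo's local macros crate and their code is not part of this diff. As a rough illustration only (an assumption, not the macro's actual implementation), a UUID-shaped id arriving as a route parameter could be validated like this, whereas client-supplied device ids presumably get a laxer rule:

// Illustrative only: a hand-rolled UUID-shape check similar in spirit to what a
// UuidFromParam-style derive might enforce. The repo's real macro may differ.
fn looks_like_uuid(s: &str) -> bool {
    let b = s.as_bytes();
    b.len() == 36
        && b.iter().enumerate().all(|(i, c)| match i {
            8 | 13 | 18 | 23 => *c == b'-',
            _ => c.is_ascii_hexdigit(),
        })
}

#[derive(Debug)]
struct CollectionId(String);

impl std::str::FromStr for CollectionId {
    type Err = &'static str;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if looks_like_uuid(s) {
            Ok(CollectionId(s.to_string()))
        } else {
            Err("expected a UUID-shaped id")
        }
    }
}

fn main() {
    let id: CollectionId = "3fa85f64-5717-4562-b3fc-2c963f66afa6".parse().unwrap();
    println!("{id:?}");
    assert!("../../etc/passwd".parse::<CollectionId>().is_err());
}
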
@@ -1,9 +1,10 @@
|
||||
use chrono::{NaiveDateTime, Utc};
|
||||
use derive_more::{AsRef, Deref, Display, From};
|
||||
use serde_json::Value;
|
||||
|
||||
use super::{User, UserId};
|
||||
use crate::{api::EmptyResult, db::DbConn, error::MapResult};
|
||||
|
||||
use super::User;
|
||||
use macros::UuidFromParam;
|
||||
|
||||
db_object! {
|
||||
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||
@@ -11,9 +12,9 @@ db_object! {
|
||||
#[diesel(treat_none_as_null = true)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct EmergencyAccess {
|
||||
pub uuid: String,
|
||||
pub grantor_uuid: String,
|
||||
pub grantee_uuid: Option<String>,
|
||||
pub uuid: EmergencyAccessId,
|
||||
pub grantor_uuid: UserId,
|
||||
pub grantee_uuid: Option<UserId>,
|
||||
pub email: Option<String>,
|
||||
pub key_encrypted: Option<String>,
|
||||
pub atype: i32, //EmergencyAccessType
|
||||
@@ -29,11 +30,11 @@ db_object! {
|
||||
// Local methods
|
||||
|
||||
impl EmergencyAccess {
|
||||
pub fn new(grantor_uuid: String, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self {
|
||||
pub fn new(grantor_uuid: UserId, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
uuid: EmergencyAccessId(crate::util::get_uuid()),
|
||||
grantor_uuid,
|
||||
grantee_uuid: None,
|
||||
email: Some(email),
|
||||
@@ -82,7 +83,7 @@ impl EmergencyAccess {
|
||||
}
|
||||
|
||||
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
|
||||
let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
|
||||
let grantee_user = if let Some(grantee_uuid) = &self.grantee_uuid {
|
||||
User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
|
||||
} else if let Some(email) = self.email.as_deref() {
|
||||
match User::find_by_mail(email, conn).await {
|
||||
@@ -211,7 +212,7 @@ impl EmergencyAccess {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await {
|
||||
ea.delete(conn).await?;
|
||||
}
|
||||
@@ -239,8 +240,8 @@ impl EmergencyAccess {
|
||||
}
|
||||
|
||||
pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
|
||||
grantor_uuid: &str,
|
||||
grantee_uuid: &str,
|
||||
grantor_uuid: &UserId,
|
||||
grantee_uuid: &UserId,
|
||||
email: &str,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
@@ -262,7 +263,11 @@ impl EmergencyAccess {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid_and_grantor_uuid(
|
||||
uuid: &EmergencyAccessId,
|
||||
grantor_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
emergency_access::table
|
||||
.filter(emergency_access::uuid.eq(uuid))
|
||||
@@ -272,7 +277,11 @@ impl EmergencyAccess {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid_and_grantee_uuid(uuid: &str, grantee_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid_and_grantee_uuid(
|
||||
uuid: &EmergencyAccessId,
|
||||
grantee_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
emergency_access::table
|
||||
.filter(emergency_access::uuid.eq(uuid))
|
||||
@@ -282,7 +291,11 @@ impl EmergencyAccess {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid_and_grantee_email(uuid: &str, grantee_email: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid_and_grantee_email(
|
||||
uuid: &EmergencyAccessId,
|
||||
grantee_email: &str,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
emergency_access::table
|
||||
.filter(emergency_access::uuid.eq(uuid))
|
||||
@@ -292,7 +305,7 @@ impl EmergencyAccess {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_all_by_grantee_uuid(grantee_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
emergency_access::table
|
||||
.filter(emergency_access::grantee_uuid.eq(grantee_uuid))
|
||||
@@ -319,7 +332,7 @@ impl EmergencyAccess {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_all_by_grantor_uuid(grantor_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
emergency_access::table
|
||||
.filter(emergency_access::grantor_uuid.eq(grantor_uuid))
|
||||
@@ -327,7 +340,12 @@ impl EmergencyAccess {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn accept_invite(
|
||||
&mut self,
|
||||
grantee_uuid: &UserId,
|
||||
grantee_email: &str,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
|
||||
err!("User email does not match invite.");
|
||||
}
|
||||
@@ -337,10 +355,28 @@ impl EmergencyAccess {
|
||||
}
|
||||
|
||||
self.status = EmergencyAccessStatus::Accepted as i32;
|
||||
self.grantee_uuid = Some(String::from(grantee_uuid));
|
||||
self.grantee_uuid = Some(grantee_uuid.clone());
|
||||
self.email = None;
|
||||
self.save(conn).await
|
||||
}
|
||||
}
|
||||
|
||||
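
A minimal sketch of the accept_invite transition shown above, with simplified types: the invited e-mail must match, after which the record becomes Accepted, stores the grantee's id and drops the e-mail address.

#[derive(Debug, PartialEq)]
enum Status {
    Invited,
    Accepted,
}

struct EmergencyAccess {
    status: Status,
    email: Option<String>,
    grantee_uuid: Option<String>,
}

impl EmergencyAccess {
    fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str) -> Result<(), &'static str> {
        match self.email.as_deref() {
            Some(e) if e == grantee_email => {
                self.status = Status::Accepted;
                self.grantee_uuid = Some(grantee_uuid.to_string());
                self.email = None;
                Ok(())
            }
            _ => Err("User email does not match invite."),
        }
    }
}

fn main() {
    let mut ea = EmergencyAccess {
        status: Status::Invited,
        email: Some("grantee@example.com".into()),
        grantee_uuid: None,
    };
    assert!(ea.accept_invite("user-uuid", "grantee@example.com").is_ok());
    assert_eq!(ea.status, Status::Accepted);
    assert_eq!(ea.grantee_uuid.as_deref(), Some("user-uuid"));
    assert!(ea.email.is_none());
}
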
// endregion
|
||||
|
||||
#[derive(
|
||||
Clone,
|
||||
Debug,
|
||||
AsRef,
|
||||
Deref,
|
||||
DieselNewType,
|
||||
Display,
|
||||
From,
|
||||
FromForm,
|
||||
Hash,
|
||||
PartialEq,
|
||||
Eq,
|
||||
Serialize,
|
||||
Deserialize,
|
||||
UuidFromParam,
|
||||
)]
|
||||
pub struct EmergencyAccessId(String);
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
use crate::db::DbConn;
|
||||
use chrono::{NaiveDateTime, TimeDelta, Utc};
|
||||
//use derive_more::{AsRef, Deref, Display, From};
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::{api::EmptyResult, error::MapResult, CONFIG};
|
||||
|
||||
use chrono::{NaiveDateTime, TimeDelta, Utc};
|
||||
use super::{CipherId, CollectionId, GroupId, MembershipId, OrgPolicyId, OrganizationId, UserId};
|
||||
use crate::{api::EmptyResult, db::DbConn, error::MapResult, CONFIG};
|
||||
|
||||
// https://bitwarden.com/help/event-logs/
|
||||
|
||||
@@ -15,20 +15,20 @@ db_object! {
|
||||
#[diesel(table_name = event)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct Event {
|
||||
pub uuid: String,
|
||||
pub uuid: EventId,
|
||||
pub event_type: i32, // EventType
|
||||
pub user_uuid: Option<String>,
|
||||
pub org_uuid: Option<String>,
|
||||
pub cipher_uuid: Option<String>,
|
||||
pub collection_uuid: Option<String>,
|
||||
pub group_uuid: Option<String>,
|
||||
pub org_user_uuid: Option<String>,
|
||||
pub act_user_uuid: Option<String>,
|
||||
pub user_uuid: Option<UserId>,
|
||||
pub org_uuid: Option<OrganizationId>,
|
||||
pub cipher_uuid: Option<CipherId>,
|
||||
pub collection_uuid: Option<CollectionId>,
|
||||
pub group_uuid: Option<GroupId>,
|
||||
pub org_user_uuid: Option<MembershipId>,
|
||||
pub act_user_uuid: Option<UserId>,
|
||||
// Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/DeviceType.cs
|
||||
pub device_type: Option<i32>,
|
||||
pub ip_address: Option<String>,
|
||||
pub event_date: NaiveDateTime,
|
||||
pub policy_uuid: Option<String>,
|
||||
pub policy_uuid: Option<OrgPolicyId>,
|
||||
pub provider_uuid: Option<String>,
|
||||
pub provider_user_uuid: Option<String>,
|
||||
pub provider_org_uuid: Option<String>,
|
||||
@@ -49,6 +49,8 @@ pub enum EventType {
|
||||
UserClientExportedVault = 1007,
|
||||
// UserUpdatedTempPassword = 1008, // Not supported
|
||||
// UserMigratedKeyToKeyConnector = 1009, // Not supported
|
||||
UserRequestedDeviceApproval = 1010,
|
||||
// UserTdeOffboardingPasswordSet = 1011, // Not supported
|
||||
|
||||
// Cipher
|
||||
CipherCreated = 1100,
|
||||
@@ -69,6 +71,7 @@ pub enum EventType {
|
||||
CipherSoftDeleted = 1115,
|
||||
CipherRestored = 1116,
|
||||
CipherClientToggledCardNumberVisible = 1117,
|
||||
CipherClientToggledTOTPSeedVisible = 1118,
|
||||
|
||||
// Collection
|
||||
CollectionCreated = 1300,
|
||||
@@ -94,6 +97,10 @@ pub enum EventType {
|
||||
// OrganizationUserFirstSsoLogin = 1510, // Not supported
|
||||
OrganizationUserRevoked = 1511,
|
||||
OrganizationUserRestored = 1512,
|
||||
OrganizationUserApprovedAuthRequest = 1513,
|
||||
OrganizationUserRejectedAuthRequest = 1514,
|
||||
OrganizationUserDeleted = 1515,
|
||||
OrganizationUserLeft = 1516,
|
||||
|
||||
// Organization
|
||||
OrganizationUpdated = 1600,
|
||||
@@ -105,6 +112,7 @@ pub enum EventType {
|
||||
// OrganizationEnabledKeyConnector = 1606, // Not supported
|
||||
// OrganizationDisabledKeyConnector = 1607, // Not supported
|
||||
// OrganizationSponsorshipsSynced = 1608, // Not supported
|
||||
// OrganizationCollectionManagementUpdated = 1609, // Not supported
|
||||
|
||||
// Policy
|
||||
PolicyUpdated = 1700,
|
||||
@@ -117,6 +125,13 @@ pub enum EventType {
|
||||
// ProviderOrganizationAdded = 1901, // Not supported
|
||||
// ProviderOrganizationRemoved = 1902, // Not supported
|
||||
// ProviderOrganizationVaultAccessed = 1903, // Not supported
|
||||
|
||||
// OrganizationDomainAdded = 2000, // Not supported
|
||||
// OrganizationDomainRemoved = 2001, // Not supported
|
||||
// OrganizationDomainVerified = 2002, // Not supported
|
||||
// OrganizationDomainNotVerified = 2003, // Not supported
|
||||
|
||||
// SecretRetrieved = 2100, // Not supported
|
||||
}
|
||||
|
||||
/// Local methods
|
||||
@@ -128,7 +143,7 @@ impl Event {
|
||||
};
|
||||
|
||||
Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
uuid: EventId(crate::util::get_uuid()),
|
||||
event_type,
|
||||
user_uuid: None,
|
||||
org_uuid: None,
|
||||
@@ -246,7 +261,7 @@ impl Event {
|
||||
/// ##############
|
||||
/// Custom Queries
|
||||
pub async fn find_by_organization_uuid(
|
||||
org_uuid: &str,
|
||||
org_uuid: &OrganizationId,
|
||||
start: &NaiveDateTime,
|
||||
end: &NaiveDateTime,
|
||||
conn: &mut DbConn,
|
||||
@@ -263,7 +278,7 @@ impl Event {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
|
||||
pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
|
||||
db_run! { conn: {
|
||||
event::table
|
||||
.filter(event::org_uuid.eq(org_uuid))
|
||||
@@ -274,16 +289,16 @@ impl Event {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_org_and_user_org(
|
||||
org_uuid: &str,
|
||||
user_org_uuid: &str,
|
||||
pub async fn find_by_org_and_member(
|
||||
org_uuid: &OrganizationId,
|
||||
member_uuid: &MembershipId,
|
||||
start: &NaiveDateTime,
|
||||
end: &NaiveDateTime,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
event::table
|
||||
.inner_join(users_organizations::table.on(users_organizations::uuid.eq(user_org_uuid)))
|
||||
.inner_join(users_organizations::table.on(users_organizations::uuid.eq(member_uuid)))
|
||||
.filter(event::org_uuid.eq(org_uuid))
|
||||
.filter(event::event_date.between(start, end))
|
||||
.filter(event::user_uuid.eq(users_organizations::user_uuid.nullable()).or(event::act_user_uuid.eq(users_organizations::user_uuid.nullable())))
|
||||
@@ -297,7 +312,7 @@ impl Event {
|
||||
}
|
||||
|
||||
pub async fn find_by_cipher_uuid(
|
||||
cipher_uuid: &str,
|
||||
cipher_uuid: &CipherId,
|
||||
start: &NaiveDateTime,
|
||||
end: &NaiveDateTime,
|
||||
conn: &mut DbConn,
|
||||
@@ -327,3 +342,6 @@ impl Event {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, DieselNewType, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct EventId(String);
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
use super::User;
|
||||
use super::{CipherId, User, UserId};
|
||||
|
||||
db_object! {
|
||||
#[derive(Identifiable, Queryable, Insertable)]
|
||||
#[diesel(table_name = favorites)]
|
||||
#[diesel(primary_key(user_uuid, cipher_uuid))]
|
||||
pub struct Favorite {
|
||||
pub user_uuid: String,
|
||||
pub cipher_uuid: String,
|
||||
pub user_uuid: UserId,
|
||||
pub cipher_uuid: CipherId,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ use crate::error::MapResult;
|
||||
|
||||
impl Favorite {
|
||||
// Returns whether the specified cipher is a favorite of the specified user.
|
||||
pub async fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn is_favorite(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||
db_run! { conn: {
|
||||
let query = favorites::table
|
||||
.filter(favorites::cipher_uuid.eq(cipher_uuid))
|
||||
@@ -29,7 +29,12 @@ impl Favorite {
|
||||
}
|
||||
|
||||
// Sets whether the specified cipher is a favorite of the specified user.
|
||||
pub async fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn set_favorite(
|
||||
favorite: bool,
|
||||
cipher_uuid: &CipherId,
|
||||
user_uuid: &UserId,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn).await, favorite);
|
||||
match (old, new) {
|
||||
(false, true) => {
|
||||
@@ -62,7 +67,7 @@ impl Favorite {
|
||||
}
|
||||
|
||||
// Delete all favorite entries associated with the specified cipher.
|
||||
pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid)))
|
||||
.execute(conn)
|
||||
@@ -71,7 +76,7 @@ impl Favorite {
|
||||
}
|
||||
|
||||
// Delete all favorite entries associated with the specified user.
|
||||
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid)))
|
||||
.execute(conn)
|
||||
@@ -81,12 +86,12 @@ impl Favorite {
|
||||
|
||||
/// Return a Vec of cipher_uuids; this will only contain favorite-flagged ciphers
|
||||
/// This is used during a full sync so we only need one query for all favorite cipher matches.
|
||||
pub async fn get_all_cipher_uuid_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<String> {
|
||||
pub async fn get_all_cipher_uuid_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<CipherId> {
|
||||
db_run! { conn: {
|
||||
favorites::table
|
||||
.filter(favorites::user_uuid.eq(user_uuid))
|
||||
.select(favorites::cipher_uuid)
|
||||
.load::<String>(conn)
|
||||
.load::<CipherId>(conn)
|
||||
.unwrap_or_default()
|
||||
}}
|
||||
}
|
||||
|
||||
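
The set_favorite logic earlier in this file boils down to a small state machine: only the (false, true) and (true, false) transitions touch the database, everything else is a no-op. A standalone sketch:

#[derive(Debug, PartialEq)]
enum Action {
    Insert,
    Delete,
    Nothing,
}

fn favorite_action(currently_favorite: bool, requested: bool) -> Action {
    match (currently_favorite, requested) {
        (false, true) => Action::Insert,
        (true, false) => Action::Delete,
        (false, false) | (true, true) => Action::Nothing,
    }
}

fn main() {
    assert_eq!(favorite_action(false, true), Action::Insert);
    assert_eq!(favorite_action(true, false), Action::Delete);
    assert_eq!(favorite_action(true, true), Action::Nothing);
    assert_eq!(favorite_action(false, false), Action::Nothing);
}
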
@@ -1,17 +1,19 @@
|
||||
use chrono::{NaiveDateTime, Utc};
|
||||
use derive_more::{AsRef, Deref, Display, From};
|
||||
use serde_json::Value;
|
||||
|
||||
use super::User;
|
||||
use super::{CipherId, User, UserId};
|
||||
use macros::UuidFromParam;
|
||||
|
||||
db_object! {
|
||||
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||
#[diesel(table_name = folders)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct Folder {
|
||||
pub uuid: String,
|
||||
pub uuid: FolderId,
|
||||
pub created_at: NaiveDateTime,
|
||||
pub updated_at: NaiveDateTime,
|
||||
pub user_uuid: String,
|
||||
pub user_uuid: UserId,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
@@ -19,18 +21,18 @@ db_object! {
|
||||
#[diesel(table_name = folders_ciphers)]
|
||||
#[diesel(primary_key(cipher_uuid, folder_uuid))]
|
||||
pub struct FolderCipher {
|
||||
pub cipher_uuid: String,
|
||||
pub folder_uuid: String,
|
||||
pub cipher_uuid: CipherId,
|
||||
pub folder_uuid: FolderId,
|
||||
}
|
||||
}
|
||||
|
||||
/// Local methods
|
||||
impl Folder {
|
||||
pub fn new(user_uuid: String, name: String) -> Self {
|
||||
pub fn new(user_uuid: UserId, name: String) -> Self {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
uuid: FolderId(crate::util::get_uuid()),
|
||||
created_at: now,
|
||||
updated_at: now,
|
||||
|
||||
@@ -52,10 +54,10 @@ impl Folder {
|
||||
}
|
||||
|
||||
impl FolderCipher {
|
||||
pub fn new(folder_uuid: &str, cipher_uuid: &str) -> Self {
|
||||
pub fn new(folder_uuid: FolderId, cipher_uuid: CipherId) -> Self {
|
||||
Self {
|
||||
folder_uuid: folder_uuid.to_string(),
|
||||
cipher_uuid: cipher_uuid.to_string(),
|
||||
folder_uuid,
|
||||
cipher_uuid,
|
||||
}
|
||||
}
|
||||
}
|
||||
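
FolderCipher::new now takes the typed ids by value instead of &str, so the to_string() calls inside the constructor disappear and callers move (or clone) their FolderId/CipherId. A simplified sketch of the new calling convention (types reduced to plain wrappers here):

#[derive(Clone, Debug)]
struct FolderId(String);

#[derive(Clone, Debug)]
struct CipherId(String);

struct FolderCipher {
    cipher_uuid: CipherId,
    folder_uuid: FolderId,
}

impl FolderCipher {
    fn new(folder_uuid: FolderId, cipher_uuid: CipherId) -> Self {
        Self {
            folder_uuid,
            cipher_uuid,
        }
    }
}

fn main() {
    let folder = FolderId("folder-uuid".into());
    let cipher = CipherId("cipher-uuid".into());
    // Clone only when the caller still needs its own copy afterwards.
    let fc = FolderCipher::new(folder.clone(), cipher);
    println!("{:?} -> {:?}", fc.cipher_uuid, fc.folder_uuid);
}
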
@@ -113,24 +115,25 @@ impl Folder {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
for folder in Self::find_by_user(user_uuid, conn).await {
|
||||
folder.delete(conn).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid_and_user(uuid: &FolderId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
folders::table
|
||||
.filter(folders::uuid.eq(uuid))
|
||||
.filter(folders::user_uuid.eq(user_uuid))
|
||||
.first::<FolderDb>(conn)
|
||||
.ok()
|
||||
.from_db()
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
folders::table
|
||||
.filter(folders::user_uuid.eq(user_uuid))
|
||||
@@ -176,7 +179,7 @@ impl FolderCipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid)))
|
||||
.execute(conn)
|
||||
@@ -184,7 +187,7 @@ impl FolderCipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_folder(folder_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid)))
|
||||
.execute(conn)
|
||||
@@ -192,7 +195,11 @@ impl FolderCipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_folder_and_cipher(
|
||||
folder_uuid: &FolderId,
|
||||
cipher_uuid: &CipherId,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
folders_ciphers::table
|
||||
.filter(folders_ciphers::folder_uuid.eq(folder_uuid))
|
||||
@@ -203,7 +210,7 @@ impl FolderCipher {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
folders_ciphers::table
|
||||
.filter(folders_ciphers::folder_uuid.eq(folder_uuid))
|
||||
@@ -215,14 +222,32 @@ impl FolderCipher {
|
||||
|
||||
/// Return a vec with (cipher_uuid, folder_uuid)
|
||||
/// This is used during a full sync so we only need one query for all folder matches.
|
||||
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<(String, String)> {
|
||||
pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<(CipherId, FolderId)> {
|
||||
db_run! { conn: {
|
||||
folders_ciphers::table
|
||||
.inner_join(folders::table)
|
||||
.filter(folders::user_uuid.eq(user_uuid))
|
||||
.select(folders_ciphers::all_columns)
|
||||
.load::<(String, String)>(conn)
|
||||
.load::<(CipherId, FolderId)>(conn)
|
||||
.unwrap_or_default()
|
||||
}}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(
|
||||
Clone,
|
||||
Debug,
|
||||
AsRef,
|
||||
Deref,
|
||||
DieselNewType,
|
||||
Display,
|
||||
From,
|
||||
FromForm,
|
||||
Hash,
|
||||
PartialEq,
|
||||
Eq,
|
||||
Serialize,
|
||||
Deserialize,
|
||||
UuidFromParam,
|
||||
)]
|
||||
pub struct FolderId(String);
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
use super::{User, UserOrgType, UserOrganization};
|
||||
use super::{CollectionId, Membership, MembershipId, OrganizationId, User, UserId};
|
||||
use crate::api::EmptyResult;
|
||||
use crate::db::DbConn;
|
||||
use crate::error::MapResult;
|
||||
use chrono::{NaiveDateTime, Utc};
|
||||
use derive_more::{AsRef, Deref, Display, From};
|
||||
use macros::UuidFromParam;
|
||||
use serde_json::Value;
|
||||
|
||||
db_object! {
|
||||
@@ -10,8 +12,8 @@ db_object! {
|
||||
#[diesel(table_name = groups)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct Group {
|
||||
pub uuid: String,
|
||||
pub organizations_uuid: String,
|
||||
pub uuid: GroupId,
|
||||
pub organizations_uuid: OrganizationId,
|
||||
pub name: String,
|
||||
pub access_all: bool,
|
||||
pub external_id: Option<String>,
|
||||
@@ -23,28 +25,34 @@ db_object! {
|
||||
#[diesel(table_name = collections_groups)]
|
||||
#[diesel(primary_key(collections_uuid, groups_uuid))]
|
||||
pub struct CollectionGroup {
|
||||
pub collections_uuid: String,
|
||||
pub groups_uuid: String,
|
||||
pub collections_uuid: CollectionId,
|
||||
pub groups_uuid: GroupId,
|
||||
pub read_only: bool,
|
||||
pub hide_passwords: bool,
|
||||
pub manage: bool,
|
||||
}
|
||||
|
||||
#[derive(Identifiable, Queryable, Insertable)]
|
||||
#[diesel(table_name = groups_users)]
|
||||
#[diesel(primary_key(groups_uuid, users_organizations_uuid))]
|
||||
pub struct GroupUser {
|
||||
pub groups_uuid: String,
|
||||
pub users_organizations_uuid: String
|
||||
pub groups_uuid: GroupId,
|
||||
pub users_organizations_uuid: MembershipId
|
||||
}
|
||||
}
|
||||
|
||||
/// Local methods
|
||||
impl Group {
|
||||
pub fn new(organizations_uuid: String, name: String, access_all: bool, external_id: Option<String>) -> Self {
|
||||
pub fn new(
|
||||
organizations_uuid: OrganizationId,
|
||||
name: String,
|
||||
access_all: bool,
|
||||
external_id: Option<String>,
|
||||
) -> Self {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
let mut new_model = Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
uuid: GroupId(crate::util::get_uuid()),
|
||||
organizations_uuid,
|
||||
name,
|
||||
access_all,
|
||||
@@ -73,7 +81,10 @@ impl Group {
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn to_json_details(&self, user_org_type: &i32, conn: &mut DbConn) -> Value {
|
||||
pub async fn to_json_details(&self, conn: &mut DbConn) -> Value {
|
||||
// If both read_only and hide_passwords are false, then manage should be true
|
||||
// You can't have an entry with read_only and manage, or hide_passwords and manage
|
||||
// Or an entry with everything to false
|
||||
let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
|
||||
.await
|
||||
.iter()
|
||||
@@ -82,7 +93,7 @@ impl Group {
|
||||
"id": entry.collections_uuid,
|
||||
"readOnly": entry.read_only,
|
||||
"hidePasswords": entry.hide_passwords,
|
||||
"manage": *user_org_type >= UserOrgType::Admin || (*user_org_type == UserOrgType::Manager && !entry.read_only && !entry.hide_passwords)
|
||||
"manage": entry.manage,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
@@ -108,18 +119,38 @@ impl Group {
|
||||
}
|
||||
|
||||
impl CollectionGroup {
|
||||
pub fn new(collections_uuid: String, groups_uuid: String, read_only: bool, hide_passwords: bool) -> Self {
|
||||
pub fn new(
|
||||
collections_uuid: CollectionId,
|
||||
groups_uuid: GroupId,
|
||||
read_only: bool,
|
||||
hide_passwords: bool,
|
||||
manage: bool,
|
||||
) -> Self {
|
||||
Self {
|
||||
collections_uuid,
|
||||
groups_uuid,
|
||||
read_only,
|
||||
hide_passwords,
|
||||
manage,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_json_details_for_group(&self) -> Value {
|
||||
// If both read_only and hide_passwords are false, then manage should be true
|
||||
// You can't have an entry with read_only and manage, or hide_passwords and manage
|
||||
// Or an entry with everything to false
|
||||
// For backwards compatibility and migration purposes we keep checking read_only and hide_passwords
|
||||
json!({
|
||||
"id": self.groups_uuid,
|
||||
"readOnly": self.read_only,
|
||||
"hidePasswords": self.hide_passwords,
|
||||
"manage": self.manage || (!self.read_only && !self.hide_passwords),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
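
A small sketch of the backwards-compatibility rule in to_json_details_for_group above: rows written before the manage column existed carry manage = false, so a group entry that is neither read-only nor hiding passwords is still reported as managing the collection.

fn group_manage(manage: bool, read_only: bool, hide_passwords: bool) -> bool {
    manage || (!read_only && !hide_passwords)
}

fn main() {
    // Legacy row: manage was never set, but the group had full rights.
    assert!(group_manage(false, false, false));
    // An explicit manage flag always wins.
    assert!(group_manage(true, true, true));
    // Restricted entries stay non-managing.
    assert!(!group_manage(false, true, false));
    assert!(!group_manage(false, false, true));
}
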
impl GroupUser {
|
||||
pub fn new(groups_uuid: String, users_organizations_uuid: String) -> Self {
|
||||
pub fn new(groups_uuid: GroupId, users_organizations_uuid: MembershipId) -> Self {
|
||||
Self {
|
||||
groups_uuid,
|
||||
users_organizations_uuid,
|
||||
@@ -163,27 +194,27 @@ impl Group {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
|
||||
for group in Self::find_by_organization(org_uuid, conn).await {
|
||||
group.delete(conn).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn find_by_organization(organizations_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
groups::table
|
||||
.filter(groups::organizations_uuid.eq(organizations_uuid))
|
||||
.filter(groups::organizations_uuid.eq(org_uuid))
|
||||
.load::<GroupDb>(conn)
|
||||
.expect("Error loading groups")
|
||||
.from_db()
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn count_by_org(organizations_uuid: &str, conn: &mut DbConn) -> i64 {
|
||||
pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
|
||||
db_run! { conn: {
|
||||
groups::table
|
||||
.filter(groups::organizations_uuid.eq(organizations_uuid))
|
||||
.filter(groups::organizations_uuid.eq(org_uuid))
|
||||
.count()
|
||||
.first::<i64>(conn)
|
||||
.ok()
|
||||
@@ -191,17 +222,22 @@ impl Group {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid_and_org(uuid: &GroupId, org_uuid: &OrganizationId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
groups::table
|
||||
.filter(groups::uuid.eq(uuid))
|
||||
.filter(groups::organizations_uuid.eq(org_uuid))
|
||||
.first::<GroupDb>(conn)
|
||||
.ok()
|
||||
.from_db()
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_external_id_and_org(external_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_external_id_and_org(
|
||||
external_id: &str,
|
||||
org_uuid: &OrganizationId,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
groups::table
|
||||
.filter(groups::external_id.eq(external_id))
|
||||
@@ -212,7 +248,7 @@ impl Group {
|
||||
}}
|
||||
}
|
||||
//Returns all organizations the user has full access to
|
||||
pub async fn gather_user_organizations_full_access(user_uuid: &str, conn: &mut DbConn) -> Vec<String> {
|
||||
pub async fn get_orgs_by_user_with_full_access(user_uuid: &UserId, conn: &mut DbConn) -> Vec<OrganizationId> {
|
||||
db_run! { conn: {
|
||||
groups_users::table
|
||||
.inner_join(users_organizations::table.on(
|
||||
@@ -225,12 +261,12 @@ impl Group {
|
||||
.filter(groups::access_all.eq(true))
|
||||
.select(groups::organizations_uuid)
|
||||
.distinct()
|
||||
.load::<String>(conn)
|
||||
.load::<OrganizationId>(conn)
|
||||
.expect("Error loading organization group full access information for user")
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn is_in_full_access_group(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn is_in_full_access_group(user_uuid: &UserId, org_uuid: &OrganizationId, conn: &mut DbConn) -> bool {
|
||||
db_run! { conn: {
|
||||
groups::table
|
||||
.inner_join(groups_users::table.on(
|
||||
@@ -259,13 +295,13 @@ impl Group {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn update_revision(uuid: &str, conn: &mut DbConn) {
|
||||
pub async fn update_revision(uuid: &GroupId, conn: &mut DbConn) {
|
||||
if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
|
||||
warn!("Failed to update revision for {}: {:#?}", uuid, e);
|
||||
}
|
||||
}
|
||||
|
||||
async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
|
||||
async fn _update_revision(uuid: &GroupId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! {conn: {
|
||||
crate::util::retry(|| {
|
||||
diesel::update(groups::table.filter(groups::uuid.eq(uuid)))
|
||||
@@ -292,6 +328,7 @@ impl CollectionGroup {
|
||||
collections_groups::groups_uuid.eq(&self.groups_uuid),
|
||||
collections_groups::read_only.eq(&self.read_only),
|
||||
collections_groups::hide_passwords.eq(&self.hide_passwords),
|
||||
collections_groups::manage.eq(&self.manage),
|
||||
))
|
||||
.execute(conn)
|
||||
{
|
||||
@@ -306,6 +343,7 @@ impl CollectionGroup {
|
||||
collections_groups::groups_uuid.eq(&self.groups_uuid),
|
||||
collections_groups::read_only.eq(&self.read_only),
|
||||
collections_groups::hide_passwords.eq(&self.hide_passwords),
|
||||
collections_groups::manage.eq(&self.manage),
|
||||
))
|
||||
.execute(conn)
|
||||
.map_res("Error adding group to collection")
|
||||
@@ -320,12 +358,14 @@ impl CollectionGroup {
|
||||
collections_groups::groups_uuid.eq(&self.groups_uuid),
|
||||
collections_groups::read_only.eq(self.read_only),
|
||||
collections_groups::hide_passwords.eq(self.hide_passwords),
|
||||
collections_groups::manage.eq(self.manage),
|
||||
))
|
||||
.on_conflict((collections_groups::collections_uuid, collections_groups::groups_uuid))
|
||||
.do_update()
|
||||
.set((
|
||||
collections_groups::read_only.eq(self.read_only),
|
||||
collections_groups::hide_passwords.eq(self.hide_passwords),
|
||||
collections_groups::manage.eq(self.manage),
|
||||
))
|
||||
.execute(conn)
|
||||
.map_res("Error adding group to collection")
|
||||
@@ -333,7 +373,7 @@ impl CollectionGroup {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
collections_groups::table
|
||||
.filter(collections_groups::groups_uuid.eq(group_uuid))
|
||||
@@ -343,7 +383,7 @@ impl CollectionGroup {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
collections_groups::table
|
||||
.inner_join(groups_users::table.on(
|
||||
@@ -360,7 +400,7 @@ impl CollectionGroup {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
collections_groups::table
|
||||
.filter(collections_groups::collections_uuid.eq(collection_uuid))
|
||||
@@ -386,7 +426,7 @@ impl CollectionGroup {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult {
|
||||
let group_users = GroupUser::find_by_group(group_uuid, conn).await;
|
||||
for group_user in group_users {
|
||||
group_user.update_user_revision(conn).await;
|
||||
@@ -400,7 +440,7 @@ impl CollectionGroup {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
|
||||
let collection_assigned_to_groups = CollectionGroup::find_by_collection(collection_uuid, conn).await;
|
||||
for collection_assigned_to_group in collection_assigned_to_groups {
|
||||
let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, conn).await;
|
||||
@@ -465,7 +505,7 @@ impl GroupUser {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
groups_users::table
|
||||
.filter(groups_users::groups_uuid.eq(group_uuid))
|
||||
@@ -475,10 +515,10 @@ impl GroupUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
groups_users::table
|
||||
.filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid))
|
||||
.filter(groups_users::users_organizations_uuid.eq(member_uuid))
|
||||
.load::<GroupUserDb>(conn)
|
||||
.expect("Error loading groups for user")
|
||||
.from_db()
|
||||
@@ -486,8 +526,8 @@ impl GroupUser {
|
||||
}
|
||||
|
||||
pub async fn has_access_to_collection_by_member(
|
||||
collection_uuid: &str,
|
||||
member_uuid: &str,
|
||||
collection_uuid: &CollectionId,
|
||||
member_uuid: &MembershipId,
|
||||
conn: &mut DbConn,
|
||||
) -> bool {
|
||||
db_run! { conn: {
|
||||
@@ -503,7 +543,11 @@ impl GroupUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn has_full_access_by_member(org_uuid: &str, member_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn has_full_access_by_member(
|
||||
org_uuid: &OrganizationId,
|
||||
member_uuid: &MembershipId,
|
||||
conn: &mut DbConn,
|
||||
) -> bool {
|
||||
db_run! { conn: {
|
||||
groups_users::table
|
||||
.inner_join(groups::table.on(
|
||||
@@ -519,32 +563,32 @@ impl GroupUser {
|
||||
}
|
||||
|
||||
pub async fn update_user_revision(&self, conn: &mut DbConn) {
|
||||
match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await {
|
||||
Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
|
||||
None => warn!("User could not be found!"),
|
||||
match Membership::find_by_uuid(&self.users_organizations_uuid, conn).await {
|
||||
Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await,
|
||||
None => warn!("Member could not be found!"),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn delete_by_group_id_and_user_id(
|
||||
group_uuid: &str,
|
||||
users_organizations_uuid: &str,
|
||||
pub async fn delete_by_group_and_member(
|
||||
group_uuid: &GroupId,
|
||||
member_uuid: &MembershipId,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await {
|
||||
Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
|
||||
None => warn!("User could not be found!"),
|
||||
match Membership::find_by_uuid(member_uuid, conn).await {
|
||||
Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await,
|
||||
None => warn!("Member could not be found!"),
|
||||
};
|
||||
|
||||
db_run! { conn: {
|
||||
diesel::delete(groups_users::table)
|
||||
.filter(groups_users::groups_uuid.eq(group_uuid))
|
||||
.filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid))
|
||||
.filter(groups_users::users_organizations_uuid.eq(member_uuid))
|
||||
.execute(conn)
|
||||
.map_res("Error deleting group users")
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult {
|
||||
let group_users = GroupUser::find_by_group(group_uuid, conn).await;
|
||||
for group_user in group_users {
|
||||
group_user.update_user_revision(conn).await;
|
||||
@@ -558,17 +602,35 @@ impl GroupUser {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await {
|
||||
Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
|
||||
None => warn!("User could not be found!"),
|
||||
pub async fn delete_all_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> EmptyResult {
|
||||
match Membership::find_by_uuid(member_uuid, conn).await {
|
||||
Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await,
|
||||
None => warn!("Member could not be found!"),
|
||||
}
|
||||
|
||||
db_run! { conn: {
|
||||
diesel::delete(groups_users::table)
|
||||
.filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid))
|
||||
.filter(groups_users::users_organizations_uuid.eq(member_uuid))
|
||||
.execute(conn)
|
||||
.map_res("Error deleting user groups")
|
||||
}}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
From,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
UuidFromParam,
)]
pub struct GroupId(String);

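// Added note (not part of the diff): GroupId is one of the ID newtypes this change introduces in
// place of bare `String`/`&str` UUIDs. `DieselNewType` keeps it transparent to Diesel (it maps to
// the same Text column as the inner String), `Display`/`AsRef` keep logging and comparisons cheap,
// and distinct types turn an accidentally swapped GroupId/OrganizationId argument into a compile
// error instead of a query that silently matches nothing. `UuidFromParam` is presumably the
// workspace macro that lets the newtype be parsed directly from a route parameter.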
@@ -16,20 +16,26 @@ mod two_factor_duo_context;
|
||||
mod two_factor_incomplete;
|
||||
mod user;
|
||||
|
||||
pub use self::attachment::Attachment;
|
||||
pub use self::auth_request::AuthRequest;
|
||||
pub use self::cipher::Cipher;
|
||||
pub use self::collection::{Collection, CollectionCipher, CollectionUser};
|
||||
pub use self::device::{Device, DeviceType};
|
||||
pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType};
|
||||
pub use self::attachment::{Attachment, AttachmentId};
|
||||
pub use self::auth_request::{AuthRequest, AuthRequestId};
|
||||
pub use self::cipher::{Cipher, CipherId, RepromptType};
|
||||
pub use self::collection::{Collection, CollectionCipher, CollectionId, CollectionUser};
|
||||
pub use self::device::{Device, DeviceId, DeviceType};
|
||||
pub use self::emergency_access::{EmergencyAccess, EmergencyAccessId, EmergencyAccessStatus, EmergencyAccessType};
|
||||
pub use self::event::{Event, EventType};
|
||||
pub use self::favorite::Favorite;
|
||||
pub use self::folder::{Folder, FolderCipher};
|
||||
pub use self::group::{CollectionGroup, Group, GroupUser};
|
||||
pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType};
|
||||
pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization};
|
||||
pub use self::send::{Send, SendType};
|
||||
pub use self::folder::{Folder, FolderCipher, FolderId};
|
||||
pub use self::group::{CollectionGroup, Group, GroupId, GroupUser};
|
||||
pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyId, OrgPolicyType};
|
||||
pub use self::organization::{
|
||||
Membership, MembershipId, MembershipStatus, MembershipType, OrgApiKeyId, Organization, OrganizationApiKey,
|
||||
OrganizationId,
|
||||
};
|
||||
pub use self::send::{
|
||||
id::{SendFileId, SendId},
|
||||
Send, SendType,
|
||||
};
|
||||
pub use self::two_factor::{TwoFactor, TwoFactorType};
|
||||
pub use self::two_factor_duo_context::TwoFactorDuoContext;
|
||||
pub use self::two_factor_incomplete::TwoFactorIncomplete;
|
||||
pub use self::user::{Invitation, User, UserKdfType, UserStampException};
|
||||
pub use self::user::{Invitation, User, UserId, UserKdfType, UserStampException};
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use derive_more::{AsRef, From};
|
||||
use serde::Deserialize;
|
||||
use serde_json::Value;
|
||||
|
||||
@@ -5,15 +6,15 @@ use crate::api::EmptyResult;
|
||||
use crate::db::DbConn;
|
||||
use crate::error::MapResult;
|
||||
|
||||
use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};
|
||||
use super::{Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId, TwoFactor, UserId};
|
||||
|
||||
db_object! {
|
||||
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||
#[diesel(table_name = org_policies)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct OrgPolicy {
|
||||
pub uuid: String,
|
||||
pub org_uuid: String,
|
||||
pub uuid: OrgPolicyId,
|
||||
pub org_uuid: OrganizationId,
|
||||
pub atype: i32,
|
||||
pub enabled: bool,
|
||||
pub data: String,
|
||||
@@ -62,9 +63,9 @@ pub enum OrgPolicyErr {
|
||||
|
||||
/// Local methods
|
||||
impl OrgPolicy {
|
||||
pub fn new(org_uuid: String, atype: OrgPolicyType, data: String) -> Self {
|
||||
pub fn new(org_uuid: OrganizationId, atype: OrgPolicyType, data: String) -> Self {
|
||||
Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
uuid: OrgPolicyId(crate::util::get_uuid()),
|
||||
org_uuid,
|
||||
atype: atype as i32,
|
||||
enabled: false,
|
||||
@@ -142,17 +143,7 @@ impl OrgPolicy {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
org_policies::table
|
||||
.filter(org_policies::uuid.eq(uuid))
|
||||
.first::<OrgPolicyDb>(conn)
|
||||
.ok()
|
||||
.from_db()
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
org_policies::table
|
||||
.filter(org_policies::org_uuid.eq(org_uuid))
|
||||
@@ -162,7 +153,7 @@ impl OrgPolicy {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_confirmed_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_confirmed_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
org_policies::table
|
||||
.inner_join(
|
||||
@@ -171,7 +162,7 @@ impl OrgPolicy {
|
||||
.and(users_organizations::user_uuid.eq(user_uuid)))
|
||||
)
|
||||
.filter(
|
||||
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
|
||||
users_organizations::status.eq(MembershipStatus::Confirmed as i32)
|
||||
)
|
||||
.select(org_policies::all_columns)
|
||||
.load::<OrgPolicyDb>(conn)
|
||||
@@ -180,7 +171,11 @@ impl OrgPolicy {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_org_and_type(org_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_org_and_type(
|
||||
org_uuid: &OrganizationId,
|
||||
policy_type: OrgPolicyType,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
org_policies::table
|
||||
.filter(org_policies::org_uuid.eq(org_uuid))
|
||||
@@ -191,7 +186,7 @@ impl OrgPolicy {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid)))
|
||||
.execute(conn)
|
||||
@@ -200,7 +195,7 @@ impl OrgPolicy {
|
||||
}
|
||||
|
||||
pub async fn find_accepted_and_confirmed_by_user_and_active_policy(
|
||||
user_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
policy_type: OrgPolicyType,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<Self> {
|
||||
@@ -212,10 +207,10 @@ impl OrgPolicy {
|
||||
.and(users_organizations::user_uuid.eq(user_uuid)))
|
||||
)
|
||||
.filter(
|
||||
users_organizations::status.eq(UserOrgStatus::Accepted as i32)
|
||||
users_organizations::status.eq(MembershipStatus::Accepted as i32)
|
||||
)
|
||||
.or_filter(
|
||||
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
|
||||
users_organizations::status.eq(MembershipStatus::Confirmed as i32)
|
||||
)
|
||||
.filter(org_policies::atype.eq(policy_type as i32))
|
||||
.filter(org_policies::enabled.eq(true))
|
||||
@@ -227,7 +222,7 @@ impl OrgPolicy {
|
||||
}
|
||||
|
||||
pub async fn find_confirmed_by_user_and_active_policy(
|
||||
user_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
policy_type: OrgPolicyType,
|
||||
conn: &mut DbConn,
|
||||
) -> Vec<Self> {
|
||||
@@ -239,7 +234,7 @@ impl OrgPolicy {
|
||||
.and(users_organizations::user_uuid.eq(user_uuid)))
|
||||
)
|
||||
.filter(
|
||||
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
|
||||
users_organizations::status.eq(MembershipStatus::Confirmed as i32)
|
||||
)
|
||||
.filter(org_policies::atype.eq(policy_type as i32))
|
||||
.filter(org_policies::enabled.eq(true))
|
||||
@@ -254,21 +249,21 @@ impl OrgPolicy {
|
||||
/// and the user is not an owner or admin of that org. This is only useful for checking
|
||||
/// applicability of policy types that have these particular semantics.
|
||||
pub async fn is_applicable_to_user(
|
||||
user_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
policy_type: OrgPolicyType,
|
||||
exclude_org_uuid: Option<&str>,
|
||||
exclude_org_uuid: Option<&OrganizationId>,
|
||||
conn: &mut DbConn,
|
||||
) -> bool {
|
||||
for policy in
|
||||
OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(user_uuid, policy_type, conn).await
|
||||
{
|
||||
// Check if we need to skip this organization.
|
||||
if exclude_org_uuid.is_some() && exclude_org_uuid.unwrap() == policy.org_uuid {
|
||||
if exclude_org_uuid.is_some() && *exclude_org_uuid.unwrap() == policy.org_uuid {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
|
||||
if user.atype < UserOrgType::Admin {
|
||||
if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
|
||||
if user.atype < MembershipType::Admin {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -277,8 +272,8 @@ impl OrgPolicy {
|
||||
}
|
||||
|
||||
pub async fn is_user_allowed(
|
||||
user_uuid: &str,
|
||||
org_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
org_uuid: &OrganizationId,
|
||||
exclude_current_org: bool,
|
||||
conn: &mut DbConn,
|
||||
) -> OrgPolicyResult {
|
||||
@@ -306,7 +301,7 @@ impl OrgPolicy {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn org_is_reset_password_auto_enroll(org_uuid: &OrganizationId, conn: &mut DbConn) -> bool {
|
||||
match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
|
||||
Some(policy) => match serde_json::from_str::<ResetPasswordDataModel>(&policy.data) {
|
||||
Ok(opts) => {
|
||||
@@ -322,12 +317,12 @@ impl OrgPolicy {
|
||||
|
||||
/// Returns true if the user belongs to an org that has enabled the `DisableHideEmail`
|
||||
/// option of the `Send Options` policy, and the user is not an owner or admin of that org.
|
||||
pub async fn is_hide_email_disabled(user_uuid: &str, conn: &mut DbConn) -> bool {
|
||||
pub async fn is_hide_email_disabled(user_uuid: &UserId, conn: &mut DbConn) -> bool {
|
||||
for policy in
|
||||
OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await
|
||||
{
|
||||
if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
|
||||
if user.atype < UserOrgType::Admin {
|
||||
if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
|
||||
if user.atype < MembershipType::Admin {
|
||||
match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
|
||||
Ok(opts) => {
|
||||
if opts.disable_hide_email {
|
||||
@@ -342,12 +337,19 @@ impl OrgPolicy {
|
||||
false
|
||||
}
|
||||
|
||||
pub async fn is_enabled_for_member(org_user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> bool {
|
||||
if let Some(membership) = UserOrganization::find_by_uuid(org_user_uuid, conn).await {
|
||||
if let Some(policy) = OrgPolicy::find_by_org_and_type(&membership.org_uuid, policy_type, conn).await {
|
||||
pub async fn is_enabled_for_member(
|
||||
member_uuid: &MembershipId,
|
||||
policy_type: OrgPolicyType,
|
||||
conn: &mut DbConn,
|
||||
) -> bool {
|
||||
if let Some(member) = Membership::find_by_uuid(member_uuid, conn).await {
|
||||
if let Some(policy) = OrgPolicy::find_by_org_and_type(&member.org_uuid, policy_type, conn).await {
|
||||
return policy.enabled;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, AsRef, DieselNewType, From, FromForm, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct OrgPolicyId(String);
|
||||
|
||||
File diff suppressed because it is too large
@@ -3,7 +3,8 @@ use serde_json::Value;
|
||||
|
||||
use crate::util::LowerCase;
|
||||
|
||||
use super::User;
|
||||
use super::{OrganizationId, User, UserId};
|
||||
use id::SendId;
|
||||
|
||||
db_object! {
|
||||
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||
@@ -11,11 +12,10 @@ db_object! {
|
||||
#[diesel(treat_none_as_null = true)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct Send {
|
||||
pub uuid: String,
|
||||
|
||||
pub user_uuid: Option<String>,
|
||||
pub organization_uuid: Option<String>,
|
||||
pub uuid: SendId,
|
||||
|
||||
pub user_uuid: Option<UserId>,
|
||||
pub organization_uuid: Option<OrganizationId>,
|
||||
|
||||
pub name: String,
|
||||
pub notes: Option<String>,
|
||||
@@ -51,7 +51,7 @@ impl Send {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
uuid: SendId::from(crate::util::get_uuid()),
|
||||
user_uuid: None,
|
||||
organization_uuid: None,
|
||||
|
||||
@@ -243,7 +243,7 @@ impl Send {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<String> {
|
||||
pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {
|
||||
let mut user_uuids = Vec::new();
|
||||
match &self.user_uuid {
|
||||
Some(user_uuid) => {
|
||||
@@ -257,7 +257,7 @@ impl Send {
|
||||
user_uuids
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
for send in Self::find_by_user(user_uuid, conn).await {
|
||||
send.delete(conn).await?;
|
||||
}
|
||||
@@ -268,20 +268,19 @@ impl Send {
|
||||
use data_encoding::BASE64URL_NOPAD;
|
||||
use uuid::Uuid;
|
||||
|
||||
let uuid_vec = match BASE64URL_NOPAD.decode(access_id.as_bytes()) {
|
||||
Ok(v) => v,
|
||||
Err(_) => return None,
|
||||
let Ok(uuid_vec) = BASE64URL_NOPAD.decode(access_id.as_bytes()) else {
|
||||
return None;
|
||||
};
|
||||
|
||||
let uuid = match Uuid::from_slice(&uuid_vec) {
|
||||
Ok(u) => u.to_string(),
|
||||
Ok(u) => SendId::from(u.to_string()),
|
||||
Err(_) => return None,
|
||||
};
|
||||
|
||||
Self::find_by_uuid(&uuid, conn).await
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid(uuid: &SendId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! {conn: {
|
||||
sends::table
|
||||
.filter(sends::uuid.eq(uuid))
|
||||
@@ -291,7 +290,18 @@ impl Send {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_uuid_and_user(uuid: &SendId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! {conn: {
|
||||
sends::table
|
||||
.filter(sends::uuid.eq(uuid))
|
||||
.filter(sends::user_uuid.eq(user_uuid))
|
||||
.first::<SendDb>(conn)
|
||||
.ok()
|
||||
.from_db()
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! {conn: {
|
||||
sends::table
|
||||
.filter(sends::user_uuid.eq(user_uuid))
|
||||
@@ -299,7 +309,7 @@ impl Send {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> {
|
||||
pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option<i64> {
|
||||
let sends = Self::find_by_user(user_uuid, conn).await;
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
@@ -322,7 +332,7 @@ impl Send {
|
||||
Some(total)
|
||||
}
|
||||
|
||||
pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! {conn: {
|
||||
sends::table
|
||||
.filter(sends::organization_uuid.eq(org_uuid))
|
||||
@@ -339,3 +349,48 @@ impl Send {
|
||||
}}
|
||||
}
|
||||
}
|
||||
|
||||
// separate namespace to avoid name collision with std::marker::Send
|
||||
pub mod id {
|
||||
use derive_more::{AsRef, Deref, Display, From};
|
||||
use macros::{IdFromParam, UuidFromParam};
|
||||
use std::marker::Send;
|
||||
use std::path::Path;
|
||||
|
||||
#[derive(
|
||||
Clone,
|
||||
Debug,
|
||||
AsRef,
|
||||
Deref,
|
||||
DieselNewType,
|
||||
Display,
|
||||
From,
|
||||
FromForm,
|
||||
Hash,
|
||||
PartialEq,
|
||||
Eq,
|
||||
Serialize,
|
||||
Deserialize,
|
||||
UuidFromParam,
|
||||
)]
|
||||
pub struct SendId(String);
|
||||
|
||||
impl AsRef<Path> for SendId {
|
||||
#[inline]
|
||||
fn as_ref(&self) -> &Path {
|
||||
Path::new(&self.0)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(
|
||||
Clone, Debug, AsRef, Deref, Display, From, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize, IdFromParam,
|
||||
)]
|
||||
pub struct SendFileId(String);
|
||||
|
||||
impl AsRef<Path> for SendFileId {
|
||||
#[inline]
|
||||
fn as_ref(&self) -> &Path {
|
||||
Path::new(&self.0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
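// Added sketch (not part of the diff): the `AsRef<Path>` impls above let a Send's on-disk files be
// located without first converting the IDs to strings. The helper name and the base-directory
// argument are illustrative only, not taken from the codebase.
fn example_send_attachment_path(
    sends_dir: &std::path::Path,
    send_id: &id::SendId,
    file_id: &id::SendFileId,
) -> std::path::PathBuf {
    // `Path::join` accepts any `AsRef<Path>`, so the newtypes can be passed directly.
    sends_dir.join(send_id).join(file_id)
}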
@@ -1,5 +1,6 @@
|
||||
use serde_json::Value;
|
||||
|
||||
use super::UserId;
|
||||
use crate::{api::EmptyResult, db::DbConn, error::MapResult};
|
||||
|
||||
db_object! {
|
||||
@@ -7,8 +8,8 @@ db_object! {
|
||||
#[diesel(table_name = twofactor)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct TwoFactor {
|
||||
pub uuid: String,
|
||||
pub user_uuid: String,
|
||||
pub uuid: TwoFactorId,
|
||||
pub user_uuid: UserId,
|
||||
pub atype: i32,
|
||||
pub enabled: bool,
|
||||
pub data: String,
|
||||
@@ -41,9 +42,9 @@ pub enum TwoFactorType {
|
||||
|
||||
/// Local methods
|
||||
impl TwoFactor {
|
||||
pub fn new(user_uuid: String, atype: TwoFactorType, data: String) -> Self {
|
||||
pub fn new(user_uuid: UserId, atype: TwoFactorType, data: String) -> Self {
|
||||
Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
uuid: TwoFactorId(crate::util::get_uuid()),
|
||||
user_uuid,
|
||||
atype: atype as i32,
|
||||
enabled: true,
|
||||
@@ -118,7 +119,7 @@ impl TwoFactor {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
|
||||
pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
|
||||
db_run! { conn: {
|
||||
twofactor::table
|
||||
.filter(twofactor::user_uuid.eq(user_uuid))
|
||||
@@ -129,7 +130,7 @@ impl TwoFactor {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_user_and_type(user_uuid: &UserId, atype: i32, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
twofactor::table
|
||||
.filter(twofactor::user_uuid.eq(user_uuid))
|
||||
@@ -140,7 +141,7 @@ impl TwoFactor {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
|
||||
.execute(conn)
|
||||
@@ -217,3 +218,6 @@ impl TwoFactor {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, DieselNewType, FromForm, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct TwoFactorId(String);
|
||||
|
||||
@@ -1,17 +1,26 @@
|
||||
use chrono::{NaiveDateTime, Utc};
|
||||
|
||||
use crate::{api::EmptyResult, auth::ClientIp, db::DbConn, error::MapResult, CONFIG};
|
||||
use crate::{
|
||||
api::EmptyResult,
|
||||
auth::ClientIp,
|
||||
db::{
|
||||
models::{DeviceId, UserId},
|
||||
DbConn,
|
||||
},
|
||||
error::MapResult,
|
||||
CONFIG,
|
||||
};
|
||||
|
||||
db_object! {
|
||||
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||
#[diesel(table_name = twofactor_incomplete)]
|
||||
#[diesel(primary_key(user_uuid, device_uuid))]
|
||||
pub struct TwoFactorIncomplete {
|
||||
pub user_uuid: String,
|
||||
pub user_uuid: UserId,
|
||||
// This device UUID is simply what's claimed by the device. It doesn't
|
||||
// necessarily correspond to any UUID in the devices table, since a device
|
||||
// must complete 2FA login before being added into the devices table.
|
||||
pub device_uuid: String,
|
||||
pub device_uuid: DeviceId,
|
||||
pub device_name: String,
|
||||
pub device_type: i32,
|
||||
pub login_time: NaiveDateTime,
|
||||
@@ -21,8 +30,8 @@ db_object! {
|
||||
|
||||
impl TwoFactorIncomplete {
|
||||
pub async fn mark_incomplete(
|
||||
user_uuid: &str,
|
||||
device_uuid: &str,
|
||||
user_uuid: &UserId,
|
||||
device_uuid: &DeviceId,
|
||||
device_name: &str,
|
||||
device_type: i32,
|
||||
ip: &ClientIp,
|
||||
@@ -55,7 +64,7 @@ impl TwoFactorIncomplete {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn mark_complete(user_uuid: &UserId, device_uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult {
|
||||
if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
|
||||
return Ok(());
|
||||
}
|
||||
@@ -63,7 +72,11 @@ impl TwoFactorIncomplete {
|
||||
Self::delete_by_user_and_device(user_uuid, device_uuid, conn).await
|
||||
}
|
||||
|
||||
pub async fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_user_and_device(
|
||||
user_uuid: &UserId,
|
||||
device_uuid: &DeviceId,
|
||||
conn: &mut DbConn,
|
||||
) -> Option<Self> {
|
||||
db_run! { conn: {
|
||||
twofactor_incomplete::table
|
||||
.filter(twofactor_incomplete::user_uuid.eq(user_uuid))
|
||||
@@ -88,7 +101,11 @@ impl TwoFactorIncomplete {
|
||||
Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn).await
|
||||
}
|
||||
|
||||
pub async fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_by_user_and_device(
|
||||
user_uuid: &UserId,
|
||||
device_uuid: &DeviceId,
|
||||
conn: &mut DbConn,
|
||||
) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(twofactor_incomplete::table
|
||||
.filter(twofactor_incomplete::user_uuid.eq(user_uuid))
|
||||
@@ -98,7 +115,7 @@ impl TwoFactorIncomplete {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! { conn: {
|
||||
diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid)))
|
||||
.execute(conn)
|
||||
|
||||
@@ -1,9 +1,19 @@
|
||||
use crate::util::{format_date, get_uuid, retry};
|
||||
use chrono::{NaiveDateTime, TimeDelta, Utc};
|
||||
use derive_more::{AsRef, Deref, Display, From};
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::crypto;
|
||||
use crate::CONFIG;
|
||||
use super::{
|
||||
Cipher, Device, EmergencyAccess, Favorite, Folder, Membership, MembershipType, TwoFactor, TwoFactorIncomplete,
|
||||
};
|
||||
use crate::{
|
||||
api::EmptyResult,
|
||||
crypto,
|
||||
db::DbConn,
|
||||
error::MapResult,
|
||||
util::{format_date, get_uuid, retry},
|
||||
CONFIG,
|
||||
};
|
||||
use macros::UuidFromParam;
|
||||
|
||||
db_object! {
|
||||
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||
@@ -11,7 +21,7 @@ db_object! {
|
||||
#[diesel(treat_none_as_null = true)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct User {
|
||||
pub uuid: String,
|
||||
pub uuid: UserId,
|
||||
pub enabled: bool,
|
||||
pub created_at: NaiveDateTime,
|
||||
pub updated_at: NaiveDateTime,
|
||||
@@ -91,7 +101,7 @@ impl User {
|
||||
let email = email.to_lowercase();
|
||||
|
||||
Self {
|
||||
uuid: get_uuid(),
|
||||
uuid: UserId(get_uuid()),
|
||||
enabled: true,
|
||||
created_at: now,
|
||||
updated_at: now,
|
||||
@@ -214,20 +224,11 @@ impl User {
|
||||
}
|
||||
}
|
||||
|
||||
use super::{
|
||||
Cipher, Device, EmergencyAccess, Favorite, Folder, Send, TwoFactor, TwoFactorIncomplete, UserOrgType,
|
||||
UserOrganization,
|
||||
};
|
||||
use crate::db::DbConn;
|
||||
|
||||
use crate::api::EmptyResult;
|
||||
use crate::error::MapResult;
|
||||
|
||||
/// Database methods
|
||||
impl User {
|
||||
pub async fn to_json(&self, conn: &mut DbConn) -> Value {
|
||||
let mut orgs_json = Vec::new();
|
||||
for c in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await {
|
||||
for c in Membership::find_confirmed_by_user(&self.uuid, conn).await {
|
||||
orgs_json.push(c.to_json(conn).await);
|
||||
}
|
||||
|
||||
@@ -266,8 +267,8 @@ impl User {
|
||||
}
|
||||
|
||||
pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
|
||||
if self.email.trim().is_empty() {
|
||||
err!("User email can't be empty")
|
||||
if !crate::util::is_valid_email(&self.email) {
|
||||
err!(format!("User email {} is not a valid email address", self.email))
|
||||
}
|
||||
|
||||
self.updated_at = Utc::now().naive_utc();
|
||||
@@ -304,19 +305,18 @@ impl User {
|
||||
}
|
||||
|
||||
pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
|
||||
for user_org in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await {
|
||||
if user_org.atype == UserOrgType::Owner
|
||||
&& UserOrganization::count_confirmed_by_org_and_type(&user_org.org_uuid, UserOrgType::Owner, conn).await
|
||||
<= 1
|
||||
for member in Membership::find_confirmed_by_user(&self.uuid, conn).await {
|
||||
if member.atype == MembershipType::Owner
|
||||
&& Membership::count_confirmed_by_org_and_type(&member.org_uuid, MembershipType::Owner, conn).await <= 1
|
||||
{
|
||||
err!("Can't delete last owner")
|
||||
}
|
||||
}
|
||||
|
||||
Send::delete_all_by_user(&self.uuid, conn).await?;
|
||||
super::Send::delete_all_by_user(&self.uuid, conn).await?;
|
||||
EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
|
||||
EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
|
||||
UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
|
||||
Membership::delete_all_by_user(&self.uuid, conn).await?;
|
||||
Cipher::delete_all_by_user(&self.uuid, conn).await?;
|
||||
Favorite::delete_all_by_user(&self.uuid, conn).await?;
|
||||
Folder::delete_all_by_user(&self.uuid, conn).await?;
|
||||
@@ -332,7 +332,7 @@ impl User {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn update_uuid_revision(uuid: &str, conn: &mut DbConn) {
|
||||
pub async fn update_uuid_revision(uuid: &UserId, conn: &mut DbConn) {
|
||||
if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
|
||||
warn!("Failed to update revision for {}: {:#?}", uuid, e);
|
||||
}
|
||||
@@ -357,7 +357,7 @@ impl User {
|
||||
Self::_update_revision(&self.uuid, &self.updated_at, conn).await
|
||||
}
|
||||
|
||||
async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
|
||||
async fn _update_revision(uuid: &UserId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
|
||||
db_run! {conn: {
|
||||
retry(|| {
|
||||
diesel::update(users::table.filter(users::uuid.eq(uuid)))
|
||||
@@ -379,7 +379,7 @@ impl User {
|
||||
}}
|
||||
}
|
||||
|
||||
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
|
||||
pub async fn find_by_uuid(uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
|
||||
db_run! {conn: {
|
||||
users::table.filter(users::uuid.eq(uuid)).first::<UserDb>(conn).ok().from_db()
|
||||
}}
|
||||
@@ -408,8 +408,8 @@ impl Invitation {
|
||||
}
|
||||
|
||||
pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
|
||||
if self.email.trim().is_empty() {
|
||||
err!("Invitation email can't be empty")
|
||||
if !crate::util::is_valid_email(&self.email) {
|
||||
err!(format!("Invitation email {} is not a valid email address", self.email))
|
||||
}
|
||||
|
||||
db_run! {conn:
|
||||
@@ -458,3 +458,23 @@ impl Invitation {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(
Clone,
Debug,
DieselNewType,
FromForm,
PartialEq,
Eq,
Hash,
Serialize,
Deserialize,
AsRef,
Deref,
Display,
From,
UuidFromParam,
)]
#[deref(forward)]
#[from(forward)]
pub struct UserId(String);

@@ -226,6 +226,7 @@ table! {
|
||||
collection_uuid -> Text,
|
||||
read_only -> Bool,
|
||||
hide_passwords -> Bool,
|
||||
manage -> Bool,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -295,6 +296,7 @@ table! {
|
||||
groups_uuid -> Text,
|
||||
read_only -> Bool,
|
||||
hide_passwords -> Bool,
|
||||
manage -> Bool,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -226,6 +226,7 @@ table! {
|
||||
collection_uuid -> Text,
|
||||
read_only -> Bool,
|
||||
hide_passwords -> Bool,
|
||||
manage -> Bool,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -295,6 +296,7 @@ table! {
|
||||
groups_uuid -> Text,
|
||||
read_only -> Bool,
|
||||
hide_passwords -> Bool,
|
||||
manage -> Bool,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -226,6 +226,7 @@ table! {
|
||||
collection_uuid -> Text,
|
||||
read_only -> Bool,
|
||||
hide_passwords -> Bool,
|
||||
manage -> Bool,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -295,6 +296,7 @@ table! {
|
||||
groups_uuid -> Text,
|
||||
read_only -> Bool,
|
||||
hide_passwords -> Bool,
|
||||
manage -> Bool,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
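// Added note (not part of the diff): the identical `manage -> Bool` hunks above appear three times
// because each supported database backend (SQLite, MySQL, PostgreSQL) presumably carries its own
// Diesel `table!` schema definition; the new column is added to all of them.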
108 src/mail.rs
@@ -1,7 +1,6 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use chrono::NaiveDateTime;
|
||||
use percent_encoding::{percent_encode, NON_ALPHANUMERIC};
|
||||
use std::{env::consts::EXE_SUFFIX, str::FromStr};
|
||||
|
||||
use lettre::{
|
||||
message::{Attachment, Body, Mailbox, Message, MultiPart, SinglePart},
|
||||
@@ -17,7 +16,7 @@ use crate::{
|
||||
encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, generate_invite_claims,
|
||||
generate_verify_email_claims,
|
||||
},
|
||||
db::models::{Device, DeviceType, User},
|
||||
db::models::{Device, DeviceType, EmergencyAccessId, MembershipId, OrganizationId, User, UserId},
|
||||
error::Error,
|
||||
CONFIG,
|
||||
};
|
||||
@@ -26,7 +25,7 @@ fn sendmail_transport() -> AsyncSendmailTransport<Tokio1Executor> {
|
||||
if let Some(command) = CONFIG.sendmail_command() {
|
||||
AsyncSendmailTransport::new_with_command(command)
|
||||
} else {
|
||||
AsyncSendmailTransport::new()
|
||||
AsyncSendmailTransport::new_with_command(format!("sendmail{EXE_SUFFIX}"))
|
||||
}
|
||||
}
|
||||
|
||||
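// Added note (not part of the diff): `std::env::consts::EXE_SUFFIX` is "" on Unix-like targets and
// ".exe" on Windows, so the fallback command above resolves to `sendmail` or `sendmail.exe`.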
@@ -96,7 +95,31 @@ fn smtp_transport() -> AsyncSmtpTransport<Tokio1Executor> {
|
||||
smtp_client.build()
|
||||
}
|
||||
|
||||
// This will sanitize the string values by stripping all the html tags to prevent XSS and HTML Injections
|
||||
fn sanitize_data(data: &mut serde_json::Value) {
|
||||
use regex::Regex;
|
||||
use std::sync::LazyLock;
|
||||
static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"<[^>]+>").unwrap());
|
||||
|
||||
match data {
|
||||
serde_json::Value::String(s) => *s = RE.replace_all(s, "").to_string(),
|
||||
serde_json::Value::Object(obj) => {
|
||||
for d in obj.values_mut() {
|
||||
sanitize_data(d);
|
||||
}
|
||||
}
|
||||
serde_json::Value::Array(arr) => {
|
||||
for d in arr.iter_mut() {
|
||||
sanitize_data(d);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
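// Added illustration (not part of the diff): a minimal test sketch of the tag stripping above.
// The module and test names are made up for the example.
#[cfg(test)]
mod sanitize_data_example {
    #[test]
    fn strips_html_tags_recursively() {
        let mut data = serde_json::json!({ "org_name": "Acme <b>Corp</b> <script>alert(1)</script>" });
        super::sanitize_data(&mut data);
        assert_eq!(data["org_name"], "Acme Corp alert(1)");
    }
}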
fn get_text(template_name: &'static str, data: serde_json::Value) -> Result<(String, String, String), Error> {
|
||||
let mut data = data;
|
||||
sanitize_data(&mut data);
|
||||
let (subject_html, body_html) = get_template(&format!("{template_name}.html"), &data)?;
|
||||
let (_subject_text, body_text) = get_template(template_name, &data)?;
|
||||
Ok((subject_html, body_html, body_text))
|
||||
@@ -116,6 +139,10 @@ fn get_template(template_name: &str, data: &serde_json::Value) -> Result<(String
|
||||
None => err!("Template doesn't contain body"),
|
||||
};
|
||||
|
||||
if text_split.next().is_some() {
|
||||
err!("Template contains more than one body");
|
||||
}
|
||||
|
||||
Ok((subject, body))
|
||||
}
|
||||
|
||||
@@ -138,8 +165,8 @@ pub async fn send_password_hint(address: &str, hint: Option<String>) -> EmptyRes
|
||||
send_email(address, &subject, body_html, body_text).await
|
||||
}
|
||||
|
||||
pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
|
||||
let claims = generate_delete_claims(uuid.to_string());
|
||||
pub async fn send_delete_account(address: &str, user_id: &UserId) -> EmptyResult {
|
||||
let claims = generate_delete_claims(user_id.to_string());
|
||||
let delete_token = encode_jwt(&claims);
|
||||
|
||||
let (subject, body_html, body_text) = get_text(
|
||||
@@ -147,7 +174,7 @@ pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
|
||||
json!({
|
||||
"url": CONFIG.domain(),
|
||||
"img_src": CONFIG._smtp_img_src(),
|
||||
"user_id": uuid,
|
||||
"user_id": user_id,
|
||||
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
|
||||
"token": delete_token,
|
||||
}),
|
||||
@@ -156,8 +183,8 @@ pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
|
||||
send_email(address, &subject, body_html, body_text).await
|
||||
}
|
||||
|
||||
pub async fn send_verify_email(address: &str, uuid: &str) -> EmptyResult {
|
||||
let claims = generate_verify_email_claims(uuid.to_string());
|
||||
pub async fn send_verify_email(address: &str, user_id: &UserId) -> EmptyResult {
|
||||
let claims = generate_verify_email_claims(user_id.clone());
|
||||
let verify_email_token = encode_jwt(&claims);
|
||||
|
||||
let (subject, body_html, body_text) = get_text(
|
||||
@@ -165,7 +192,7 @@ pub async fn send_verify_email(address: &str, uuid: &str) -> EmptyResult {
|
||||
json!({
|
||||
"url": CONFIG.domain(),
|
||||
"img_src": CONFIG._smtp_img_src(),
|
||||
"user_id": uuid,
|
||||
"user_id": user_id,
|
||||
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
|
||||
"token": verify_email_token,
|
||||
}),
|
||||
@@ -186,8 +213,8 @@ pub async fn send_welcome(address: &str) -> EmptyResult {
|
||||
send_email(address, &subject, body_html, body_text).await
|
||||
}
|
||||
|
||||
pub async fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult {
|
||||
let claims = generate_verify_email_claims(uuid.to_string());
|
||||
pub async fn send_welcome_must_verify(address: &str, user_id: &UserId) -> EmptyResult {
|
||||
let claims = generate_verify_email_claims(user_id.clone());
|
||||
let verify_email_token = encode_jwt(&claims);
|
||||
|
||||
let (subject, body_html, body_text) = get_text(
|
||||
@@ -195,7 +222,7 @@ pub async fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult
|
||||
json!({
|
||||
"url": CONFIG.domain(),
|
||||
"img_src": CONFIG._smtp_img_src(),
|
||||
"user_id": uuid,
|
||||
"user_id": user_id,
|
||||
"token": verify_email_token,
|
||||
}),
|
||||
)?;
|
||||
@@ -231,8 +258,8 @@ pub async fn send_single_org_removed_from_org(address: &str, org_name: &str) ->
|
||||
|
||||
pub async fn send_invite(
|
||||
user: &User,
|
||||
org_id: Option<String>,
|
||||
org_user_id: Option<String>,
|
||||
org_id: OrganizationId,
|
||||
member_id: MembershipId,
|
||||
org_name: &str,
|
||||
invited_by_email: Option<String>,
|
||||
) -> EmptyResult {
|
||||
@@ -240,7 +267,7 @@ pub async fn send_invite(
|
||||
user.uuid.clone(),
|
||||
user.email.clone(),
|
||||
org_id.clone(),
|
||||
org_user_id.clone(),
|
||||
member_id.clone(),
|
||||
invited_by_email,
|
||||
);
|
||||
let invite_token = encode_jwt(&claims);
|
||||
@@ -250,25 +277,23 @@ pub async fn send_invite(
|
||||
query_params
|
||||
.append_pair("email", &user.email)
|
||||
.append_pair("organizationName", org_name)
|
||||
.append_pair("organizationId", org_id.as_deref().unwrap_or("_"))
|
||||
.append_pair("organizationUserId", org_user_id.as_deref().unwrap_or("_"))
|
||||
.append_pair("organizationId", &org_id)
|
||||
.append_pair("organizationUserId", &member_id)
|
||||
.append_pair("token", &invite_token);
|
||||
if user.private_key.is_some() {
|
||||
query_params.append_pair("orgUserHasExistingUser", "true");
|
||||
}
|
||||
}
|
||||
|
||||
let query_string = match query.query() {
|
||||
None => err!(format!("Failed to build invite URL query parameters")),
|
||||
Some(query) => query,
|
||||
let Some(query_string) = query.query() else {
|
||||
err!("Failed to build invite URL query parameters")
|
||||
};
|
||||
|
||||
// `url.Url` would place the anchor `#` after the query parameters
|
||||
let url = format!("{}/#/accept-organization/?{}", CONFIG.domain(), query_string);
|
||||
let (subject, body_html, body_text) = get_text(
|
||||
"email/send_org_invite",
|
||||
json!({
|
||||
"url": url,
|
||||
// `url.Url` would place the anchor `#` after the query parameters
|
||||
"url": format!("{}/#/accept-organization/?{}", CONFIG.domain(), query_string),
|
||||
"img_src": CONFIG._smtp_img_src(),
|
||||
"org_name": org_name,
|
||||
}),
|
||||
@@ -279,30 +304,41 @@ pub async fn send_invite(
|
||||
|
||||
pub async fn send_emergency_access_invite(
|
||||
address: &str,
|
||||
uuid: &str,
|
||||
emer_id: &str,
|
||||
user_id: UserId,
|
||||
emer_id: EmergencyAccessId,
|
||||
grantor_name: &str,
|
||||
grantor_email: &str,
|
||||
) -> EmptyResult {
|
||||
let claims = generate_emergency_access_invite_claims(
|
||||
String::from(uuid),
|
||||
user_id,
|
||||
String::from(address),
|
||||
String::from(emer_id),
|
||||
emer_id.clone(),
|
||||
String::from(grantor_name),
|
||||
String::from(grantor_email),
|
||||
);
|
||||
|
||||
let invite_token = encode_jwt(&claims);
|
||||
// Build the query here to ensure proper escaping
|
||||
let mut query = url::Url::parse("https://query.builder").unwrap();
|
||||
{
|
||||
let mut query_params = query.query_pairs_mut();
|
||||
query_params
|
||||
.append_pair("id", &emer_id.to_string())
|
||||
.append_pair("name", grantor_name)
|
||||
.append_pair("email", address)
|
||||
.append_pair("token", &encode_jwt(&claims));
|
||||
}
|
||||
|
||||
let Some(query_string) = query.query() else {
|
||||
err!("Failed to build emergency invite URL query parameters")
|
||||
};
|
||||
|
||||
let (subject, body_html, body_text) = get_text(
|
||||
"email/send_emergency_access_invite",
|
||||
json!({
|
||||
"url": CONFIG.domain(),
|
||||
// `url.Url` would place the anchor `#` after the query parameters
|
||||
"url": format!("{}/#/accept-emergency/?{query_string}", CONFIG.domain()),
|
||||
"img_src": CONFIG._smtp_img_src(),
|
||||
"emer_id": emer_id,
|
||||
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
|
||||
"grantor_name": grantor_name,
|
||||
"token": invite_token,
|
||||
}),
|
||||
)?;
|
||||
|
||||
@@ -558,13 +594,13 @@ async fn send_with_selected_transport(email: Message) -> EmptyResult {
|
||||
// Match some common errors and make them more user friendly
|
||||
Err(e) => {
|
||||
if e.is_client() {
|
||||
debug!("Sendmail client error: {:#?}", e);
|
||||
debug!("Sendmail client error: {:?}", e);
|
||||
err!(format!("Sendmail client error: {e}"));
|
||||
} else if e.is_response() {
|
||||
debug!("Sendmail response error: {:#?}", e);
|
||||
debug!("Sendmail response error: {:?}", e);
|
||||
err!(format!("Sendmail response error: {e}"));
|
||||
} else {
|
||||
debug!("Sendmail error: {:#?}", e);
|
||||
debug!("Sendmail error: {:?}", e);
|
||||
err!(format!("Sendmail error: {e}"));
|
||||
}
|
||||
}
|
||||
|
||||
41 src/main.rs
@@ -24,6 +24,8 @@ extern crate log;
|
||||
extern crate diesel;
|
||||
#[macro_use]
|
||||
extern crate diesel_migrations;
|
||||
#[macro_use]
|
||||
extern crate diesel_derive_newtype;
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
@@ -67,7 +69,7 @@ pub use util::is_running_in_container;
|
||||
|
||||
#[rocket::main]
|
||||
async fn main() -> Result<(), Error> {
|
||||
parse_args();
|
||||
parse_args().await;
|
||||
launch_info();
|
||||
|
||||
let level = init_logging()?;
|
||||
@@ -115,7 +117,7 @@ PRESETS: m= t= p=
|
||||
|
||||
pub const VERSION: Option<&str> = option_env!("VW_VERSION");
|
||||
|
||||
fn parse_args() {
|
||||
async fn parse_args() {
|
||||
let mut pargs = pico_args::Arguments::from_env();
|
||||
let version = VERSION.unwrap_or("(Version info from Git not present)");
|
||||
|
||||
@@ -186,7 +188,7 @@ fn parse_args() {
|
||||
exit(1);
|
||||
}
|
||||
} else if command == "backup" {
|
||||
match backup_sqlite() {
|
||||
match backup_sqlite().await {
|
||||
Ok(f) => {
|
||||
println!("Backup to '{f}' was successful");
|
||||
exit(0);
|
||||
@@ -201,25 +203,20 @@ fn parse_args() {
|
||||
}
|
||||
}
|
||||
|
||||
fn backup_sqlite() -> Result<String, Error> {
|
||||
#[cfg(sqlite)]
|
||||
{
|
||||
use crate::db::{backup_sqlite_database, DbConnType};
|
||||
if DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false) {
|
||||
use diesel::Connection;
|
||||
let url = CONFIG.database_url();
|
||||
async fn backup_sqlite() -> Result<String, Error> {
|
||||
use crate::db::{backup_database, DbConnType};
|
||||
if DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false) {
|
||||
// Establish a connection to the sqlite database
|
||||
let mut conn = db::DbPool::from_config()
|
||||
.expect("SQLite database connection failed")
|
||||
.get()
|
||||
.await
|
||||
.expect("Unable to get SQLite db pool");
|
||||
|
||||
// Establish a connection to the sqlite database
|
||||
let mut conn = diesel::sqlite::SqliteConnection::establish(&url)?;
|
||||
let backup_file = backup_sqlite_database(&mut conn)?;
|
||||
Ok(backup_file)
|
||||
} else {
|
||||
err_silent!("The database type is not SQLite. Backups only works for SQLite databases")
|
||||
}
|
||||
}
|
||||
#[cfg(not(sqlite))]
|
||||
{
|
||||
err_silent!("The 'sqlite' feature is not enabled. Backups only works for SQLite databases")
|
||||
let backup_file = backup_database(&mut conn).await?;
|
||||
Ok(backup_file)
|
||||
} else {
|
||||
err_silent!("The database type is not SQLite. Backups only works for SQLite databases")
|
||||
}
|
||||
}
|
||||
|
||||
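// Added note (not part of the diff): the backup now runs through the async connection pool
// (`db::DbPool::from_config()` plus `backup_database(&mut conn).await`), so the same helper serves
// both the `backup` CLI command handled earlier in parse_args and the SIGUSR1 handler shown below.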
@@ -610,7 +607,7 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
|
||||
// If we need more signals to act upon, we might want to use select! here.
|
||||
// With only one item to listen for this is enough.
|
||||
let _ = signal_user1.recv().await;
|
||||
match backup_sqlite() {
|
||||
match backup_sqlite().await {
|
||||
Ok(f) => info!("Backup to '{f}' was successful"),
|
||||
Err(e) => error!("Backup failed. {e:?}"),
|
||||
}
|
||||
|
||||
4 src/static/scripts/admin.css (vendored)
@@ -38,8 +38,8 @@ img {
|
||||
max-width: 130px;
|
||||
}
|
||||
#users-table .vw-actions, #orgs-table .vw-actions {
|
||||
min-width: 130px;
|
||||
max-width: 130px;
|
||||
min-width: 135px;
|
||||
max-width: 140px;
|
||||
}
|
||||
#users-table .vw-org-cell {
|
||||
max-height: 120px;
|
||||
|
||||
215 src/static/scripts/admin_diagnostics.js (vendored)
@@ -7,6 +7,8 @@ var timeCheck = false;
|
||||
var ntpTimeCheck = false;
|
||||
var domainCheck = false;
|
||||
var httpsCheck = false;
|
||||
var websocketCheck = false;
|
||||
var httpResponseCheck = false;
|
||||
|
||||
// ================================
|
||||
// Date & Time Check
|
||||
@@ -76,18 +78,15 @@ async function generateSupportString(event, dj) {
|
||||
event.preventDefault();
|
||||
event.stopPropagation();
|
||||
|
||||
let supportString = "### Your environment (Generated via diagnostics page)\n";
|
||||
let supportString = "### Your environment (Generated via diagnostics page)\n\n";
|
||||
|
||||
supportString += `* Vaultwarden version: v${dj.current_release}\n`;
|
||||
supportString += `* Web-vault version: v${dj.web_vault_version}\n`;
|
||||
supportString += `* OS/Arch: ${dj.host_os}/${dj.host_arch}\n`;
|
||||
supportString += `* Running within a container: ${dj.running_within_container} (Base: ${dj.container_base_image})\n`;
|
||||
supportString += "* Environment settings overridden: ";
|
||||
if (dj.overrides != "") {
|
||||
supportString += "true\n";
|
||||
} else {
|
||||
supportString += "false\n";
|
||||
}
|
||||
supportString += `* Database type: ${dj.db_type}\n`;
|
||||
supportString += `* Database version: ${dj.db_version}\n`;
|
||||
supportString += `* Environment settings overridden!: ${dj.overrides !== ""}\n`;
|
||||
supportString += `* Uses a reverse proxy: ${dj.ip_header_exists}\n`;
|
||||
if (dj.ip_header_exists) {
|
||||
supportString += `* IP Header check: ${dj.ip_header_match} (${dj.ip_header_name})\n`;
|
||||
@@ -99,11 +98,12 @@ async function generateSupportString(event, dj) {
|
||||
supportString += `* Server/NTP Time Check: ${ntpTimeCheck}\n`;
|
||||
supportString += `* Domain Configuration Check: ${domainCheck}\n`;
|
||||
supportString += `* HTTPS Check: ${httpsCheck}\n`;
|
||||
supportString += `* Database type: ${dj.db_type}\n`;
|
||||
supportString += `* Database version: ${dj.db_version}\n`;
|
||||
supportString += "* Clients used: \n";
|
||||
supportString += "* Reverse proxy and version: \n";
|
||||
supportString += "* Other relevant information: \n";
|
||||
if (dj.enable_websocket) {
|
||||
supportString += `* Websocket Check: ${websocketCheck}\n`;
|
||||
} else {
|
||||
supportString += "* Websocket Check: disabled\n";
|
||||
}
|
||||
supportString += `* HTTP Response Checks: ${httpResponseCheck}\n`;
|
||||
|
||||
const jsonResponse = await fetch(`${BASE_URL}/admin/diagnostics/config`, {
|
||||
"headers": { "Accept": "application/json" }
|
||||
@@ -113,10 +113,30 @@ async function generateSupportString(event, dj) {
|
||||
throw new Error(jsonResponse);
|
||||
}
|
||||
const configJson = await jsonResponse.json();
|
||||
supportString += "\n### Config (Generated via diagnostics page)\n<details><summary>Show Running Config</summary>\n";
|
||||
supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
|
||||
supportString += "\n\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n</details>\n";
|
||||
|
||||
// Start Config and Details section within a details block which is collapsed by default
|
||||
supportString += "\n### Config & Details (Generated via diagnostics page)\n\n";
|
||||
supportString += "<details><summary>Show Config & Details</summary>\n";
|
||||
|
||||
// Add overrides if they exist
|
||||
if (dj.overrides != "") {
|
||||
supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
|
||||
}
|
||||
|
||||
// Add http response check messages if they exist
|
||||
if (httpResponseCheck === false) {
|
||||
supportString += "\n**Failed HTTP Checks:**\n";
|
||||
// We use `innerText` here since that will convert <br> into new-lines
|
||||
supportString += "\n```yaml\n" + document.getElementById("http-response-errors").innerText.trim() + "\n```\n";
|
||||
}
|
||||
|
||||
// Add the current config in json form
|
||||
supportString += "\n**Config:**\n";
|
||||
supportString += "\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n";
|
||||
|
||||
supportString += "\n</details>\n";
|
||||
|
||||
// Add the support string to the textbox so it can be viewed and copied
|
||||
document.getElementById("support-string").textContent = supportString;
|
||||
document.getElementById("support-string").classList.remove("d-none");
|
||||
document.getElementById("copy-support").classList.remove("d-none");
|
||||
@@ -199,6 +219,165 @@ function checkDns(dns_resolved) {
}
}

async function fetchCheckUrl(url) {
try {
const response = await fetch(url);
return { headers: response.headers, status: response.status, text: await response.text() };
} catch (error) {
console.error(`Error fetching ${url}: ${error}`);
return { error };
}
}

function checkSecurityHeaders(headers, omit) {
let securityHeaders = {
"x-frame-options": ["SAMEORIGIN"],
"x-content-type-options": ["nosniff"],
"referrer-policy": ["same-origin"],
"x-xss-protection": ["0"],
"x-robots-tag": ["noindex", "nofollow"],
"cross-origin-resource-policy": ["same-origin"],
"content-security-policy": [
"default-src 'none'",
"font-src 'self'",
"manifest-src 'self'",
"base-uri 'self'",
"form-action 'self'",
"object-src 'self' blob:",
"script-src 'self' 'wasm-unsafe-eval'",
"style-src 'self' 'unsafe-inline'",
"child-src 'self' https://*.duosecurity.com https://*.duofederal.com",
"frame-src 'self' https://*.duosecurity.com https://*.duofederal.com",
"frame-ancestors 'self' chrome-extension://nngceckbapebfimnlniiiahkandclblb chrome-extension://jbkfoedolllekgbhcbcoahefnbanhhlh moz-extension://*",
"img-src 'self' data: https://haveibeenpwned.com",
"connect-src 'self' https://api.pwnedpasswords.com https://api.2fa.directory https://app.simplelogin.io/api/ https://app.addy.io/api/ https://api.fastmail.com/ https://api.forwardemail.net",
]
};

let messages = [];
for (let header in securityHeaders) {
// Skip some headers for specific endpoints if needed
if (typeof omit === "object" && omit.includes(header) === true) {
continue;
}
// If the header exists, check if the contents matches what we expect it to be
let headerValue = headers.get(header);
if (headerValue !== null) {
securityHeaders[header].forEach((expectedValue) => {
if (headerValue.indexOf(expectedValue) === -1) {
messages.push(`'${header}' does not contain '${expectedValue}'`);
}
});
} else {
messages.push(`'${header}' is missing!`);
}
}
return messages;
}
async function checkHttpResponse() {
const [apiConfig, webauthnConnector, notFound, notFoundApi, badRequest, unauthorized, forbidden] = await Promise.all([
fetchCheckUrl(`${BASE_URL}/api/config`),
fetchCheckUrl(`${BASE_URL}/webauthn-connector.html`),
fetchCheckUrl(`${BASE_URL}/admin/does-not-exist`),
fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=404`),
fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=400`),
fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=401`),
fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=403`),
]);

const respErrorElm = document.getElementById("http-response-errors");

// Check and validate the default API header responses
let apiErrors = checkSecurityHeaders(apiConfig.headers);
if (apiErrors.length >= 1) {
respErrorElm.innerHTML += "<b>API calls:</b><br>";
apiErrors.forEach((errMsg) => {
respErrorElm.innerHTML += `<b>Header:</b> ${errMsg}<br>`;
});
}

// Check the special `-connector.html` headers, these should have some headers omitted.
const omitConnectorHeaders = ["x-frame-options", "content-security-policy"];
let connectorErrors = checkSecurityHeaders(webauthnConnector.headers, omitConnectorHeaders);
omitConnectorHeaders.forEach((header) => {
if (webauthnConnector.headers.get(header) !== null) {
connectorErrors.push(`'${header}' is present while it should not`);
}
});
if (connectorErrors.length >= 1) {
respErrorElm.innerHTML += "<b>2FA Connector calls:</b><br>";
connectorErrors.forEach((errMsg) => {
respErrorElm.innerHTML += `<b>Header:</b> ${errMsg}<br>`;
});
}

// Check specific error code responses if they are not re-written by a reverse proxy
let responseErrors = [];
if (notFound.status !== 404 || notFound.text.indexOf("return to the web-vault") === -1) {
responseErrors.push("404 (Not Found) HTML is invalid");
}

if (notFoundApi.status !== 404 || notFoundApi.text.indexOf("\"message\":\"Testing error 404 response\",") === -1) {
responseErrors.push("404 (Not Found) JSON is invalid");
}

if (badRequest.status !== 400 || badRequest.text.indexOf("\"message\":\"Testing error 400 response\",") === -1) {
responseErrors.push("400 (Bad Request) is invalid");
}

if (unauthorized.status !== 401 || unauthorized.text.indexOf("\"message\":\"Testing error 401 response\",") === -1) {
responseErrors.push("401 (Unauthorized) is invalid");
}

if (forbidden.status !== 403 || forbidden.text.indexOf("\"message\":\"Testing error 403 response\",") === -1) {
responseErrors.push("403 (Forbidden) is invalid");
}

if (responseErrors.length >= 1) {
respErrorElm.innerHTML += "<b>HTTP error responses:</b><br>";
responseErrors.forEach((errMsg) => {
respErrorElm.innerHTML += `<b>Response to:</b> ${errMsg}<br>`;
});
}

if (responseErrors.length >= 1 || connectorErrors.length >= 1 || apiErrors.length >= 1) {
document.getElementById("http-response-warning").classList.remove("d-none");
} else {
httpResponseCheck = true;
document.getElementById("http-response-success").classList.remove("d-none");
}
}
async function fetchWsUrl(wsUrl) {
return new Promise((resolve, reject) => {
try {
const ws = new WebSocket(wsUrl);
ws.onopen = () => {
ws.close();
resolve(true);
};

ws.onerror = () => {
reject(false);
};
} catch (_) {
reject(false);
}
});
}

async function checkWebsocketConnection() {
// Test Websocket connections via the anonymous (login with device) connection
const isConnected = await fetchWsUrl(`${BASE_URL}/notifications/anonymous-hub?token=admin-diagnostics`).catch(() => false);
if (isConnected) {
websocketCheck = true;
document.getElementById("websocket-success").classList.remove("d-none");
} else {
document.getElementById("websocket-error").classList.remove("d-none");
}
}

function init(dj) {
// Time check
document.getElementById("time-browser-string").textContent = browserUTC;
@@ -225,6 +404,12 @@ function init(dj) {

// DNS Check
checkDns(dj.dns_resolved);

checkHttpResponse();

if (dj.enable_websocket) {
checkWebsocketConnection();
}
}

// onLoad events
2 src/static/scripts/admin_users.js vendored

@@ -152,7 +152,7 @@ const ORG_TYPES = {
"name": "User",
"bg": "blue"
},
"3": {
"4": {
"name": "Manager",
"bg": "green"
},
42 src/static/scripts/datatables.css vendored

@@ -4,10 +4,10 @@
*
* To rebuild or modify this file with the latest versions of the included
* software please visit:
* https://datatables.net/download/#bs5/dt-2.0.8
* https://datatables.net/download/#bs5/dt-2.1.8
*
* Included libraries:
* DataTables 2.0.8
* DataTables 2.1.8
*/

@charset "UTF-8";
@@ -45,15 +45,21 @@ table.dataTable tr.dt-hasChild td.dt-control:before {
}

html.dark table.dataTable td.dt-control:before,
:root[data-bs-theme=dark] table.dataTable td.dt-control:before {
:root[data-bs-theme=dark] table.dataTable td.dt-control:before,
:root[data-theme=dark] table.dataTable td.dt-control:before {
border-left-color: rgba(255, 255, 255, 0.5);
}
html.dark table.dataTable tr.dt-hasChild td.dt-control:before,
:root[data-bs-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before {
:root[data-bs-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before,
:root[data-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before {
border-top-color: rgba(255, 255, 255, 0.5);
border-left-color: transparent;
}

div.dt-scroll {
width: 100%;
}

div.dt-scroll-body thead tr,
div.dt-scroll-body tfoot tr {
height: 0;
@@ -377,6 +383,31 @@ table.table.dataTable.table-hover > tbody > tr.selected:hover > * {
box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-selected), 0.975);
}

div.dt-container div.dt-layout-start > *:not(:last-child) {
margin-right: 1em;
}
div.dt-container div.dt-layout-end > *:not(:first-child) {
margin-left: 1em;
}
div.dt-container div.dt-layout-full {
width: 100%;
}
div.dt-container div.dt-layout-full > *:only-child {
margin-left: auto;
margin-right: auto;
}
div.dt-container div.dt-layout-table > div {
display: block !important;
}

@media screen and (max-width: 767px) {
div.dt-container div.dt-layout-start > *:not(:last-child) {
margin-right: 0;
}
div.dt-container div.dt-layout-end > *:not(:first-child) {
margin-left: 0;
}
}
div.dt-container div.dt-length label {
font-weight: normal;
text-align: left;
@@ -400,9 +431,6 @@ div.dt-container div.dt-search input {
display: inline-block;
width: auto;
}
div.dt-container div.dt-info {
padding-top: 0.85em;
}
div.dt-container div.dt-paging {
margin: 0;
}
1414 src/static/scripts/datatables.js vendored

File diff suppressed because it is too large
@@ -132,6 +132,21 @@
<span class="d-block" title="We have direct internet access, no outgoing proxy configured."><b>No</b></span>
{{/unless}}
</dd>
<dt class="col-sm-5">Websocket enabled
{{#if page_data.enable_websocket}}
<span class="badge bg-success d-none" id="websocket-success" title="Websocket connection is working.">Ok</span>
<span class="badge bg-danger d-none" id="websocket-error" title="Websocket connection error, validate your reverse proxy configuration!">Error</span>
{{/if}}
</dt>
<dd class="col-sm-7">
{{#if page_data.enable_websocket}}
<span class="d-block" title="Websocket connections are enabled (ENABLE_WEBSOCKET is true)."><b>Yes</b></span>
{{/if}}
{{#unless page_data.enable_websocket}}
<span class="d-block" title="Websocket connections are disabled (ENABLE_WEBSOCKET is false)."><b>No</b></span>
{{/unless}}
</dd>

<dt class="col-sm-5">DNS (github.com)
<span class="badge bg-success d-none" id="dns-success" title="DNS Resolving works!">Ok</span>
<span class="badge bg-danger d-none" id="dns-warning" title="DNS Resolving failed. Please fix.">Error</span>
@@ -167,6 +182,14 @@
<span id="domain-server" class="d-block"><b>Server:</b> <span id="domain-server-string">{{page_data.admin_url}}</span></span>
<span id="domain-browser" class="d-block"><b>Browser:</b> <span id="domain-browser-string"></span></span>
</dd>

<dt class="col-sm-5">HTTP Response validation
<span class="badge bg-success d-none" id="http-response-success" title="All headers and HTTP request responses seem to be ok.">Ok</span>
<span class="badge bg-danger d-none" id="http-response-warning" title="Some headers or HTTP request responses return invalid data!">Error</span>
</dt>
<dd class="col-sm-7">
<span id="http-response-errors" class="d-block"></span>
</dd>
</dl>
</div>
</div>

@@ -19,7 +19,7 @@
<tr>
<td>
<svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{email}}">
<div class="float-start">
<div>
<strong>{{name}}</strong>
<span class="d-block">{{email}}</span>
<span class="d-block">

@@ -2,7 +2,7 @@ Emergency access for {{{grantor_name}}}
<!---------------->
You have been invited to become an emergency contact for {{grantor_name}}. To accept this invite, click the following link:

Click here to join: {{url}}/#/accept-emergency/?id={{emer_id}}&name={{grantor_name}}&email={{email}}&token={{token}}
Click here to join: {{{url}}}

If you do not wish to become an emergency contact for {{grantor_name}}, you can safely ignore this email.
{{> email/email_footer_text }}

@@ -9,7 +9,7 @@ Emergency access for {{{grantor_name}}}
</tr>
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
<a href="{{url}}/#/accept-emergency/?id={{emer_id}}&name={{grantor_name}}&email={{email}}&token={{token}}"
<a href="{{{url}}}"
clicktracking=off target="_blank" style="color: #ffffff; text-decoration: none; text-align: center; cursor: pointer; display: inline-block; border-radius: 5px; background-color: #3c8dbc; border-color: #3c8dbc; border-style: solid; border-width: 10px 20px; margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
Become emergency contact
</a>
@@ -21,4 +21,4 @@ Emergency access for {{{grantor_name}}}
</td>
</tr>
</table>
{{> email/email_footer }}
{{> email/email_footer }}
1 src/static/templates/scss/user.vaultwarden.scss.hbs Normal file

@@ -0,0 +1 @@
/* See the wiki for examples and details: https://github.com/dani-garcia/vaultwarden/wiki/Customize-Vaultwarden-CSS */
134 src/static/templates/scss/vaultwarden.scss.hbs Normal file
@@ -0,0 +1,134 @@
/**** START Static Vaultwarden changes ****/
/* This combines all selectors extending it into one */
%vw-hide {
display: none !important;
}

/* This allows searching for the combined style in the browsers dev-tools (look into the head tag) */
.vw-hide,
head {
@extend %vw-hide;
}

/* Hide the Subscription Page tab */
bit-nav-item[route="settings/subscription"] {
@extend %vw-hide;
}

/* Hide any link pointing to Free Bitwarden Families */
a[href$="/settings/sponsored-families"] {
@extend %vw-hide;
}

/* Hide the `Enterprise Single Sign-On` button on the login page */
a[routerlink="/sso"] {
@extend %vw-hide;
}

/* Hide Two-Factor menu in Organization settings */
bit-nav-item[route="settings/two-factor"],
a[href$="/settings/two-factor"] {
@extend %vw-hide;
}

/* Hide Business Owned checkbox */
app-org-info > form:nth-child(1) > div:nth-child(3) {
@extend %vw-hide;
}

/* Hide the `This account is owned by a business` checkbox and label */
#ownedBusiness,
label[for^="ownedBusiness"] {
@extend %vw-hide;
}

/* Hide Business Name */
app-org-account form div bit-form-field.tw-block:nth-child(3) {
@extend %vw-hide;
}

/* Hide organization plans */
app-organization-plans > form > bit-section:nth-child(2) {
@extend %vw-hide;
}

/* Hide Collection Management Form */
app-org-account form.ng-untouched:nth-child(6) {
@extend %vw-hide;
}

/* Hide 'Member Access' Report Card from Org Reports */
app-org-reports-home > app-report-list > div.tw-inline-grid > div:nth-child(6) {
@extend %vw-hide;
}

/* Hide Device Verification form at the Two Step Login screen */
app-security > app-two-factor-setup > form {
@extend %vw-hide;
}

/* Hide unsupported Custom Role options */
bit-dialog div.tw-ml-4:has(bit-form-control input),
bit-dialog div.tw-col-span-4:has(input[formcontrolname*="access"], input[formcontrolname*="manage"]) {
@extend %vw-hide;
}

/* Hide Log in with passkey */
app-login div.tw-flex:nth-child(4) {
@extend %vw-hide;
}

/* Change collapsed menu icon to Vaultwarden */
bit-nav-logo bit-nav-item a:before {
content: "";
background-image: url("../images/icon-white.svg");
background-repeat: no-repeat;
background-position: center center;
height: 32px;
display: block;
}
bit-nav-logo bit-nav-item .bwi-shield {
@extend %vw-hide;
}
/**** END Static Vaultwarden Changes ****/
/**** START Dynamic Vaultwarden Changes ****/
{{#if signup_disabled}}
/* Hide the register link on the login screen */
app-login form div + div + div + div + hr,
app-login form div + div + div + div + hr + p {
@extend %vw-hide;
}
{{/if}}

{{#unless mail_enabled}}
/* Hide `Email` 2FA if mail is not enabled */
app-two-factor-setup ul.list-group.list-group-2fa li.list-group-item:nth-child(1) {
@extend %vw-hide;
}
{{/unless}}

{{#unless yubico_enabled}}
/* Hide `YubiKey OTP security key` 2FA if it is not enabled */
app-two-factor-setup ul.list-group.list-group-2fa li.list-group-item:nth-child(4) {
@extend %vw-hide;
}
{{/unless}}

{{#unless emergency_access_allowed}}
/* Hide Emergency Access if not allowed */
bit-nav-item[route="settings/emergency-access"] {
@extend %vw-hide;
}
{{/unless}}

{{#unless sends_allowed}}
/* Hide Sends if not allowed */
bit-nav-item[route="sends"] {
@extend %vw-hide;
}
{{/unless}}
/**** End Dynamic Vaultwarden Changes ****/
/**** Include a special user stylesheet for custom changes ****/
{{#if load_user_scss}}
{{> scss/user.vaultwarden.scss }}
{{/if}}
88 src/util.rs
@@ -1,13 +1,12 @@
//
// Web Headers and caching
//
use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
use std::{collections::HashMap, io::Cursor, path::Path};

use num_traits::ToPrimitive;
use rocket::{
fairing::{Fairing, Info, Kind},
http::{ContentType, Header, HeaderMap, Method, Status},
request::FromParam,
response::{self, Responder},
Data, Orbit, Request, Response, Rocket,
};
@@ -51,9 +50,16 @@ impl Fairing for AppHeaders {
}
}

// NOTE: When modifying or adding security headers be sure to also update the diagnostic checks in `src/static/scripts/admin_diagnostics.js` in `checkSecurityHeaders`
res.set_raw_header("Permissions-Policy", "accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), execution-while-not-rendered=(), execution-while-out-of-viewport=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()");
res.set_raw_header("Referrer-Policy", "same-origin");
res.set_raw_header("X-Content-Type-Options", "nosniff");
res.set_raw_header("X-Robots-Tag", "noindex, nofollow");

if !res.headers().get_one("Content-Type").is_some_and(|v| v.starts_with("image/")) {
res.set_raw_header("Cross-Origin-Resource-Policy", "same-origin");
}

// Obsolete in modern browsers, unsafe (XS-Leak), and largely replaced by CSP
res.set_raw_header("X-XSS-Protection", "0");

@@ -73,7 +79,9 @@ impl Fairing for AppHeaders {
// # Mail Relay: https://bitwarden.com/blog/add-privacy-and-security-using-email-aliases-with-bitwarden/
// app.simplelogin.io, app.addy.io, api.fastmail.com, quack.duckduckgo.com
let csp = format!(
"default-src 'self'; \
"default-src 'none'; \
font-src 'self'; \
manifest-src 'self'; \
base-uri 'self'; \
form-action 'self'; \
object-src 'self' blob:; \
@@ -96,10 +104,11 @@ impl Fairing for AppHeaders {
https://app.addy.io/api/ \
https://api.fastmail.com/ \
https://api.forwardemail.net \
;\
{allowed_connect_src};\
",
icon_service_csp = CONFIG._icon_service_csp(),
allowed_iframe_ancestors = CONFIG.allowed_iframe_ancestors()
allowed_iframe_ancestors = CONFIG.allowed_iframe_ancestors(),
allowed_connect_src = CONFIG.allowed_connect_src(),
);
res.set_raw_header("Content-Security-Policy", csp);
res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
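
Illustration (not from the diff): the hunk above splices `{allowed_connect_src}` into the `connect-src` directive right before its closing `;`, so origins returned by `CONFIG.allowed_connect_src()` end up inside the same CSP header. A minimal standalone sketch of that pattern; `extra_connect_src` and the sample origin are made up stand-ins for the real config value:

// Sketch only: append operator-configured origins to connect-src, mirroring
// the format!() pattern used in the hunk above.
fn build_connect_src(extra_connect_src: &str) -> String {
    format!(
        "connect-src 'self' https://api.pwnedpasswords.com https://api.2fa.directory {extra_connect_src};"
    )
}

fn main() {
    // Hypothetical extra origin; stands in for CONFIG.allowed_connect_src().
    println!("{}", build_connect_src("https://api.relay.example"));
}
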
@@ -220,42 +229,6 @@ impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cache
}
}

pub struct SafeString(String);

impl fmt::Display for SafeString {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}

impl Deref for SafeString {
type Target = String;

fn deref(&self) -> &Self::Target {
&self.0
}
}

impl AsRef<Path> for SafeString {
#[inline]
fn as_ref(&self) -> &Path {
Path::new(&self.0)
}
}

impl<'r> FromParam<'r> for SafeString {
type Error = ();

#[inline(always)]
fn from_param(param: &'r str) -> Result<Self, Self::Error> {
if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) {
Ok(SafeString(param.to_string()))
} else {
Err(())
}
}
}
// Log all the routes from the main paths list, and the attachments endpoint
// Effectively ignores, any static file route, and the alive endpoint
const LOGGED_ROUTES: [&str; 7] = ["/api", "/admin", "/identity", "/icons", "/attachments", "/events", "/notifications"];
@@ -438,13 +411,19 @@ pub fn get_env_bool(key: &str) -> Option<bool> {

use chrono::{DateTime, Local, NaiveDateTime, TimeZone};

// Format used by Bitwarden API
const DATETIME_FORMAT: &str = "%Y-%m-%dT%H:%M:%S%.6fZ";

/// Formats a UTC-offset `NaiveDateTime` in the format used by Bitwarden API
/// responses with "date" fields (`CreationDate`, `RevisionDate`, etc.).
pub fn format_date(dt: &NaiveDateTime) -> String {
dt.format(DATETIME_FORMAT).to_string()
dt.and_utc().to_rfc3339_opts(chrono::SecondsFormat::Micros, true)
}

/// Validates and formats a RFC3339 timestamp
/// If parsing fails it will return the start of the unix datetime
pub fn validate_and_format_date(dt: &str) -> String {
match DateTime::parse_from_rfc3339(dt) {
Ok(dt) => dt.to_rfc3339_opts(chrono::SecondsFormat::Micros, true),
_ => String::from("1970-01-01T00:00:00.000000Z"),
}
}

/// Formats a `DateTime<Local>` using the specified format string.
@@ -486,7 +465,24 @@ pub fn format_datetime_http(dt: &DateTime<Local>) -> String {
}

pub fn parse_date(date: &str) -> NaiveDateTime {
NaiveDateTime::parse_from_str(date, DATETIME_FORMAT).unwrap()
DateTime::parse_from_rfc3339(date).unwrap().naive_utc()
}
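
Illustration (not from the diff): the hunks above swap the custom `DATETIME_FORMAT` string for chrono's RFC 3339 formatter (fixed six sub-second digits, trailing `Z`), and `parse_date` now accepts any RFC 3339 offset and normalises it to naive UTC. A self-contained sketch of the round trip, assuming chrono 0.4; the helper bodies mirror the diff and the sample timestamp is made up:

use chrono::{DateTime, NaiveDate, NaiveDateTime, SecondsFormat};

// Mirrors format_date() above: the NaiveDateTime is treated as UTC and
// rendered as RFC 3339 with microsecond precision and a 'Z' suffix.
fn format_date(dt: &NaiveDateTime) -> String {
    dt.and_utc().to_rfc3339_opts(SecondsFormat::Micros, true)
}

// Mirrors parse_date() above: any RFC 3339 offset is accepted and converted to naive UTC.
fn parse_date(date: &str) -> NaiveDateTime {
    DateTime::parse_from_rfc3339(date).unwrap().naive_utc()
}

fn main() {
    // Arbitrary example value, not taken from the diff.
    let dt = NaiveDate::from_ymd_opt(2024, 12, 1)
        .unwrap()
        .and_hms_micro_opt(8, 30, 0, 123456)
        .unwrap();
    let s = format_date(&dt);
    assert_eq!(s, "2024-12-01T08:30:00.123456Z");
    assert_eq!(parse_date(&s), dt);
}
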
/// Returns true or false if an email address is valid or not
///
/// Some extra checks instead of only using email_address
/// This prevents from weird email formats still excepted but in the end invalid
pub fn is_valid_email(email: &str) -> bool {
let Ok(email) = email_address::EmailAddress::from_str(email) else {
return false;
};
let Ok(email_url) = url::Url::parse(&format!("https://{}", email.domain())) else {
return false;
};
if email_url.path().ne("/") || email_url.domain().is_none() || email_url.query().is_some() {
return false;
}
true
}

//
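
Illustration (not from the diff): `is_valid_email` above layers a `url::Url` parse of the domain on top of the `email_address` crate, so addresses whose domain part carries a path, a query string, or an IP-only host should be rejected. A self-contained copy for experimenting locally, assuming the `email_address` and `url` crates are available; the sample inputs are illustrative only:

use std::str::FromStr;

// Same logic as the is_valid_email() hunk above: the domain must round-trip
// as a bare https:// host with no extra path or query, and must be a real
// domain name rather than an IP literal.
fn is_valid_email(email: &str) -> bool {
    let Ok(email) = email_address::EmailAddress::from_str(email) else {
        return false;
    };
    let Ok(email_url) = url::Url::parse(&format!("https://{}", email.domain())) else {
        return false;
    };
    if email_url.path().ne("/") || email_url.domain().is_none() || email_url.query().is_some() {
        return false;
    }
    true
}

fn main() {
    // Illustrative inputs; the exact verdict for the second one depends on
    // what the email_address crate accepts as a domain literal.
    for candidate in ["admin@example.com", "admin@[127.0.0.1]"] {
        println!("{candidate}: {}", is_valid_email(candidate));
    }
}
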