Mirror of https://github.com/dani-garcia/vaultwarden.git, synced 2026-01-16 20:50:33 +00:00.

Compare commits: 247 commits
Commit SHA1 prefixes:

638766b346, d1ff136552, 46ec11de12, 4283a49e0b, 1e32db8c41, 0f944ec7e2, 736dbc9553, b4a38f1f63, 646186fe38, c2725916f4,
fd334e2b7d, f9feca1ce4, 677fd2ff32, f49eb8eb4d, b0e0d68632, f3c8c16d79, 2dd5086916, 7532072d50, 382e6107fe, e6c6609e19,
4cb5918950, 55030f3687, ef4072e4ff, c78d383ed1, 5b96270874, 2c0742387b, 1704d14f29, 2d7ffbf378, dfd63f85c0, cd0c49eaf6,
080e38d227, 1a664fba6a, c915ef815d, adea4ec54d, 387b5eb2dd, 6337af59ed, 475c7b8f16, ac120be1c6, b70316e6d3, 0a0f620d0b,
9132cc4a30, e50edcadfb, 2685099720, 6fa6eb18e8, bb79396f0e, da9fd6b7d0, 5b8067ef77, 9eabcd5cae, d6e0d4cbbd, e5e6db2688,
186fe24484, 5da96d36e6, f4b1071e23, 18291b6533, 8095cb68bb, 04cd751556, 7ce2372f51, aebda93afe, 2b7b1141eb, 1ff4ff72bf,
d27e91a9b0, 7cf063b196, 642f04d493, fc6e65e4b0, db5c98ec3b, 73c64af27e, b3f7db813f, 59660ff087, 69a69e8e04, 1094f359c3,
102ee3f871, acb5ab08a8, ae59472d9a, 5a07b193dc, fd2edb9adc, 1d074f7b3f, 81984c4bce, 9c891baad1, b050c60807, e47a2fd0f3,
42b9cc73ac, edca4248aa, b1b6bc9be0, 818b254cef, ddfac5e34b, 8b5c945bad, 50c5eb9c50, 94be67eac1, 5a05139efe, a62dc102fb,
518d74ce21, 7598997deb, 3c876dc202, 1722742ab3, d9c0eb3cfc, 0d990e1dc0, 60ed5ff99d, 5b98bd66ee, abd20777fe, 7f0d0cf8a4,
6e23a573fb, ce9d93003c, abfa868423, 331f6c08fe, c0efd3d419, 1385d75972, 9a787dd105, 0dcc435bb4, f1a67663d1, 0f95bdc9bb,
a0eab35768, 027c87dd07, f2b31352fe, c9376e3126, 7cbcad0e38, e167798449, fc5928772b, 8263bdd21d, 3c1d4254e7, 55d7c48b1d,
bf623eed7f, 84bcac0112, 31595888ea, 5c38b2c4eb, ebe9162af9, b64cf27038, 0c4e79cff6, 5b9129a086, 93d4a12834, bf3e2dc652,
0d0e98d783, 5a55cfbb9b, ac93b8a6b9, 93786d9ebd, a6dbb580c9, e62678abdb, af50eae604, cb4f6aa7f6, 5e13b1a7cb, 60b339f450,
f71c779860, 221a11de9b, 794483c10d, c9934ccdb7, 54729f3c1e, f1a86acb98, 6b6ea3c8bf, bf403fee7d, 5cd920cf6f, 45d3b479bc,
c7a752b01d, 099d359628, 006a2aacbb, b71d9dd53e, 887e320e7f, d7c18fd86e, 7566f3db3e, 5d05ec58be, d9a452f558, dec03b3dc0,
85950bdc0b, f95bd3bb04, e33b8fab34, b00fbf153e, 0de5919a16, 699777be9e, 16ff49d712, 54c78cf06d, 303eaabeea, 6b6f5b8d04,
0c18a7e306, a23a38080b, 316ca66a4b, 2f71a01877, d5cfbfc71d, 12612da75e, 68ec5f2a18, 00670450df, dbd95e08e9, 3713f2d134,
a85a250dfd, 5845ed2c92, 40ed505581, bf0b8d9968, d0a7437dbd, 21b433c5d7, 7c89bc619a, 0d3daa9fc6, 0c1f0bad17, 72cf59fa54,
527bc1f625, 2168d09421, 1c266031d7, b636d20c64, 2a9ca88c2a, b9c434addb, 451ad47327, 7f61dd5fe3, 3ca85028ea, 542a73cc6e,
78d07e2fda, b617ffd2af, 3abf173d89, df8aeb10e8, 26ad06df7c, 37fff3ef4a, 28c5e63bf5, a07c213b3e, ed72741f48, fb0c23b71f,
d98f95f536, 6643e83b61, 7b742009a1, 649e2b48f3, 81f0c2b0e8, 80d8aa7239, 27d4b713f6, b0faaf2527, 8d06d9c111, c4d565b15b,
06f8e69c70, 7db52374cd, 843f205f6f, 2ff51ae77e, 2b75d81a8b, cad0dcbed1, 19b8388950, 87e08b9e50, 0b7d6bf6df, 89fe05b6cc,
d73d74e78f, 9a682b7a45, 94201ca133, 99f9e7252a, 42136a7097, 9bb4c38bf9, 5f01db69ff, c59a7f4a8c, 8295688bed, 9713a3a555,
d781981bbd, 5125fdb882, fd9693b961, f38926d666, 775d07e9a0, 2d5f172e77, 08f0de7b46
@@ -3,13 +3,18 @@ target

# Data folder
data

# Misc
.env
.env.template
.gitattributes
.gitignore
rustfmt.toml

# IDE files
.vscode
.idea
.editorconfig
*.iml

# Documentation
@@ -19,9 +24,17 @@ data
*.yml
*.yaml

# Docker folders
# Docker
hooks
tools
Dockerfile
.dockerignore
docker/**
!docker/healthcheck.sh
!docker/start.sh

# Web vault
web-vault
web-vault

# Vaultwarden Resources
resources

@@ -3,6 +3,11 @@
##
## Be aware that most of these settings will be overridden if they were changed
## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
##
## By default, vaultwarden expects for this file to be named ".env" and located
## in the current working directory. If this is not the case, the environment
## variable ENV_FILE can be set to the location of this file prior to starting
## vaultwarden.

## Main data folder
# DATA_FOLDER=data
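The ENV_FILE lookup described above is easy to reproduce with the dotenvy crate that this changeset adds to Cargo.toml. A minimal sketch, assuming a loader roughly like the following (the function name is illustrative, not vaultwarden's actual code):

```rust
use std::env;

// Load ".env" from the working directory unless ENV_FILE points elsewhere.
// Illustrative sketch only; a real loader would also report errors.
fn load_env_file() {
    match env::var("ENV_FILE") {
        // ENV_FILE set: read variables from that path instead.
        Ok(path) => {
            dotenvy::from_path(&path).ok();
        }
        // Fall back to ./.env; a missing file is not fatal.
        Err(_) => {
            dotenvy::dotenv().ok();
        }
    }
}
```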
@@ -24,11 +29,21 @@
## Define the size of the connection pool used for connecting to the database.
# DATABASE_MAX_CONNS=10

## Database connection initialization
## Allows SQL statements to be run whenever a new database connection is created.
## This is mainly useful for connection-scoped pragmas.
## If empty, a database-specific default is used:
## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
## - MySQL: ""
## - PostgreSQL: ""
# DATABASE_CONN_INIT=""

## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
# SENDS_FOLDER=data/sends
# TMP_FOLDER=data/tmp

## Templates data folder, by default uses embedded templates
## Check source code to see the format
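One way to implement a DATABASE_CONN_INIT-style option on the diesel/r2d2 stack this project uses is a connection customizer that replays the SQL on every connection the pool opens. A hedged sketch under that assumption (type and field names are illustrative, not vaultwarden's actual source):

```rust
use diesel::connection::SimpleConnection;
use diesel::r2d2::{self, CustomizeConnection};

// Runs a fixed batch of SQL statements whenever the pool acquires a connection.
#[derive(Debug, Clone)]
struct ConnInit {
    // e.g. the SQLite default: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
    sql: String,
}

impl<C: SimpleConnection> CustomizeConnection<C, r2d2::Error> for ConnInit {
    fn on_acquire(&self, conn: &mut C) -> Result<(), r2d2::Error> {
        if !self.sql.is_empty() {
            // batch_execute runs the whole semicolon-separated statement list.
            conn.batch_execute(&self.sql).map_err(r2d2::Error::QueryError)?;
        }
        Ok(())
    }
}
```

Attaching it is a one-liner on the pool builder, e.g. `Pool::builder().connection_customizer(Box::new(ConnInit { sql }))`, which is why connection-scoped pragmas are the natural use case: they apply to each connection, not to the database as a whole.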
@@ -102,12 +117,10 @@
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"

## Logging to file
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# LOG_FILE=/path/to/log

## Logging to Syslog
## This requires extended logging
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# USE_SYSLOG=false

## Log level
@@ -232,6 +245,10 @@
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Vaultwarden

## The number of hours after which an organization invite token, emergency access invite token,
## email verification token and deletion request token will expire (must be at least 1)
# INVITATION_EXPIRATION_HOURS=120

## Per-organization attachment storage limit (KB)
## Max kilobytes of attachment storage allowed per organization.
## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
@@ -257,6 +274,9 @@
## The change only applies when the password is changed
# PASSWORD_ITERATIONS=100000

## Controls whether users can set password hints. This setting applies globally to all users.
# PASSWORD_HINTS_ALLOWED=true

## Controls whether a password hint should be shown directly in the web page if
## SMTP service is not configured. Not recommended for publicly-accessible instances
## as this provides unauthenticated access to potentially sensitive data.
@@ -267,7 +287,7 @@
## It's recommended to configure this value, otherwise certain functionality might not work,
## like attachment downloads, email links and U2F.
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
# DOMAIN=https://bw.domain.tld:8443
# DOMAIN=https://vw.domain.tld:8443

## Allowed iframe ancestors (Know the risks!)
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
@@ -331,9 +351,8 @@
# SMTP_HOST=smtp.domain.tld
# SMTP_FROM=vaultwarden@domain.tld
# SMTP_FROM_NAME=Vaultwarden
# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here.
# SMTP_USERNAME=username
# SMTP_PASSWORD=password
# SMTP_TIMEOUT=15
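The SMTP_SECURITY values distinguish Explicit TLS (plaintext connect on 587/25, then a STARTTLS upgrade) from Implicit TLS (TLS from the first byte, usually 465). With the lettre 0.10 crate this changeset upgrades to, the two modes roughly map to different transport constructors; a hedged sketch only, not vaultwarden's actual mail code, with host and credentials taken from the sample config above:

```rust
use lettre::transport::smtp::authentication::Credentials;
use lettre::SmtpTransport;

// "starttls": connect in plaintext, then upgrade the session via STARTTLS.
fn starttls_transport() -> SmtpTransport {
    SmtpTransport::starttls_relay("smtp.domain.tld")
        .expect("invalid SMTP host")
        .port(587)
        .credentials(Credentials::new("username".into(), "password".into()))
        .build()
}

// "force_tls": TLS is negotiated immediately on connect (Implicit TLS).
fn force_tls_transport() -> SmtpTransport {
    SmtpTransport::relay("smtp.domain.tld")
        .expect("invalid SMTP host")
        .port(465)
        .credentials(Credentials::new("username".into(), "password".into()))
        .build()
}
```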
185 .github/workflows/build.yml vendored

@@ -8,7 +8,6 @@ on:
- "migrations/**"
- "Cargo.*"
- "build.rs"
- "diesel.toml"
- "rust-toolchain"
pull_request:
paths:
@@ -17,11 +16,11 @@ on:
- "migrations/**"
- "Cargo.*"
- "build.rs"
- "diesel.toml"
- "rust-toolchain"

jobs:
build:
runs-on: ubuntu-20.04
# Make warnings errors, this is to prevent warnings slipping through.
# This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
env:
@@ -30,118 +29,172 @@ jobs:
fail-fast: false
matrix:
channel:
- nightly
target-triple:
- x86_64-unknown-linux-gnu
- "rust-toolchain" # The version defined in rust-toolchain
- "msrv" # The supported MSRV
include:
- target-triple: x86_64-unknown-linux-gnu
host-triple: x86_64-unknown-linux-gnu
features: [sqlite,mysql,postgresql] # Remember to update the `cargo test` to match the amount of features
channel: nightly
os: ubuntu-20.04
ext: ""
- channel: "msrv"
version: "1.60.0"

name: Build and Test ${{ matrix.channel }}
name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
runs-on: ${{ matrix.os }}
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
- name: "Checkout"
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
# End Checkout the repo

# Install musl-tools when needed
- name: Install musl tools
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
if: matrix.target-triple == 'x86_64-unknown-linux-musl'
# End Install musl-tools when needed

# Install dependencies
- name: Install dependencies Ubuntu
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
if: startsWith( matrix.os, 'ubuntu' )
- name: "Install dependencies Ubuntu"
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
# End Install dependencies

# Enable Rust Caching
- uses: Swatinem/rust-cache@842ef286fff290e445b90b4002cc9807c3669641 # v1.3.0
# End Enable Rust Caching

# Uses the rust-toolchain file to determine version
- name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
- name: "Install rust-toolchain version"
uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f # v1.0.6
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
profile: minimal
target: ${{ matrix.target-triple }}
components: clippy, rustfmt
# End Uses the rust-toolchain file to determine version

# Install the MSRV channel to be used
- name: "Install MSRV version"
uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f # v1.0.6
if: ${{ matrix.channel != 'rust-toolchain' }}
with:
profile: minimal
override: true
toolchain: ${{ matrix.version }}
# End Install the MSRV channel to be used

# Enable Rust Caching
- uses: Swatinem/rust-cache@6720f05bc48b77f96918929a9019fb2203ff71f8 # v2.0.0
# End Enable Rust Caching

# Show environment
- name: "Show environment"
run: |
rustc -vV
cargo -vV
# End Show environment

# Run cargo tests (In release mode to speed up future builds)
# First test all features together, afterwards test them separately.
- name: "`cargo test --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
- name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
id: test_sqlite_mysql_postgresql_mimalloc
uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
if: $${{ always() }}
with:
command: test
args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
# Test single features
# 0: sqlite
- name: "`cargo test --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
args: --release --features sqlite,mysql,postgresql,enable_mimalloc

- name: "test features: sqlite,mysql,postgresql"
id: test_sqlite_mysql_postgresql
uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
if: $${{ always() }}
with:
command: test
args: --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}
if: ${{ matrix.features[0] != '' }}
# 1: mysql
- name: "`cargo test --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
args: --release --features sqlite,mysql,postgresql

- name: "test features: sqlite"
id: test_sqlite
uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
if: $${{ always() }}
with:
command: test
args: --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}
if: ${{ matrix.features[1] != '' }}
# 2: postgresql
- name: "`cargo test --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
args: --release --features sqlite

- name: "test features: mysql"
id: test_mysql
uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
if: $${{ always() }}
with:
command: test
args: --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}
if: ${{ matrix.features[2] != '' }}
args: --release --features mysql

- name: "test features: postgresql"
id: test_postgresql
uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
if: $${{ always() }}
with:
command: test
args: --release --features postgresql
# End Run cargo tests

# Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
- name: "`cargo clippy --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
- name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
id: clippy
uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
if: ${{ always() && matrix.channel == 'rust-toolchain' }}
with:
command: clippy
args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} -- -D warnings
args: --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
# End Run cargo clippy

# Run cargo fmt
- name: '`cargo fmt`'
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
# Run cargo fmt (Only run on rust-toolchain defined version)
- name: "check formatting"
id: formatting
uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
if: ${{ always() && matrix.channel == 'rust-toolchain' }}
with:
command: fmt
args: --all -- --check
# End Run cargo fmt

# Build the binary
- name: "`cargo build --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
# Check for any previous failures, if there are stop, else continue.
# This is useful so all test/clippy/fmt actions are done, and they can all be addressed
- name: "Some checks failed"
if: ${{ failure() }}
run: |
echo "### :x: Checks Failed!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "|Job|Status|" >> $GITHUB_STEP_SUMMARY
echo "|---|------|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite,mysql,postgresql)|${{ steps.test_sqlite_mysql_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite)|${{ steps.test_sqlite.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (mysql)|${{ steps.test_mysql.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (postgresql)|${{ steps.test_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.clippy.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|fmt|${{ steps.formatting.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Please check the failed jobs and fix where needed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
exit 1

# Check for any previous failures, if there are stop, else continue.
# This is useful so all test/clippy/fmt actions are done, and they can all be addressed
- name: "All checks passed"
if: ${{ success() }}
run: |
echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

# Build the binary to upload to the artifacts
- name: "build features: sqlite,mysql,postgresql"
uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1.0.3
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
command: build
args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
args: --release --features sqlite,mysql,postgresql
# End Build the binary

# Upload artifact to Github Actions
- name: Upload artifact
uses: actions/upload-artifact@27121b0bdffd731efa15d66772be8dc71245d074 # v2.2.4
- name: "Upload artifact"
uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
name: vaultwarden
path: target/release/vaultwarden
# End Upload artifact to Github Actions

18 .github/workflows/hadolint.yml vendored

@@ -1,13 +1,9 @@
name: Hadolint

on:
push:
paths:
- "docker/**"

pull_request:
paths:
- "docker/**"
on: [
push,
pull_request
]

jobs:
hadolint:
@@ -16,18 +12,18 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
# End Checkout the repo

# Download hadolint
# Download hadolint - https://github.com/hadolint/hadolint/releases
- name: Download hadolint
shell: bash
run: |
sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
sudo chmod +x /usr/local/bin/hadolint
env:
HADOLINT_VERSION: 2.7.0
HADOLINT_VERSION: 2.10.0
# End Download hadolint

# Test Dockerfiles

6 .github/workflows/release.yml vendored

@@ -31,7 +31,7 @@ jobs:
steps:
- name: Skip Duplicates Actions
id: skip_check
uses: fkirc/skip-duplicate-actions@f75dd6564bb646f95277dc8c3b80612e46a4a1ea # v3.4.1
uses: fkirc/skip-duplicate-actions@9d116fa7e55f295019cfab7e3ab72b478bcf7fdd # v4.0.0
with:
cancel_others: 'true'
# Only run this when not creating a tag
@@ -60,13 +60,13 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2
with:
fetch-depth: 0

# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 # v1.10.0
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v2.0.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.0.1
rev: v4.3.0
hooks:
- id: check-yaml
- id: check-json
@@ -25,14 +25,16 @@ repos:
description: Test the package for errors.
entry: cargo test
language: system
args: ["--features", "sqlite,mysql,postgresql", "--"]
types: [rust]
args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--"]
types_or: [rust, file]
files: (Cargo.toml|Cargo.lock|.*\.rs$)
pass_filenames: false
- id: cargo-clippy
name: cargo clippy
description: Lint Rust sources
entry: cargo clippy
language: system
args: ["--features", "sqlite,mysql,postgresql", "--", "-D", "warnings"]
types: [rust]
args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
types_or: [rust, file]
files: (Cargo.toml|Cargo.lock|.*\.rs$)
pass_filenames: false

3070 Cargo.lock generated

File diff suppressed because it is too large.

162 Cargo.toml

@@ -3,7 +3,7 @@ name = "vaultwarden"
version = "1.0.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
edition = "2021"
rust-version = "1.60"
rust-version = "1.60.0"
resolver = "2"

repository = "https://github.com/dani-garcia/vaultwarden"
@@ -13,6 +13,7 @@ publish = false
build = "build.rs"

[features]
# default = ["sqlite"]
# Empty to keep compatibility, prefer to set USE_SYSLOG=true
enable_syslog = []
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
@@ -20,135 +21,138 @@ postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
# Enable to use a vendored and statically linked openssl
vendored_openssl = ["openssl/vendored"]
# Enable MiMalloc memory allocator to replace the default malloc
# This can improve performance for Alpine builds
enable_mimalloc = ["mimalloc"]

# Enable unstable features, requires nightly
# Currently only used to enable rusts official ip support
unstable = []

[target."cfg(not(windows))".dependencies]
syslog = "4.0.1"
# Logging
syslog = "6.0.1" # Needs to be v4 until fern is updated

[dependencies]
# Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
rocket = { version = "=0.5.0-dev", features = ["tls"], default-features = false }
rocket_contrib = "=0.5.0-dev"
# Logging
log = "0.4.17"
fern = { version = "0.6.1", features = ["syslog-6"] }
tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

# HTTP client
reqwest = { version = "0.11.8", features = ["blocking", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
backtrace = "0.3.66" # Logging panics to logfile instead stderr only

# Used for custom short lived cookie jar
cookie = "0.15.1"
cookie_store = "0.15.1"
bytes = "1.1.0"
url = "2.2.2"
# A `dotenv` implementation for Rust
dotenvy = { version = "0.15.5", default-features = false }

# multipart/form-data support
multipart = { version = "0.18.0", features = ["server"], default-features = false }
# Lazy initialization
once_cell = "1.15.0"

# WebSockets library
ws = { version = "0.11.1", package = "parity-ws" }
# Numerical libraries
num-traits = "0.2.15"
num-derive = "0.3.3"

# MessagePack library
rmpv = "1.0.0"
# Web framework
rocket = { version = "0.5.0-rc.2", features = ["tls", "json"], default-features = false }

# Concurrent hashmap implementation
chashmap = "2.2.2"
# WebSockets libraries
tokio-tungstenite = "0.17.2"
rmpv = "1.0.0" # MessagePack library
dashmap = "5.4.0"

# Async futures
futures = "0.3.24"
tokio = { version = "1.21.2", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time"] }

# A generic serialization/deserialization framework
serde = { version = "1.0.132", features = ["derive"] }
serde_json = "1.0.73"

# Logging
log = "0.4.14"
fern = { version = "0.6.0", features = ["syslog-4"] }
serde = { version = "1.0.145", features = ["derive"] }
serde_json = "1.0.86"

# A safe, extensible ORM and Query builder
diesel = { version = "1.4.8", features = [ "chrono", "r2d2"] }
diesel = { version = "1.4.8", features = ["chrono", "r2d2"] }
diesel_migrations = "1.4.0"

# Bundled SQLite
libsqlite3-sys = { version = "0.22.2", features = ["bundled"], optional = true }

# Crypto-related libraries
rand = "0.8.4"
rand = { version = "0.8.5", features = ["small_rng"] }
ring = "0.16.20"

# UUID generation
uuid = { version = "0.8.2", features = ["v4"] }
uuid = { version = "1.2.1", features = ["v4"] }

# Date and time libraries
chrono = { version = "0.4.19", features = ["serde"] }
chrono-tz = "0.6.1"
time = "0.2.27"
chrono = { version = "0.4.22", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.6.3"
time = "0.3.15"

# Job scheduler
job_scheduler = "1.2.1"
job_scheduler_ng = "2.0.2"

# TOTP library
totp-lite = "1.0.3"

# Data encoding library
# Data encoding library Hex/Base32/Base64
data-encoding = "2.3.2"

# JWT library
jsonwebtoken = "7.2.0"
jsonwebtoken = "8.1.1"

# U2F library
u2f = "0.2.0"
webauthn-rs = "0.3.1"
# TOTP library
totp-lite = "2.0.0"

# Yubico Library
yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }

# A `dotenv` implementation for Rust
dotenv = { version = "0.15.0", default-features = false }
# WebAuthn libraries
webauthn-rs = "0.3.2"

# Lazy initialization
once_cell = "1.9.0"
# Handling of URL's for WebAuthn
url = "2.3.1"

# Numerical libraries
num-traits = "0.2.14"
num-derive = "0.3.3"

# Email libraries
tracing = { version = "0.1.29", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled.
lettre = { version = "0.10.0-rc.4", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
# Email libraries
lettre = { version = "0.10.1", features = ["smtp-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
percent-encoding = "2.2.0" # URL encoding library used for URL's in the emails

# Template library
handlebars = { version = "4.1.6", features = ["dir_source"] }
handlebars = { version = "4.3.5", features = ["dir_source"] }

# HTTP client
reqwest = { version = "0.11.12", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }

# For favicon extraction from main website
html5ever = "0.25.1"
markup5ever_rcdom = "0.1.0"
regex = { version = "1.5.4", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.1.1"
html5gum = "0.5.2"
regex = { version = "1.6.0", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.2.0"
bytes = "1.2.1"
cached = "0.39.0"

# Used for custom short lived cookie jar during favicon extraction
cookie = "0.16.1"
cookie_store = "0.17.0"

# Used by U2F, JWT and Postgres
openssl = "0.10.38"

# URL encoding library
percent-encoding = "2.1.0"
# Punycode conversion
idna = "0.2.3"
openssl = "0.10.42"

# CLI argument parsing
pico-args = "0.4.2"

# Logging panics to logfile instead stderr only
backtrace = "0.3.63"
pico-args = "0.5.0"

# Macro ident concatenation
paste = "1.0.6"
governor = "0.3.2"
paste = "1.0.9"
governor = "0.5.0"

# Capture CTRL+C
ctrlc = { version = "3.2.3", features = ["termination"] }

# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.30", features = ["secure"], default-features = false, optional = true }

[patch.crates-io]
# Use newest ring
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
# Using a patched version of multer-rs (Used by Rocket) to fix attachment/send file uploads
# Issue: https://github.com/dani-garcia/vaultwarden/issues/2644
# Patch: https://github.com/BlackDex/multer-rs/commit/477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a
multer = { git = "https://github.com/BlackDex/multer-rs", rev = "477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a" }

# The maintainer of the `job_scheduler` crate doesn't seem to have responded
# to any issues or PRs for almost a year (as of April 2021). This hopefully
# temporary fork updates Cargo.toml to use more up-to-date dependencies.
# In particular, `cron` has since implemented parsing of some common syntax
# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }
# Strip debuginfo from the release builds
# Also enable thin LTO for some optimizations
[profile.release]
strip = "debuginfo"
lto = "thin"
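The new enable_mimalloc feature simply gates the optional mimalloc dependency listed above. Wiring an optional global allocator in Rust is the standard `#[global_allocator]` pattern behind a cfg; a minimal sketch of that pattern (not necessarily vaultwarden's exact source):

```rust
// Replace the default malloc with MiMalloc when the feature is enabled.
// The "secure" cargo feature hardens the allocator at a small performance cost.
#[cfg(feature = "enable_mimalloc")]
use mimalloc::MiMalloc;

#[cfg(feature = "enable_mimalloc")]
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;
```

This is why the Alpine Dockerfiles below add enable_mimalloc to the DB build ARG while the Debian ones do not: the default musl malloc is notably slow, so only the musl-based images benefit from swapping the allocator.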
@@ -7,12 +7,12 @@
[](https://hub.docker.com/r/vaultwarden/server)
[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
[](https://github.com/dani-garcia/vaultwarden/releases/latest)
[](https://github.com/dani-garcia/vaultwarden/blob/master/LICENSE.txt)
[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
[](https://matrix.to/#/#vaultwarden:matrix.org)

Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).

**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor Bitwarden, Inc.**

#### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.

@@ -1,2 +0,0 @@
[global.limits]
json = 10485760 # 10 MiB
@@ -1,3 +1,4 @@
# syntax=docker/dockerfile:1
# The cross-built images have the build arch (`amd64`) embedded in the image
# manifest, rather than the target arch. For example:
#

@@ -3,39 +3,39 @@
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

{% set build_stage_base_image = "rust:1.58-buster" %}
{% set build_stage_base_image = "rust:1.64-bullseye" %}
{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-nightly-2022-01-23" %}
{% set runtime_stage_base_image = "alpine:3.15" %}
{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable-1.64.0" %}
{% set runtime_stage_base_image = "alpine:3.16" %}
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
{% elif "armv7" in target_file %}
{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.15" %}
{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable-1.64.0" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.16" %}
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
{% elif "armv6" in target_file %}
{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-nightly-2022-01-23" %}
{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.15" %}
{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable-1.64.0" %}
{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.16" %}
{% set package_arch_target = "arm-unknown-linux-musleabi" %}
{% elif "arm64" in target_file %}
{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-nightly-2022-01-23" %}
{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.15" %}
{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-stable-1.64.0" %}
{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.16" %}
{% set package_arch_target = "aarch64-unknown-linux-musl" %}
{% endif %}
{% elif "amd64" in target_file %}
{% set runtime_stage_base_image = "debian:buster-slim" %}
{% set runtime_stage_base_image = "debian:bullseye-slim" %}
{% elif "arm64" in target_file %}
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
{% set runtime_stage_base_image = "balenalib/aarch64-debian:bullseye" %}
{% set package_arch_name = "arm64" %}
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
{% set package_cross_compiler = "aarch64-linux-gnu" %}
{% elif "armv6" in target_file %}
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
{% set runtime_stage_base_image = "balenalib/rpi-debian:bullseye" %}
{% set package_arch_name = "armel" %}
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
{% set package_cross_compiler = "arm-linux-gnueabi" %}
{% elif "armv7" in target_file %}
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:bullseye" %}
{% set package_arch_name = "armhf" %}
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
@@ -59,8 +59,8 @@
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
{% set vault_version = "2.25.1b" %}
{% set vault_image_digest = "sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba" %}
{% set vault_version = "v2022.10.0" %}
{% set vault_image_digest = "sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80" %}
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
@@ -70,13 +70,13 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v{{ vault_version }}
#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:v{{ vault_version }}
#     $ docker pull vaultwarden/web-vault:{{ vault_version }}
#     $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:{{ vault_version }}
#     [vaultwarden/web-vault@{{ vault_image_digest }}]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
#     [vaultwarden/web-vault:v{{ vault_version }}]
#     [vaultwarden/web-vault:{{ vault_version }}]
#
FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault

@@ -93,22 +93,15 @@ ENV DEBIAN_FRONTEND=noninteractive \
CARGO_HOME="/root/.cargo" \
USER="root"

{# {% if "alpine" not in target_file and "buildx" in target_file %}
# Debian based Buildx builds can use some special apt caching to speedup building.
# By default Debian based images have some rules to keep docker builds clean, we need to remove this.
# See: https://hub.docker.com/r/docker/dockerfile
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
{% endif %} #}

# Create CARGO_HOME folder and don't download rust docs
RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

{% if "alpine" in target_file %}
ENV RUSTFLAGS='-C link-arg=-s'
{% if "armv7" in target_file %}
{#- https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html -#}
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
{% if "armv6" in target_file %}
# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
{% endif %}
{% elif "arm" in target_file %}
#
@@ -163,7 +156,12 @@ RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
{% endif %}

# Configure the DB ARG as late as possible to not invalidate the cached layers above
{% if "alpine" in target_file %}
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
{% else %}
ARG DB=sqlite,mysql,postgresql
{% endif %}

# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -182,21 +180,15 @@ RUN touch src/main.rs
# your actual source files being built
# hadolint ignore=DL3059
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}
{% if "alpine" in target_file %}
{% if "armv7" in target_file %}
# hadolint ignore=DL3059
RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden
{% endif %}
{% endif %}

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM {{ runtime_stage_base_image }}

ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80
{%- if "alpine" in runtime_stage_base_image %} \
SSL_CERT_DIR=/etc/ssl/certs
{% endif %}
@@ -214,7 +206,6 @@ RUN mkdir /data \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates
{% else %}
&& apt-get update && apt-get install -y \
@@ -222,13 +213,20 @@ RUN mkdir /data \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
{% endif %}

{% if "armv6" in target_file and "alpine" not in target_file %}
# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

{% endif -%}

{% if "amd64" not in target_file %}
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
@@ -241,7 +239,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
{% if package_arch_target is defined %}
COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
@@ -254,6 +251,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.25.1b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
#     [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#     $ docker pull vaultwarden/web-vault:v2022.10.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
#     [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
#     [vaultwarden/web-vault:v2.25.1b]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
#     [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
FROM rust:1.64-bullseye as build

@@ -87,11 +87,11 @@ RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim
FROM debian:bullseye-slim

ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# Create data folder and Install needed libraries
@@ -101,7 +101,6 @@ RUN mkdir /data \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
@@ -115,7 +114,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

@@ -124,6 +122,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.25.1b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
#     [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#     $ docker pull vaultwarden/web-vault:v2022.10.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
#     [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
#     [vaultwarden/web-vault:v2.25.1b]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
#     [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:x86_64-musl-nightly-2022-01-23 as build
FROM blackdex/rust-musl:x86_64-musl-stable-1.64.0 as build

@@ -44,7 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -58,7 +57,8 @@ COPY ./build.rs ./build.rs
RUN rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -81,11 +81,11 @@ RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.15
FROM alpine:3.16

ENV ROCKET_ENV="staging" \
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs

@@ -96,7 +96,6 @@ RUN mkdir /data \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates

@@ -107,7 +106,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

@@ -116,6 +114,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.25.1b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
#     [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#     $ docker pull vaultwarden/web-vault:v2022.10.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
#     [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
#     [vaultwarden/web-vault:v2.25.1b]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
#     [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
FROM rust:1.64-bullseye as build

@@ -87,11 +87,11 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim
FROM debian:bullseye-slim

ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# Create data folder and Install needed libraries
@@ -101,7 +101,6 @@ RUN mkdir /data \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
@@ -115,7 +114,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

@@ -124,6 +122,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:x86_64-musl-nightly-2022-01-23 as build
FROM blackdex/rust-musl:x86_64-musl-stable-1.64.0 as build

@@ -44,7 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -58,7 +57,8 @@ COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -81,11 +81,11 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.15
FROM alpine:3.16

ENV ROCKET_ENV="staging" \
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs

@@ -96,7 +96,6 @@ RUN mkdir /data \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates

@@ -107,7 +106,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

@@ -116,6 +114,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
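The new enable_mimalloc cargo feature above typically works by swapping Rust's global allocator, the usual motivation on Alpine/musl builds where the default allocator is slower. A sketch of that pattern, with the crate usage assumed rather than taken from this diff:

```rust
// Sketch of a feature-gated global allocator (assumed pattern, not taken from
// this diff): when built with --features enable_mimalloc, heap allocations go
// through mimalloc instead of the musl allocator.
#[cfg(feature = "enable_mimalloc")]
use mimalloc::MiMalloc;

#[cfg(feature = "enable_mimalloc")]
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;

fn main() {
    // Allocated via MiMalloc when the feature is enabled.
    let v: Vec<u8> = Vec::with_capacity(1024);
    println!("allocated {} bytes", v.capacity());
}
```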
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
FROM rust:1.64-bullseye as build

@@ -107,11 +107,11 @@ RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster
FROM balenalib/aarch64-debian:bullseye

ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -123,7 +123,6 @@ RUN mkdir /data \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
@@ -139,7 +138,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

@@ -148,6 +146,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-nightly-2022-01-23 as build
FROM blackdex/rust-musl:aarch64-musl-stable-1.64.0 as build

@@ -44,7 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -58,7 +57,8 @@ COPY ./build.rs ./build.rs
RUN rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -81,11 +81,11 @@ RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.15
FROM balenalib/aarch64-alpine:3.16

ENV ROCKET_ENV="staging" \
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs

@@ -98,7 +98,6 @@ RUN mkdir /data \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates

# hadolint ignore=DL3059
@@ -111,7 +110,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

@@ -120,6 +118,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
FROM rust:1.64-bullseye as build

@@ -107,11 +107,11 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster
FROM balenalib/aarch64-debian:bullseye

ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -123,7 +123,6 @@ RUN mkdir /data \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
@@ -139,7 +138,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

@@ -148,6 +146,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-nightly-2022-01-23 as build
FROM blackdex/rust-musl:aarch64-musl-stable-1.64.0 as build

@@ -44,7 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -58,7 +57,8 @@ COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -81,11 +83,11 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.15
FROM balenalib/aarch64-alpine:3.16

ENV ROCKET_ENV="staging" \
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs

@@ -98,7 +100,6 @@ RUN mkdir /data \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates

# hadolint ignore=DL3059
@@ -111,7 +112,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

@@ -120,6 +118,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
FROM rust:1.64-bullseye as build

@@ -107,11 +107,11 @@ RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster
FROM balenalib/rpi-debian:bullseye

ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -123,12 +123,16 @@ RUN mkdir /data \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

@@ -139,7 +143,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

@@ -148,6 +151,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-nightly-2022-01-23 as build
FROM blackdex/rust-musl:arm-musleabi-stable-1.64.0 as build

@@ -44,7 +44,8 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'
# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -58,7 +59,8 @@ COPY ./build.rs ./build.rs
RUN rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -81,11 +83,11 @@ RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.15
FROM balenalib/rpi-alpine:3.16

ENV ROCKET_ENV="staging" \
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs

@@ -98,7 +100,6 @@ RUN mkdir /data \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates

# hadolint ignore=DL3059
@@ -111,7 +112,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

@@ -120,6 +120,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
FROM rust:1.64-bullseye as build

@@ -107,11 +107,11 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster
FROM balenalib/rpi-debian:bullseye

ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -123,12 +123,16 @@ RUN mkdir /data \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

@@ -139,7 +143,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

@@ -148,6 +151,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-nightly-2022-01-23 as build
FROM blackdex/rust-musl:arm-musleabi-stable-1.64.0 as build

@@ -44,7 +44,8 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'
# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -58,7 +59,8 @@ COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -81,11 +83,11 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.15
FROM balenalib/rpi-alpine:3.16

ENV ROCKET_ENV="staging" \
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs

@@ -98,7 +100,6 @@ RUN mkdir /data \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates

# hadolint ignore=DL3059
@@ -111,7 +112,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

@@ -120,6 +120,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
FROM rust:1.64-bullseye as build

@@ -107,11 +107,11 @@ RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabih
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster
FROM balenalib/armv7hf-debian:bullseye

ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -123,7 +123,6 @@ RUN mkdir /data \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
@@ -139,7 +138,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

@@ -148,6 +146,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23 as build
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.64.0 as build

@@ -44,8 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -59,7 +57,8 @@ COPY ./build.rs ./build.rs
RUN rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -78,17 +77,15 @@ RUN touch src/main.rs
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
# hadolint ignore=DL3059
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.15
FROM balenalib/armv7hf-alpine:3.16

ENV ROCKET_ENV="staging" \
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs

@@ -101,7 +98,6 @@ RUN mkdir /data \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates

# hadolint ignore=DL3059
@@ -114,7 +110,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

@@ -123,6 +118,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
FROM rust:1.64-bullseye as build

@@ -107,11 +107,11 @@ RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster
FROM balenalib/armv7hf-debian:bullseye

ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -123,7 +123,6 @@ RUN mkdir /data \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
@@ -139,7 +138,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

@@ -148,6 +146,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -16,18 +16,18 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
# $ docker pull vaultwarden/web-vault:v2022.10.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.10.0
# [vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80
# [vaultwarden/web-vault:v2022.10.0]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
FROM vaultwarden/web-vault@sha256:8e8405d252bb6ecc7d59d90e9ba9dde09f35c1b6858371274c67c3e0a6f14a80 as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23 as build
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.64.0 as build

@@ -44,8 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -59,7 +57,8 @@ COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
@@ -78,17 +77,15 @@ RUN touch src/main.rs
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
# hadolint ignore=DL3059
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.15
FROM balenalib/armv7hf-alpine:3.16

ENV ROCKET_ENV="staging" \
ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs

@@ -101,7 +98,6 @@ RUN mkdir /data \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates

# hadolint ignore=DL3059
@@ -114,7 +110,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

@@ -123,6 +118,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@@ -2,8 +2,8 @@

# Use the value of the corresponding env var (if present),
# or a default value otherwise.
: ${DATA_FOLDER:="data"}
: ${ROCKET_PORT:="80"}
: "${DATA_FOLDER:="data"}"
: "${ROCKET_PORT:="80"}"

CONFIG_FILE="${DATA_FOLDER}"/config.json

@@ -9,15 +9,15 @@ fi

if [ -d /etc/vaultwarden.d ]; then
for f in /etc/vaultwarden.d/*.sh; do
if [ -r $f ]; then
. $f
if [ -r "${f}" ]; then
. "${f}"
fi
done
elif [ -d /etc/bitwarden_rs.d ]; then
echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###"
for f in /etc/bitwarden_rs.d/*.sh; do
if [ -r $f ]; then
. $f
if [ -r "${f}" ]; then
. "${f}"
fi
done
fi
@@ -0,0 +1,4 @@
-- First remove the previous primary key
ALTER TABLE devices DROP PRIMARY KEY;
-- Add a new combined one
ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid);
@@ -0,0 +1,4 @@
-- First remove the previous primary key
ALTER TABLE devices DROP CONSTRAINT devices_pkey;
-- Add a new combined one
ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid);
@@ -0,0 +1,23 @@
-- Create new devices table with primary keys on both uuid and user_uuid
CREATE TABLE devices_new (
uuid TEXT NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME NOT NULL,
user_uuid TEXT NOT NULL,
name TEXT NOT NULL,
atype INTEGER NOT NULL,
push_token TEXT,
refresh_token TEXT NOT NULL,
twofactor_remember TEXT,
PRIMARY KEY(uuid, user_uuid),
FOREIGN KEY(user_uuid) REFERENCES users(uuid)
);

-- Transfer current data to new table
INSERT INTO devices_new SELECT * FROM devices;

-- Drop the old table
DROP TABLE devices;

-- Rename the new table to the original name
ALTER TABLE devices_new RENAME TO devices;
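The three migrations above make the same schema change per backend: MySQL can drop a primary key directly, PostgreSQL drops the named constraint, and SQLite cannot alter a primary key at all, so the table is rebuilt and swapped. On the Rust side, a composite key like this maps onto a Diesel schema declaration roughly as follows (a sketch with an abbreviated column list, not the project's actual schema file):

```rust
// Hypothetical Diesel schema sketch for the new composite key: listing two
// column names in the table!(...) header declares (uuid, user_uuid) as the
// table's primary key.
diesel::table! {
    devices (uuid, user_uuid) {
        uuid -> Text,
        user_uuid -> Text,
        name -> Text,
        // ...remaining columns elided for brevity
    }
}
```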
@@ -1 +1 @@
nightly-2022-01-23
1.64.0

@@ -1,7 +1,7 @@
version = "Two"
edition = "2018"
# version = "Two"
edition = "2021"
max_width = 120
newline_style = "Unix"
use_small_heuristics = "Off"
struct_lit_single_line = false
overflow_delimited_expr = true
# struct_lit_single_line = false
# overflow_delimited_expr = true
src/api/admin.rs
@@ -3,13 +3,14 @@ use serde::de::DeserializeOwned;
use serde_json::Value;
use std::env;

use rocket::serde::json::Json;
use rocket::{
http::{Cookie, Cookies, SameSite, Status},
request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
response::{content::Html, Flash, Redirect},
form::Form,
http::{Cookie, CookieJar, SameSite, Status},
request::{self, FromRequest, Outcome, Request},
response::{content::RawHtml as Html, Redirect},
Route,
};
use rocket_contrib::json::Json;

use crate::{
api::{ApiResult, EmptyResult, JsonResult, NumberOrString},
@@ -24,6 +25,8 @@ use crate::{
CONFIG, VERSION,
};

use futures::{stream, stream::StreamExt};

pub fn routes() -> Vec<Route> {
if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
return routes![admin_disabled];
@@ -76,6 +79,7 @@ fn admin_disabled() -> &'static str {

const COOKIE_NAME: &str = "VW_ADMIN";
const ADMIN_PATH: &str = "/admin";
const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z";

const BASE_TEMPLATE: &str = "admin/base";

@@ -85,10 +89,11 @@ fn admin_path() -> String {

struct Referer(Option<String>);

impl<'a, 'r> FromRequest<'a, 'r> for Referer {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for Referer {
type Error = ();

fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
}
}
|
||||
#[derive(Debug)]
|
||||
struct IpHeader(Option<String>);
|
||||
|
||||
impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
|
||||
#[rocket::async_trait]
|
||||
impl<'r> FromRequest<'r> for IpHeader {
|
||||
type Error = ();
|
||||
|
||||
fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
|
||||
async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
|
||||
if req.headers().get_one(&CONFIG.ip_header()).is_some() {
|
||||
Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
|
||||
} else if req.headers().get_one("X-Client-IP").is_some() {
|
||||
@@ -135,10 +141,24 @@ fn admin_url(referer: Referer) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Responder)]
|
||||
enum AdminResponse {
|
||||
#[response(status = 200)]
|
||||
Ok(ApiResult<Html<String>>),
|
||||
#[response(status = 401)]
|
||||
Unauthorized(ApiResult<Html<String>>),
|
||||
#[response(status = 429)]
|
||||
TooManyRequests(ApiResult<Html<String>>),
|
||||
}
|
||||
|
||||
#[get("/", rank = 2)]
|
||||
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
|
||||
fn admin_login() -> ApiResult<Html<String>> {
|
||||
render_admin_login(None)
|
||||
}
|
||||
|
||||
fn render_admin_login(msg: Option<&str>) -> ApiResult<Html<String>> {
|
||||
// If there is an error, show it
|
||||
let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
|
||||
let msg = msg.map(|msg| format!("Error: {msg}"));
|
||||
let json = json!({
|
||||
"page_content": "admin/login",
|
||||
"version": VERSION,
|
||||
@@ -157,22 +177,17 @@ struct LoginForm {
|
||||
}
|
||||
|
||||
#[post("/", data = "<data>")]
|
||||
fn post_admin_login(
|
||||
data: Form<LoginForm>,
|
||||
mut cookies: Cookies,
|
||||
ip: ClientIp,
|
||||
referer: Referer,
|
||||
) -> Result<Redirect, Flash<Redirect>> {
|
||||
fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> AdminResponse {
|
||||
let data = data.into_inner();
|
||||
|
||||
if crate::ratelimit::check_limit_admin(&ip.ip).is_err() {
|
||||
return Err(Flash::error(Redirect::to(admin_url(referer)), "Too many requests, try again later."));
|
||||
return AdminResponse::TooManyRequests(render_admin_login(Some("Too many requests, try again later.")));
|
||||
}
|
||||
|
||||
// If the token is invalid, redirect to login page
|
||||
if !_validate_token(&data.token) {
|
||||
error!("Invalid admin token. IP: {}", ip.ip);
|
||||
Err(Flash::error(Redirect::to(admin_url(referer)), "Invalid admin token, please try again."))
|
||||
AdminResponse::Unauthorized(render_admin_login(Some("Invalid admin token, please try again.")))
|
||||
} else {
|
||||
// If the token received is valid, generate JWT and save it as a cookie
|
||||
let claims = generate_admin_claims();
|
||||
@@ -180,13 +195,13 @@ fn post_admin_login(
|
||||
|
||||
let cookie = Cookie::build(COOKIE_NAME, jwt)
|
||||
.path(admin_path())
|
||||
.max_age(time::Duration::minutes(20))
|
||||
.max_age(rocket::time::Duration::minutes(20))
|
||||
.same_site(SameSite::Strict)
|
||||
.http_only(true)
|
||||
.finish();
|
||||
|
||||
cookies.add(cookie);
|
||||
Ok(Redirect::to(admin_url(referer)))
|
||||
AdminResponse::Ok(render_admin_page())
|
||||
}
|
||||
}
|
||||
|
||||
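The login flow above drops the FlashMessage redirect dance: the AdminResponse enum uses derive(Responder) so the handler can return the login page directly with the right status code. A minimal sketch of that derive, with names mirroring the diff but the body simplified:

```rust
use rocket::response::content::RawHtml;

// Sketch: one enum, three HTTP statuses, same HTML body type. Returning
// LoginOutcome::Unauthorized(..) from a handler sets status 401 without
// a redirect round-trip.
#[derive(rocket::Responder)]
enum LoginOutcome {
    #[response(status = 200)]
    Ok(RawHtml<String>),
    #[response(status = 401)]
    Unauthorized(RawHtml<String>),
    #[response(status = 429)]
    TooManyRequests(RawHtml<String>),
}

fn too_many() -> LoginOutcome {
    LoginOutcome::TooManyRequests(RawHtml("<p>Too many requests</p>".to_string()))
}
```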
@@ -238,20 +253,24 @@ impl AdminTemplateData {
}
}

#[get("/", rank = 1)]
fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
fn render_admin_page() -> ApiResult<Html<String>> {
let text = AdminTemplateData::new().render()?;
Ok(Html(text))
}

#[get("/", rank = 1)]
fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
render_admin_page()
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct InviteData {
email: String,
}

fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
if let Some(user) = User::find_by_uuid(uuid, conn) {
async fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
if let Some(user) = User::find_by_uuid(uuid, conn).await {
Ok(user)
} else {
err_code!("User doesn't exist", Status::NotFound.code);
@@ -259,128 +278,135 @@ fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
}

#[post("/invite", data = "<data>")]
fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> JsonResult {
async fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> JsonResult {
let data: InviteData = data.into_inner();
let email = data.email.clone();
if User::find_by_mail(&data.email, &conn).is_some() {
if User::find_by_mail(&data.email, &conn).await.is_some() {
err_code!("User already exists", Status::Conflict.code)
}

let mut user = User::new(email);

// TODO: After try_blocks is stabilized, this can be made more readable
// See: https://github.com/rust-lang/rust/issues/31436
(|| {
async fn _generate_invite(user: &User, conn: &DbConn) -> EmptyResult {
if CONFIG.mail_enabled() {
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)?;
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
} else {
let invitation = Invitation::new(user.email.clone());
invitation.save(&conn)?;
invitation.save(conn).await
}
}

user.save(&conn)
})()
.map_err(|e| e.with_code(Status::InternalServerError.code))?;
_generate_invite(&user, &conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
user.save(&conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;

Ok(Json(user.to_json(&conn)))
Ok(Json(user.to_json(&conn).await))
}
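The old code's immediately-invoked closure `(|| { ... })()` was a stand-in for unstabilized try blocks, but a closure cannot await, so the async rewrite above hoists the fallible steps into an inner async fn and maps the error once. The same shape in isolation, with stand-in functions rather than the real mail and database calls:

```rust
// Stand-ins for the real mail/db calls in invite_user (hypothetical).
async fn send_mail() -> Result<(), String> {
    Ok(())
}
async fn save_invitation() -> Result<(), String> {
    Ok(())
}

async fn generate_invite(mail_enabled: bool) -> Result<(), String> {
    // The inner async fn groups the fallible awaits so the caller can attach
    // one error mapping, like .map_err(|e| e.with_code(500)) in the diff.
    async fn inner(mail_enabled: bool) -> Result<(), String> {
        if mail_enabled {
            send_mail().await
        } else {
            save_invitation().await
        }
    }
    inner(mail_enabled).await.map_err(|e| format!("invite failed: {e}"))
}
```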
#[post("/test/smtp", data = "<data>")]
|
||||
fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
|
||||
async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
|
||||
let data: InviteData = data.into_inner();
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_test(&data.email)
|
||||
mail::send_test(&data.email).await
|
||||
} else {
|
||||
err!("Mail is not enabled")
|
||||
}
|
||||
}
|
||||
|
||||
#[get("/logout")]
|
||||
fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
|
||||
cookies.remove(Cookie::named(COOKIE_NAME));
|
||||
Redirect::to(admin_url(referer))
|
||||
fn logout(cookies: &CookieJar<'_>, referer: Referer) -> Redirect {
|
||||
cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
|
||||
Redirect::temporary(admin_url(referer))
|
||||
}
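
In the cookie API used here, a removal only takes effect if the removal cookie carries the same path the original was set with, which is why the rewritten logout builds the cookie with .path(admin_path()) instead of Cookie::named(COOKIE_NAME). A minimal sketch of that pattern, with a placeholder cookie name and path rather than vaultwarden's actual values:

    use rocket::http::{Cookie, CookieJar};

    // Placeholder cookie name for illustration only.
    const SESSION_COOKIE: &str = "SESSION";

    fn remove_session(cookies: &CookieJar<'_>) {
        // The removal cookie must carry the same path as the one that was
        // set, otherwise the browser keeps the original cookie alive.
        cookies.remove(Cookie::build(SESSION_COOKIE, "").path("/admin").finish());
    }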

#[get("/users")]
fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
let users = User::get_all(&conn);
let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
async fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
let users_json = stream::iter(User::get_all(&conn).await)
.then(|u| async {
let u = u; // Move out this single variable
let mut usr = u.to_json(&conn).await;
usr["UserEnabled"] = json!(u.enabled);
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
usr
})
.collect::<Vec<Value>>()
.await;

Json(Value::Array(users_json))
}
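
The stream::iter(...).then(...).collect().await chain introduced here is the recurring pattern this migration uses to run an async body over each database row, since Iterator::map cannot await. A self-contained sketch of the same pattern, assuming only the futures crate:

    use futures::{stream, StreamExt};

    // Run an async closure over each element, preserving order.
    async fn label_all(items: Vec<u32>) -> Vec<String> {
        stream::iter(items)
            .then(|n| async move { format!("item {}", n) })
            .collect::<Vec<String>>()
            .await
    }

.then drives the futures one at a time, which suits a single pooled database connection; buffered(n) would be the concurrent alternative.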

#[get("/users/overview")]
fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
let users = User::get_all(&conn);
let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
let users_json: Vec<Value> = users
.iter()
.map(|u| {
let mut usr = u.to_json(&conn);
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
async fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
let users_json = stream::iter(User::get_all(&conn).await)
.then(|u| async {
let u = u; // Move out this single variable
let mut usr = u.to_json(&conn).await;
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn).await);
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn).await);
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn).await as i32));
usr["user_enabled"] = json!(u.enabled);
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
usr["last_active"] = match u.last_active(&conn) {
Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
usr["last_active"] = match u.last_active(&conn).await {
Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
None => json!("Never"),
};
usr
})
.collect();
.collect::<Vec<Value>>()
.await;

let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?;
Ok(Html(text))
}

#[get("/users/<uuid>")]
fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult {
let user = get_user_or_404(&uuid, &conn)?;

Ok(Json(user.to_json(&conn)))
async fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult {
let u = get_user_or_404(&uuid, &conn).await?;
let mut usr = u.to_json(&conn).await;
usr["UserEnabled"] = json!(u.enabled);
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
Ok(Json(usr))
}

#[post("/users/<uuid>/delete")]
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let user = get_user_or_404(&uuid, &conn)?;
user.delete(&conn)
async fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let user = get_user_or_404(&uuid, &conn).await?;
user.delete(&conn).await
}

#[post("/users/<uuid>/deauth")]
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = get_user_or_404(&uuid, &conn)?;
Device::delete_all_by_user(&user.uuid, &conn)?;
async fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = get_user_or_404(&uuid, &conn).await?;
Device::delete_all_by_user(&user.uuid, &conn).await?;
user.reset_security_stamp();

user.save(&conn)
user.save(&conn).await
}

#[post("/users/<uuid>/disable")]
fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = get_user_or_404(&uuid, &conn)?;
Device::delete_all_by_user(&user.uuid, &conn)?;
async fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = get_user_or_404(&uuid, &conn).await?;
Device::delete_all_by_user(&user.uuid, &conn).await?;
user.reset_security_stamp();
user.enabled = false;

user.save(&conn)
user.save(&conn).await
}

#[post("/users/<uuid>/enable")]
fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = get_user_or_404(&uuid, &conn)?;
async fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = get_user_or_404(&uuid, &conn).await?;
user.enabled = true;

user.save(&conn)
user.save(&conn).await
}

#[post("/users/<uuid>/remove-2fa")]
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = get_user_or_404(&uuid, &conn)?;
TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
async fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let mut user = get_user_or_404(&uuid, &conn).await?;
TwoFactor::delete_all_by_user(&user.uuid, &conn).await?;
user.totp_recover = None;
user.save(&conn)
user.save(&conn).await
}

#[derive(Deserialize, Debug)]
@@ -391,10 +417,10 @@ struct UserOrgTypeData {
}

#[post("/users/org_type", data = "<data>")]
fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
async fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
let data: UserOrgTypeData = data.into_inner();

let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn).await {
Some(user) => user,
None => err!("The specified user isn't member of the organization"),
};
@@ -405,46 +431,58 @@ fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: D
};

if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
// Removing owner permission, check that there is at least one other owner
let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();

if num_owners <= 1 {
// Removing owner permission, check that there is at least one other confirmed owner
if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &conn).await <= 1 {
err!("Can't change the type of the last owner")
}
}

user_to_edit.atype = new_type as i32;
user_to_edit.save(&conn)
// This check is also done at api::organizations::{accept_invite(), _confirm_invite, _activate_user(), edit_user()}, update_user_org_type
// It returns different error messages per function.
if new_type < UserOrgType::Admin {
match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &conn).await {
Ok(_) => {}
Err(OrgPolicyErr::TwoFactorMissing) => {
err!("You cannot modify this user to this type because it has no two-step login method activated");
}
Err(OrgPolicyErr::SingleOrgEnforced) => {
err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
}
}
}

user_to_edit.atype = new_type;
user_to_edit.save(&conn).await
}

#[post("/users/update_revision")]
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
User::update_all_revisions(&conn)
async fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
User::update_all_revisions(&conn).await
}

#[get("/organizations/overview")]
fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
let organizations = Organization::get_all(&conn);
let organizations_json: Vec<Value> = organizations
.iter()
.map(|o| {
async fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
let organizations_json = stream::iter(Organization::get_all(&conn).await)
.then(|o| async {
let o = o; // Move out this single variable
let mut org = o.to_json();
org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn).await);
org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn).await);
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn).await);
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn).await as i32));
org
})
.collect();
.collect::<Vec<Value>>()
.await;

let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?;
Ok(Html(text))
}

#[post("/organizations/<uuid>/delete")]
fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
org.delete(&conn)
async fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
let org = Organization::find_by_uuid(&uuid, &conn).await.map_res("Organization doesn't exist")?;
org.delete(&conn).await
}

#[derive(Deserialize)]
@@ -462,62 +500,37 @@ struct GitCommit {
sha: String,
}

fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
async fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
let github_api = get_reqwest_client();

Ok(github_api.get(url).send()?.error_for_status()?.json::<T>()?)
Ok(github_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
}

fn has_http_access() -> bool {
async fn has_http_access() -> bool {
let http_access = get_reqwest_client();

match http_access.head("https://github.com/dani-garcia/vaultwarden").send() {
match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
Ok(r) => r.status().is_success(),
_ => false,
}
}

#[get("/diagnostics")]
fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
use crate::util::read_file_string;
use chrono::prelude::*;
use std::net::ToSocketAddrs;

// Get current running versions
let web_vault_version: WebVaultVersion =
match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
Ok(s) => serde_json::from_str(&s)?,
_ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
Ok(s) => serde_json::from_str(&s)?,
_ => WebVaultVersion {
version: String::from("Version file missing"),
},
},
};

// Execute some environment checks
let running_within_docker = is_running_in_docker();
let has_http_access = has_http_access();
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|| env::var_os("http_proxy").is_some()
|| env::var_os("HTTPS_PROXY").is_some()
|| env::var_os("https_proxy").is_some();

// Check if we are able to resolve DNS entries
let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
Ok(Some(a)) => a.ip().to_string(),
_ => "Could not resolve domain name.".to_string(),
};

use cached::proc_macro::cached;
/// Cache this function to prevent hitting the API rate limit. GitHub only allows 60 requests per hour, and we use 3 here already.
/// The result is cached for 300 seconds (5 minutes), which should prevent exhausting the rate limit.
#[cached(time = 300, sync_writes = true)]
async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
// If the HTTP check failed, do not even attempt to check for new versions since we were not able to connect to github.com anyway.
// TODO: Maybe we need to cache this using a LazyStatic or something. GitHub only allows 60 requests per hour, and we use 3 here already.
let (latest_release, latest_commit, latest_web_build) = if has_http_access {
if has_http_access {
(
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest") {
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
.await
{
Ok(r) => r.tag_name,
_ => "-".to_string(),
},
match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main") {
match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await
{
Ok(mut c) => {
c.sha.truncate(8);
c.sha
@@ -531,7 +544,9 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
} else {
match get_github_api::<GitRelease>(
"https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
) {
)
.await
{
Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
_ => "-".to_string(),
}
@@ -539,8 +554,43 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
)
} else {
("-".to_string(), "-".to_string(), "-".to_string())
}
}
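
The #[cached] attribute on get_release_info comes from the cached crate's proc macro: the return value is memoized per argument tuple for 300 seconds, and sync_writes = true makes concurrent callers wait on one in-flight computation instead of each hitting the GitHub API. A reduced sketch of the mechanism, with a stand-in body instead of the real lookup:

    use cached::proc_macro::cached;

    // Results are cached per `repo` value for 300 seconds.
    #[cached(time = 300, sync_writes = true)]
    async fn release_tag(repo: String) -> String {
        // Stand-in for a network call; the real code queries the GitHub API.
        format!("v1.0.0-{}", repo.len())
    }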

#[get("/diagnostics")]
async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
use chrono::prelude::*;
use std::net::ToSocketAddrs;

// Get current running versions
let web_vault_version: WebVaultVersion =
match std::fs::read_to_string(&format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
Ok(s) => serde_json::from_str(&s)?,
_ => match std::fs::read_to_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
Ok(s) => serde_json::from_str(&s)?,
_ => WebVaultVersion {
version: String::from("Version file missing"),
},
},
};

// Execute some environment checks
let running_within_docker = is_running_in_docker();
let has_http_access = has_http_access().await;
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|| env::var_os("http_proxy").is_some()
|| env::var_os("HTTPS_PROXY").is_some()
|| env::var_os("https_proxy").is_some();

// Check if we are able to resolve DNS entries
let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
Ok(Some(a)) => a.ip().to_string(),
_ => "Could not resolve domain name.".to_string(),
};

let (latest_release, latest_commit, latest_web_build) =
get_release_info(has_http_access, running_within_docker).await;

let ip_header_name = match &ip_header.0 {
Some(h) => h,
_ => "",
@@ -562,7 +612,7 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
"ip_header_config": &CONFIG.ip_header(),
"uses_proxy": uses_proxy,
"db_type": *DB_TYPE,
"db_version": get_sql_server_version(&conn),
"db_version": get_sql_server_version(&conn).await,
"admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
"overrides": &CONFIG.get_overrides().join(", "),
"server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
@@ -591,9 +641,9 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
}

#[post("/config/backup_db")]
fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
async fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
if *CAN_BACKUP {
backup_database(&conn)
backup_database(&conn).await
} else {
err!("Can't back up current DB (Only SQLite supports this feature)");
}
@@ -601,28 +651,29 @@ fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {

pub struct AdminToken {}

impl<'a, 'r> FromRequest<'a, 'r> for AdminToken {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for AdminToken {
type Error = &'static str;

fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
if CONFIG.disable_admin_token() {
Outcome::Success(AdminToken {})
} else {
let mut cookies = request.cookies();
let cookies = request.cookies();

let access_token = match cookies.get(COOKIE_NAME) {
Some(cookie) => cookie.value(),
None => return Outcome::Forward(()), // If there is no cookie, redirect to login
};

let ip = match request.guard::<ClientIp>() {
let ip = match ClientIp::from_request(request).await {
Outcome::Success(ip) => ip.ip,
_ => err_handler!("Error getting Client IP"),
};

if decode_admin(access_token).is_err() {
// Remove admin cookie
cookies.remove(Cookie::named(COOKIE_NAME));
cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
error!("Invalid or expired admin JWT. IP: {}.", ip);
return Outcome::Forward(());
}

@@ -1,5 +1,5 @@
use chrono::Utc;
use rocket_contrib::json::Json;
use rocket::serde::json::Json;
use serde_json::Value;

use crate::{
@@ -62,12 +62,43 @@ struct KeysData {
PublicKey: String,
}

/// Trims whitespace from password hints, and converts blank password hints to `None`.
fn clean_password_hint(password_hint: &Option<String>) -> Option<String> {
match password_hint {
None => None,
Some(h) => match h.trim() {
"" => None,
ht => Some(ht.to_string()),
},
}
}

fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult {
if password_hint.is_some() && !CONFIG.password_hints_allowed() {
err!("Password hints have been disabled by the administrator. Remove the hint and try again.");
}
Ok(())
}
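
A quick illustration of what the helper pair above accepts and rejects (the sample values are hypothetical):

    // clean_password_hint(&None)                          -> None
    // clean_password_hint(&Some("   ".to_string()))       -> None (blank after trim)
    // clean_password_hint(&Some(" hunter2 ".to_string())) -> Some("hunter2")
    //
    // enforce_password_hint_setting then errors only when a Some(..) hint
    // survives cleaning while CONFIG.password_hints_allowed() is false.
    assert_eq!(clean_password_hint(&Some(" hunter2 ".to_string())), Some("hunter2".to_string()));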

#[post("/accounts/register", data = "<data>")]
fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
let data: RegisterData = data.into_inner().data;
let email = data.Email.to_lowercase();

let mut user = match User::find_by_mail(&email, &conn) {
// Check if the length of the username exceeds 50 characters (Same as Upstream Bitwarden)
// This also prevents issues with very long usernames causing too large JWTs. See #2419
if let Some(ref name) = data.Name {
if name.len() > 50 {
err!("The field Name must be a string with a maximum length of 50.");
}
}

// Check against the password hint setting here so if it fails, the user
// can retry without losing their invitation below.
let password_hint = clean_password_hint(&data.MasterPasswordHint);
enforce_password_hint_setting(&password_hint)?;

let mut user = match User::find_by_mail(&email, &conn).await {
Some(user) => {
if !user.password_hash.is_empty() {
if CONFIG.is_signup_allowed(&email) {
@@ -84,13 +115,13 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
} else {
err!("Registration email does not match invite email")
}
} else if Invitation::take(&email, &conn) {
for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() {
} else if Invitation::take(&email, &conn).await {
for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).await.iter_mut() {
user_org.status = UserOrgStatus::Accepted as i32;
user_org.save(&conn)?;
user_org.save(&conn).await?;
}
user
} else if EmergencyAccess::find_invited_by_grantee_email(&email, &conn).is_some() {
} else if EmergencyAccess::find_invited_by_grantee_email(&email, &conn).await.is_some() {
user
} else if CONFIG.is_signup_allowed(&email) {
err!("Account with this email already exists")
@@ -102,7 +133,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
// Order is important here; the invitation check must come first
// because the vaultwarden admin can invite anyone, regardless
// of other signup restrictions.
if Invitation::take(&email, &conn) || CONFIG.is_signup_allowed(&email) {
if Invitation::take(&email, &conn).await || CONFIG.is_signup_allowed(&email) {
User::new(email.clone())
} else {
err!("Registration not allowed or user already exists")
@@ -111,7 +142,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
};

// Make sure we don't leave a lingering invitation.
Invitation::take(&email, &conn);
Invitation::take(&email, &conn).await;

if let Some(client_kdf_iter) = data.KdfIterations {
user.client_kdf_iter = client_kdf_iter;
@@ -123,16 +154,13 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {

user.set_password(&data.MasterPasswordHash, None);
user.akey = data.Key;
user.password_hint = password_hint;

// Add extra fields if present
if let Some(name) = data.Name {
user.name = name;
}

if let Some(hint) = data.MasterPasswordHint {
user.password_hint = Some(hint);
}

if let Some(keys) = data.Keys {
user.private_key = Some(keys.EncryptedPrivateKey);
user.public_key = Some(keys.PublicKey);
@@ -140,22 +168,26 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {

if CONFIG.mail_enabled() {
if CONFIG.signups_verify() {
if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid) {
if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
error!("Error sending welcome email: {:#?}", e);
}

user.last_verifying_at = Some(user.created_at);
} else if let Err(e) = mail::send_welcome(&user.email) {
} else if let Err(e) = mail::send_welcome(&user.email).await {
error!("Error sending welcome email: {:#?}", e);
}
}

user.save(&conn)
user.save(&conn).await?;
Ok(Json(json!({
"Object": "register",
"CaptchaBypassToken": "",
})))
}

#[get("/accounts/profile")]
fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
Json(headers.user.to_json(&conn))
async fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
Json(headers.user.to_json(&conn).await)
}

#[derive(Deserialize, Debug)]
@@ -168,28 +200,32 @@ struct ProfileData {
}

#[put("/accounts/profile", data = "<data>")]
fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
post_profile(data, headers, conn)
async fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
post_profile(data, headers, conn).await
}

#[post("/accounts/profile", data = "<data>")]
fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: ProfileData = data.into_inner().data;

let mut user = headers.user;
// Check if the length of the username exceeds 50 characters (Same as Upstream Bitwarden)
// This also prevents issues with very long usernames causing too large JWTs. See #2419
if data.Name.len() > 50 {
err!("The field Name must be a string with a maximum length of 50.");
}

let mut user = headers.user;
user.name = data.Name;
user.password_hint = match data.MasterPasswordHint {
Some(ref h) if h.is_empty() => None,
_ => data.MasterPasswordHint,
};
user.save(&conn)?;
Ok(Json(user.to_json(&conn)))
user.password_hint = clean_password_hint(&data.MasterPasswordHint);
enforce_password_hint_setting(&user.password_hint)?;

user.save(&conn).await?;
Ok(Json(user.to_json(&conn).await))
}

#[get("/users/<uuid>/public-key")]
fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
let user = match User::find_by_uuid(&uuid, &conn) {
async fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
let user = match User::find_by_uuid(&uuid, &conn).await {
Some(user) => user,
None => err!("User doesn't exist"),
};
@@ -202,7 +238,7 @@ fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult
}

#[post("/accounts/keys", data = "<data>")]
fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: KeysData = data.into_inner().data;

let mut user = headers.user;
@@ -210,7 +246,7 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
user.private_key = Some(data.EncryptedPrivateKey);
user.public_key = Some(data.PublicKey);

user.save(&conn)?;
user.save(&conn).await?;

Ok(Json(json!({
"PrivateKey": user.private_key,
@@ -228,7 +264,7 @@ struct ChangePassData {
}

#[post("/accounts/password", data = "<data>")]
fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: ChangePassData = data.into_inner().data;
let mut user = headers.user;

@@ -241,7 +277,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
);
user.akey = data.Key;
user.save(&conn)
user.save(&conn).await
}

#[derive(Deserialize)]
@@ -256,7 +292,7 @@ struct ChangeKdfData {
}

#[post("/accounts/kdf", data = "<data>")]
fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: ChangeKdfData = data.into_inner().data;
let mut user = headers.user;

@@ -268,7 +304,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
user.client_kdf_type = data.Kdf;
user.set_password(&data.NewMasterPasswordHash, None);
user.akey = data.Key;
user.save(&conn)
user.save(&conn).await
}

#[derive(Deserialize)]
@@ -291,7 +327,7 @@ struct KeyData {
}

#[post("/accounts/key", data = "<data>")]
fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
let data: KeyData = data.into_inner().data;

if !headers.user.check_valid_password(&data.MasterPasswordHash) {
@@ -302,7 +338,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:

// Update folder data
for folder_data in data.Folders {
let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn) {
let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn).await {
Some(folder) => folder,
None => err!("Folder doesn't exist"),
};
@@ -312,14 +348,14 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
}

saved_folder.name = folder_data.Name;
saved_folder.save(&conn)?
saved_folder.save(&conn).await?
}

// Update cipher data
use super::ciphers::update_cipher_from_data;

for cipher_data in data.Ciphers {
let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn) {
let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn).await {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist"),
};
@@ -330,7 +366,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:

// Prevent triggering cipher updates via WebSockets by setting UpdateType::None
// The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None)?
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None).await?
}

// Update user data
@@ -340,11 +376,11 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
user.private_key = Some(data.PrivateKey);
user.reset_security_stamp();

user.save(&conn)
user.save(&conn).await
}

#[post("/accounts/security-stamp", data = "<data>")]
fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
let mut user = headers.user;

@@ -352,9 +388,9 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
err!("Invalid password")
}

Device::delete_all_by_user(&user.uuid, &conn)?;
Device::delete_all_by_user(&user.uuid, &conn).await?;
user.reset_security_stamp();
user.save(&conn)
user.save(&conn).await
}

#[derive(Deserialize)]
@@ -365,7 +401,7 @@ struct EmailTokenData {
}

#[post("/accounts/email-token", data = "<data>")]
fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: EmailTokenData = data.into_inner().data;
let mut user = headers.user;

@@ -373,7 +409,7 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
err!("Invalid password")
}

if User::find_by_mail(&data.NewEmail, &conn).is_some() {
if User::find_by_mail(&data.NewEmail, &conn).await.is_some() {
err!("Email already in use");
}

@@ -384,14 +420,14 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
let token = crypto::generate_email_token(6);

if CONFIG.mail_enabled() {
if let Err(e) = mail::send_change_email(&data.NewEmail, &token) {
if let Err(e) = mail::send_change_email(&data.NewEmail, &token).await {
error!("Error sending change-email email: {:#?}", e);
}
}

user.email_new = Some(data.NewEmail);
user.email_new_token = Some(token);
user.save(&conn)
user.save(&conn).await
}

#[derive(Deserialize)]
@@ -406,7 +442,7 @@ struct ChangeEmailData {
}

#[post("/accounts/email", data = "<data>")]
fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: ChangeEmailData = data.into_inner().data;
let mut user = headers.user;

@@ -414,7 +450,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
err!("Invalid password")
}

if User::find_by_mail(&data.NewEmail, &conn).is_some() {
if User::find_by_mail(&data.NewEmail, &conn).await.is_some() {
err!("Email already in use");
}

@@ -449,18 +485,18 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
user.set_password(&data.NewMasterPasswordHash, None);
user.akey = data.Key;

user.save(&conn)
user.save(&conn).await
}

#[post("/accounts/verify-email")]
fn post_verify_email(headers: Headers) -> EmptyResult {
async fn post_verify_email(headers: Headers) -> EmptyResult {
let user = headers.user;

if !CONFIG.mail_enabled() {
err!("Cannot verify email address");
}

if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
error!("Error sending verify_email email: {:#?}", e);
}

@@ -475,10 +511,10 @@ struct VerifyEmailTokenData {
}

#[post("/accounts/verify-email-token", data = "<data>")]
fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn) -> EmptyResult {
async fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn) -> EmptyResult {
let data: VerifyEmailTokenData = data.into_inner().data;

let mut user = match User::find_by_uuid(&data.UserId, &conn) {
let mut user = match User::find_by_uuid(&data.UserId, &conn).await {
Some(user) => user,
None => err!("User doesn't exist"),
};
@@ -493,7 +529,7 @@ fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn)
user.verified_at = Some(Utc::now().naive_utc());
user.last_verifying_at = None;
user.login_verify_count = 0;
if let Err(e) = user.save(&conn) {
if let Err(e) = user.save(&conn).await {
error!("Error saving email verification: {:#?}", e);
}

@@ -507,14 +543,12 @@ struct DeleteRecoverData {
}

#[post("/accounts/delete-recover", data = "<data>")]
fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, conn: DbConn) -> EmptyResult {
async fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, conn: DbConn) -> EmptyResult {
let data: DeleteRecoverData = data.into_inner().data;

let user = User::find_by_mail(&data.Email, &conn);

if CONFIG.mail_enabled() {
if let Some(user) = user {
if let Err(e) = mail::send_delete_account(&user.email, &user.uuid) {
if let Some(user) = User::find_by_mail(&data.Email, &conn).await {
if let Err(e) = mail::send_delete_account(&user.email, &user.uuid).await {
error!("Error sending delete account email: {:#?}", e);
}
}
@@ -536,10 +570,10 @@ struct DeleteRecoverTokenData {
}

#[post("/accounts/delete-recover-token", data = "<data>")]
fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbConn) -> EmptyResult {
async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbConn) -> EmptyResult {
let data: DeleteRecoverTokenData = data.into_inner().data;

let user = match User::find_by_uuid(&data.UserId, &conn) {
let user = match User::find_by_uuid(&data.UserId, &conn).await {
Some(user) => user,
None => err!("User doesn't exist"),
};
@@ -551,16 +585,16 @@ fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbC
if claims.sub != user.uuid {
err!("Invalid claim");
}
user.delete(&conn)
user.delete(&conn).await
}

#[post("/accounts/delete", data = "<data>")]
fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
delete_account(data, headers, conn)
async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
delete_account(data, headers, conn).await
}

#[delete("/accounts", data = "<data>")]
fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;

@@ -568,7 +602,7 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
err!("Invalid password")
}

user.delete(&conn)
user.delete(&conn).await
}

#[get("/accounts/revision-date")]
@@ -584,7 +618,7 @@ struct PasswordHintData {
}

#[post("/accounts/password-hint", data = "<data>")]
fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
async fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
if !CONFIG.mail_enabled() && !CONFIG.show_password_hint() {
err!("This server is not configured to provide password hints.");
}
@@ -594,19 +628,18 @@ fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResul
let data: PasswordHintData = data.into_inner().data;
let email = &data.Email;

match User::find_by_mail(email, &conn) {
match User::find_by_mail(email, &conn).await {
None => {
// To prevent user enumeration, act as if the user exists.
if CONFIG.mail_enabled() {
// There is still a timing side channel here in that the code
// paths that send mail take noticeably longer than ones that
// don't. Add a randomized sleep to mitigate this somewhat.
use rand::{thread_rng, Rng};
let mut rng = thread_rng();
let base = 1000;
use rand::{rngs::SmallRng, Rng, SeedableRng};
let mut rng = SmallRng::from_entropy();
let delta: i32 = 100;
let sleep_ms = (base + rng.gen_range(-delta..=delta)) as u64;
std::thread::sleep(std::time::Duration::from_millis(sleep_ms));
let sleep_ms = (1_000 + rng.gen_range(-delta..=delta)) as u64;
tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
Ok(())
} else {
err!(NO_HINT);
@@ -615,7 +648,7 @@ fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResul
Some(user) => {
let hint: Option<String> = user.password_hint;
if CONFIG.mail_enabled() {
mail::send_password_hint(email, hint)?;
mail::send_password_hint(email, hint).await?;
Ok(())
} else if let Some(hint) = hint {
err!(format!("Your password hint is: {}", hint));
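
The randomized sleep added above is the entire mitigation: the unknown-user branch now takes about as long as the mail-sending branch, so response time alone no longer reveals whether an account exists. A self-contained sketch of the delay, assuming tokio and the rand crate as in the diff:

    use rand::{rngs::SmallRng, Rng, SeedableRng};

    // Sleep for 1000 ms +/- 100 ms without blocking the async executor;
    // std::thread::sleep, used before this change, would stall a worker thread.
    async fn randomized_delay() {
        let mut rng = SmallRng::from_entropy();
        let delta: i32 = 100;
        let sleep_ms = (1_000 + rng.gen_range(-delta..=delta)) as u64;
        tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
    }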

@@ -628,15 +661,19 @@ fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResul

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct PreloginData {
pub struct PreloginData {
Email: String,
}

#[post("/accounts/prelogin", data = "<data>")]
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}

pub async fn _prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
let data: PreloginData = data.into_inner().data;

let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn).await {
Some(user) => (user.client_kdf_type, user.client_kdf_iter),
None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
};
@@ -666,7 +703,12 @@ fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers
Ok(())
}

fn _api_key(data: JsonUpcase<SecretVerificationRequest>, rotate: bool, headers: Headers, conn: DbConn) -> JsonResult {
async fn _api_key(
data: JsonUpcase<SecretVerificationRequest>,
rotate: bool,
headers: Headers,
conn: DbConn,
) -> JsonResult {
let data: SecretVerificationRequest = data.into_inner().data;
let mut user = headers.user;

@@ -676,7 +718,7 @@ fn _api_key(data: JsonUpcase<SecretVerificationRequest>, rotate: bool, headers:

if rotate || user.api_key.is_none() {
user.api_key = Some(crypto::generate_api_key());
user.save(&conn).expect("Error saving API key");
user.save(&conn).await.expect("Error saving API key");
}

Ok(Json(json!({
@@ -686,11 +728,11 @@ fn _api_key(data: JsonUpcase<SecretVerificationRequest>, rotate: bool, headers:
}

#[post("/accounts/api-key", data = "<data>")]
fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
_api_key(data, false, headers, conn)
async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
_api_key(data, false, headers, conn).await
}

#[post("/accounts/rotate-api-key", data = "<data>")]
fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
_api_key(data, true, headers, conn)
async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
_api_key(data, true, headers, conn).await
}

[File diff suppressed because it is too large]
@@ -1,16 +1,21 @@

use chrono::{Duration, Utc};
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use std::borrow::Borrow;

use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString},
api::{
core::{CipherSyncData, CipherSyncType},
EmptyResult, JsonResult, JsonUpcase, NumberOrString,
},
auth::{decode_emergency_access_invite, Headers},
db::{models::*, DbConn, DbPool},
mail, CONFIG,
};

use futures::{stream, stream::StreamExt};

pub fn routes() -> Vec<Route> {
routes![
get_contacts,
@@ -36,13 +41,17 @@ pub fn routes() -> Vec<Route> {
// region get

#[get("/emergency-access/trusted")]
fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult {
async fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn);

let emergency_access_list_json: Vec<Value> =
emergency_access_list.iter().map(|e| e.to_json_grantee_details(&conn)).collect();
let emergency_access_list_json =
stream::iter(EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn).await)
.then(|e| async {
let e = e; // Move out this single variable
e.to_json_grantee_details(&conn).await
})
.collect::<Vec<Value>>()
.await;

Ok(Json(json!({
"Data": emergency_access_list_json,
@@ -52,13 +61,17 @@ fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult {
}

#[get("/emergency-access/granted")]
fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult {
async fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn);

let emergency_access_list_json: Vec<Value> =
emergency_access_list.iter().map(|e| e.to_json_grantor_details(&conn)).collect();
let emergency_access_list_json =
stream::iter(EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn).await)
.then(|e| async {
let e = e; // Move out this single variable
e.to_json_grantor_details(&conn).await
})
.collect::<Vec<Value>>()
.await;

Ok(Json(json!({
"Data": emergency_access_list_json,
@@ -68,11 +81,11 @@ fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult {
}

#[get("/emergency-access/<emer_id>")]
fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult {
async fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&conn))),
match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&conn).await)),
None => err!("Emergency access not valid."),
}
}
@@ -90,17 +103,25 @@ struct EmergencyAccessUpdateData {
}

#[put("/emergency-access/<emer_id>", data = "<data>")]
fn put_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
post_emergency_access(emer_id, data, conn)
async fn put_emergency_access(
emer_id: String,
data: JsonUpcase<EmergencyAccessUpdateData>,
conn: DbConn,
) -> JsonResult {
post_emergency_access(emer_id, data, conn).await
}

#[post("/emergency-access/<emer_id>", data = "<data>")]
fn post_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
async fn post_emergency_access(
emer_id: String,
data: JsonUpcase<EmergencyAccessUpdateData>,
conn: DbConn,
) -> JsonResult {
check_emergency_access_allowed()?;

let data: EmergencyAccessUpdateData = data.into_inner().data;

let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
Some(emergency_access) => emergency_access,
None => err!("Emergency access not valid."),
};
@@ -114,7 +135,7 @@ fn post_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdate
emergency_access.wait_time_days = data.WaitTimeDays;
emergency_access.key_encrypted = data.KeyEncrypted;

emergency_access.save(&conn)?;
emergency_access.save(&conn).await?;
Ok(Json(emergency_access.to_json()))
}

@@ -123,12 +144,12 @@ fn post_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdate
// region delete

#[delete("/emergency-access/<emer_id>")]
fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
async fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;

let grantor_user = headers.user;

let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
Some(emer) => {
if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
err!("Emergency access not valid.")
@@ -137,13 +158,13 @@ fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> E
}
None => err!("Emergency access not valid."),
};
emergency_access.delete(&conn)?;
emergency_access.delete(&conn).await?;
Ok(())
}

#[post("/emergency-access/<emer_id>/delete")]
fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_emergency_access(emer_id, headers, conn)
async fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_emergency_access(emer_id, headers, conn).await
}
|
||||
|
||||
// endregion
|
||||
@@ -159,7 +180,7 @@ struct EmergencyAccessInviteData {
|
||||
}
|
||||
|
||||
#[post("/emergency-access/invite", data = "<data>")]
|
||||
fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let data: EmergencyAccessInviteData = data.into_inner().data;
|
||||
@@ -180,7 +201,7 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
|
||||
err!("You can not set yourself as an emergency contact.")
|
||||
}
|
||||
|
||||
let grantee_user = match User::find_by_mail(&email, &conn) {
|
||||
let grantee_user = match User::find_by_mail(&email, &conn).await {
|
||||
None => {
|
||||
if !CONFIG.invitations_allowed() {
|
||||
err!(format!("Grantee user does not exist: {}", email))
|
||||
@@ -192,11 +213,11 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
|
||||
|
||||
if !CONFIG.mail_enabled() {
|
||||
let invitation = Invitation::new(email.clone());
|
||||
invitation.save(&conn)?;
|
||||
invitation.save(&conn).await?;
|
||||
}
|
||||
|
||||
let mut user = User::new(email.clone());
|
||||
user.save(&conn)?;
|
||||
user.save(&conn).await?;
|
||||
user
|
||||
}
|
||||
Some(user) => user,
|
||||
@@ -208,6 +229,7 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
|
||||
&grantee_user.email,
|
||||
&conn,
|
||||
)
|
||||
.await
|
||||
.is_some()
|
||||
{
|
||||
err!(format!("Grantee user already invited: {}", email))
|
||||
@@ -220,7 +242,7 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
|
||||
new_type,
|
||||
wait_time_days,
|
||||
);
|
||||
new_emergency_access.save(&conn)?;
|
||||
new_emergency_access.save(&conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_emergency_access_invite(
|
||||
@@ -229,13 +251,14 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
|
||||
Some(new_emergency_access.uuid),
|
||||
Some(grantor_user.name.clone()),
|
||||
Some(grantor_user.email),
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
// Automatically mark user as accepted if no email invites
|
||||
match User::find_by_mail(&email, &conn) {
|
||||
match User::find_by_mail(&email, &conn).await {
|
||||
Some(user) => {
|
||||
match accept_invite_process(user.uuid, new_emergency_access.uuid, Some(email), conn.borrow()) {
|
||||
Ok(v) => (v),
|
||||
match accept_invite_process(user.uuid, new_emergency_access.uuid, Some(email), conn.borrow()).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => err!(e.to_string()),
|
||||
}
|
||||
}
|
||||
@@ -247,10 +270,10 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
|
||||
}
|
||||
|
||||
#[post("/emergency-access/<emer_id>/reinvite")]
|
||||
fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
async fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
check_emergency_access_allowed()?;
|
||||
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
|
||||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
|
||||
Some(emer) => emer,
|
||||
None => err!("Emergency access not valid."),
|
||||
};
|
||||
@@ -268,7 +291,7 @@ fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult
|
||||
None => err!("Email not valid."),
|
||||
};
|
||||
|
||||
let grantee_user = match User::find_by_mail(&email, &conn) {
|
||||
let grantee_user = match User::find_by_mail(&email, &conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Grantee user not found."),
|
||||
};
|
||||
@@ -282,16 +305,19 @@ fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult
|
||||
Some(emergency_access.uuid),
|
||||
Some(grantor_user.name.clone()),
|
||||
Some(grantor_user.email),
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
if Invitation::find_by_mail(&email, &conn).is_none() {
|
||||
if Invitation::find_by_mail(&email, &conn).await.is_none() {
|
||||
let invitation = Invitation::new(email);
|
||||
invitation.save(&conn)?;
|
||||
invitation.save(&conn).await?;
|
||||
}
|
||||
|
||||
// Automatically mark user as accepted if no email invites
|
||||
match accept_invite_process(grantee_user.uuid, emergency_access.uuid, emergency_access.email, conn.borrow()) {
|
||||
Ok(v) => (v),
|
||||
match accept_invite_process(grantee_user.uuid, emergency_access.uuid, emergency_access.email, conn.borrow())
|
||||
.await
|
||||
{
|
||||
Ok(v) => v,
|
||||
Err(e) => err!(e.to_string()),
|
||||
}
|
||||
}
|
||||
@@ -306,28 +332,28 @@ struct AcceptData {
 }

 #[post("/emergency-access/<emer_id>/accept", data = "<data>")]
-fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) -> EmptyResult {
+async fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) -> EmptyResult {
     check_emergency_access_allowed()?;

     let data: AcceptData = data.into_inner().data;
     let token = &data.Token;
     let claims = decode_emergency_access_invite(token)?;

-    let grantee_user = match User::find_by_mail(&claims.email, &conn) {
+    let grantee_user = match User::find_by_mail(&claims.email, &conn).await {
         Some(user) => {
-            Invitation::take(&claims.email, &conn);
+            Invitation::take(&claims.email, &conn).await;
             user
         }
         None => err!("Invited user not found"),
     };

-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };

     // get grantor user to send Accepted email
-    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
+    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
         Some(user) => user,
         None => err!("Grantor user not found."),
     };
@@ -336,13 +362,13 @@ fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) ->
         && (claims.grantor_name.is_some() && grantor_user.name == claims.grantor_name.unwrap())
         && (claims.grantor_email.is_some() && grantor_user.email == claims.grantor_email.unwrap())
     {
-        match accept_invite_process(grantee_user.uuid.clone(), emer_id, Some(grantee_user.email.clone()), &conn) {
-            Ok(v) => (v),
+        match accept_invite_process(grantee_user.uuid.clone(), emer_id, Some(grantee_user.email.clone()), &conn).await {
+            Ok(v) => v,
             Err(e) => err!(e.to_string()),
         }

         if CONFIG.mail_enabled() {
-            mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email)?;
+            mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
         }

         Ok(())
@@ -351,8 +377,13 @@ fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) ->
     }
 }

-fn accept_invite_process(grantee_uuid: String, emer_id: String, email: Option<String>, conn: &DbConn) -> EmptyResult {
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, conn) {
+async fn accept_invite_process(
+    grantee_uuid: String,
+    emer_id: String,
+    email: Option<String>,
+    conn: &DbConn,
+) -> EmptyResult {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -369,7 +400,7 @@ fn accept_invite_process(grantee_uuid: String, emer_id: String, email: Option<St
     emergency_access.status = EmergencyAccessStatus::Accepted as i32;
     emergency_access.grantee_uuid = Some(grantee_uuid);
     emergency_access.email = None;
-    emergency_access.save(conn)
+    emergency_access.save(conn).await
 }

 #[derive(Deserialize)]
@@ -379,7 +410,7 @@ struct ConfirmData {
 }

 #[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
-fn confirm_emergency_access(
+async fn confirm_emergency_access(
     emer_id: String,
     data: JsonUpcase<ConfirmData>,
     headers: Headers,
@@ -391,7 +422,7 @@ fn confirm_emergency_access(
     let data: ConfirmData = data.into_inner().data;
     let key = data.Key;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -402,13 +433,13 @@ fn confirm_emergency_access(
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &conn) {
+    let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &conn).await {
         Some(user) => user,
         None => err!("Grantor user not found."),
     };

     if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
-        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) {
+        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await {
             Some(user) => user,
             None => err!("Grantee user not found."),
         };
@@ -417,10 +448,10 @@ fn confirm_emergency_access(
         emergency_access.key_encrypted = Some(key);
         emergency_access.email = None;

-        emergency_access.save(&conn)?;
+        emergency_access.save(&conn).await?;

         if CONFIG.mail_enabled() {
-            mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name)?;
+            mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name).await?;
         }
         Ok(Json(emergency_access.to_json()))
     } else {
@@ -433,11 +464,11 @@ fn confirm_emergency_access(
 // region access emergency access

 #[post("/emergency-access/<emer_id>/initiate")]
-fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+async fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

     let initiating_user = headers.user;
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -448,7 +479,7 @@ fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
+    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
         Some(user) => user,
         None => err!("Grantor user not found."),
     };
@@ -458,7 +489,7 @@ fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
     emergency_access.updated_at = now;
     emergency_access.recovery_initiated_at = Some(now);
     emergency_access.last_notification_at = Some(now);
-    emergency_access.save(&conn)?;
+    emergency_access.save(&conn).await?;

     if CONFIG.mail_enabled() {
         mail::send_emergency_access_recovery_initiated(
@@ -466,17 +497,18 @@ fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
             &initiating_user.name,
             emergency_access.get_type_as_str(),
             &emergency_access.wait_time_days.clone().to_string(),
-        )?;
+        )
+        .await?;
     }
     Ok(Json(emergency_access.to_json()))
 }

 #[post("/emergency-access/<emer_id>/approve")]
-fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+async fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

     let approving_user = headers.user;
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -487,22 +519,22 @@ fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&approving_user.uuid, &conn) {
+    let grantor_user = match User::find_by_uuid(&approving_user.uuid, &conn).await {
         Some(user) => user,
         None => err!("Grantor user not found."),
     };

     if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
-        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) {
+        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await {
             Some(user) => user,
             None => err!("Grantee user not found."),
         };

         emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32;
-        emergency_access.save(&conn)?;
+        emergency_access.save(&conn).await?;

         if CONFIG.mail_enabled() {
-            mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name)?;
+            mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name).await?;
         }
         Ok(Json(emergency_access.to_json()))
     } else {
@@ -511,11 +543,11 @@ fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
 }

 #[post("/emergency-access/<emer_id>/reject")]
-fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+async fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

     let rejecting_user = headers.user;
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
+    let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -527,22 +559,22 @@ fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> J
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&rejecting_user.uuid, &conn) {
+    let grantor_user = match User::find_by_uuid(&rejecting_user.uuid, &conn).await {
         Some(user) => user,
         None => err!("Grantor user not found."),
     };

     if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
-        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) {
+        let grantee_user = match User::find_by_uuid(grantee_uuid, &conn).await {
             Some(user) => user,
             None => err!("Grantee user not found."),
         };

         emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
-        emergency_access.save(&conn)?;
+        emergency_access.save(&conn).await?;

         if CONFIG.mail_enabled() {
-            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name)?;
+            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
         }
         Ok(Json(emergency_access.to_json()))
     } else {
@@ -555,12 +587,12 @@ fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> J
 // region action

 #[post("/emergency-access/<emer_id>/view")]
-fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+async fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

     let requesting_user = headers.user;
     let host = headers.host;
-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -569,10 +601,17 @@ fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> Jso
         err!("Emergency access not valid.")
     }

-    let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn);
+    let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn).await;
+    let cipher_sync_data =
+        CipherSyncData::new(&emergency_access.grantor_uuid, &ciphers, CipherSyncType::User, &conn).await;

-    let ciphers_json: Vec<Value> =
-        ciphers.iter().map(|c| c.to_json(&host, &emergency_access.grantor_uuid, &conn)).collect();
+    let ciphers_json = stream::iter(ciphers)
+        .then(|c| async {
+            let c = c; // Move out this single variable
+            c.to_json(&host, &emergency_access.grantor_uuid, Some(&cipher_sync_data), &conn).await
+        })
+        .collect::<Vec<Value>>()
+        .await;

     Ok(Json(json!({
       "Ciphers": ciphers_json,
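The hunk above is the one structural change in this handler: `to_json` is now async, so the synchronous `iter().map(...).collect()` chain becomes a `futures` stream that awaits each conversion in order. A minimal, self-contained sketch of the same pattern (the item type and async body are placeholders, not the real cipher code):

    use futures::stream::{self, StreamExt};

    async fn render_all(items: Vec<u32>) -> Vec<String> {
        stream::iter(items)
            .then(|i| async move { format!("item {i}") }) // one .await per item, preserving order
            .collect::<Vec<String>>()
            .await
    }
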
@@ -582,11 +621,11 @@ fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> Jso
 }

 #[post("/emergency-access/<emer_id>/takeover")]
-fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+async fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
     check_emergency_access_allowed()?;

     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -595,7 +634,7 @@ fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
+    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
         Some(user) => user,
         None => err!("Grantor user not found."),
     };
@@ -616,7 +655,7 @@ struct EmergencyAccessPasswordData {
 }

 #[post("/emergency-access/<emer_id>/password", data = "<data>")]
-fn password_emergency_access(
+async fn password_emergency_access(
     emer_id: String,
     data: JsonUpcase<EmergencyAccessPasswordData>,
     headers: Headers,
@@ -629,7 +668,7 @@ fn password_emergency_access(
     let key = data.Key;

     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -638,7 +677,7 @@ fn password_emergency_access(
         err!("Emergency access not valid.")
     }

-    let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
+    let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
         Some(user) => user,
         None => err!("Grantor user not found."),
     };
@@ -646,18 +685,15 @@ fn password_emergency_access(
     // change grantor_user password
     grantor_user.set_password(new_master_password_hash, None);
     grantor_user.akey = key;
-    grantor_user.save(&conn)?;
+    grantor_user.save(&conn).await?;

     // Disable TwoFactor providers since they will otherwise block logins
-    TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn)?;
-
-    // Removing owner, check that there are at least another owner
-    let user_org_grantor = UserOrganization::find_any_state_by_user(&grantor_user.uuid, &conn);
+    TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn).await?;

     // Remove grantor from all organisations unless Owner
-    for user_org in user_org_grantor {
+    for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &conn).await {
         if user_org.atype != UserOrgType::Owner as i32 {
-            user_org.delete(&conn)?;
+            user_org.delete(&conn).await?;
         }
     }
     Ok(())
@@ -666,9 +702,9 @@ fn password_emergency_access(
 // endregion

 #[get("/emergency-access/<emer_id>/policies")]
-fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
+async fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
+    let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -677,13 +713,13 @@ fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
+    let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await {
         Some(user) => user,
         None => err!("Grantor user not found."),
     };

     let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &conn);
-    let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
+    let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();

     Ok(Json(json!({
       "Data": policies_json,
@@ -709,14 +745,14 @@ fn check_emergency_access_allowed() -> EmptyResult {
     Ok(())
 }

-pub fn emergency_request_timeout_job(pool: DbPool) {
+pub async fn emergency_request_timeout_job(pool: DbPool) {
     debug!("Start emergency_request_timeout_job");
     if !CONFIG.emergency_access_allowed() {
         return;
     }

-    if let Ok(conn) = pool.get() {
-        let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn);
+    if let Ok(conn) = pool.get().await {
+        let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn).await;

         if emergency_access_list.is_empty() {
             debug!("No emergency request timeout to approve");
@@ -725,18 +761,20 @@ pub fn emergency_request_timeout_job(pool: DbPool) {
         for mut emer in emergency_access_list {
             if emer.recovery_initiated_at.is_some()
                 && Utc::now().naive_utc()
-                    >= emer.recovery_initiated_at.unwrap() + Duration::days(emer.wait_time_days as i64)
+                    >= emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days))
             {
                 emer.status = EmergencyAccessStatus::RecoveryApproved as i32;
-                emer.save(&conn).expect("Cannot save emergency access on job");
+                emer.save(&conn).await.expect("Cannot save emergency access on job");

                 if CONFIG.mail_enabled() {
                     // get grantor user to send Accepted email
-                    let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found.");
+                    let grantor_user =
+                        User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found.");

                     // get grantee user to send Accepted email
                     let grantee_user =
                         User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
+                            .await
                             .expect("Grantee user not found.");

                     mail::send_emergency_access_recovery_timed_out(
@@ -744,9 +782,11 @@ pub fn emergency_request_timeout_job(pool: DbPool) {
                         &grantee_user.name.clone(),
                         emer.get_type_as_str(),
                     )
+                    .await
                     .expect("Error on sending email");

                     mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name.clone())
+                        .await
                         .expect("Error on sending email");
                 }
             }
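The approval condition in this job reduces to "now >= recovery_initiated_at + wait_time_days". A standalone sketch of that comparison (chrono only; the argument values are hypothetical):

    use chrono::{Duration, NaiveDateTime, Utc};

    fn recovery_wait_elapsed(initiated_at: NaiveDateTime, wait_time_days: i32) -> bool {
        // Mirrors the job above: widen i32 -> i64 losslessly, then compare timestamps.
        Utc::now().naive_utc() >= initiated_at + Duration::days(i64::from(wait_time_days))
    }
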
@@ -756,14 +796,14 @@ pub fn emergency_request_timeout_job(pool: DbPool) {
     }
 }

-pub fn emergency_notification_reminder_job(pool: DbPool) {
+pub async fn emergency_notification_reminder_job(pool: DbPool) {
     debug!("Start emergency_notification_reminder_job");
     if !CONFIG.emergency_access_allowed() {
         return;
     }

-    if let Ok(conn) = pool.get() {
-        let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn);
+    if let Ok(conn) = pool.get().await {
+        let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn).await;

         if emergency_access_list.is_empty() {
             debug!("No emergency request reminder notification to send");
@@ -772,20 +812,22 @@ pub fn emergency_notification_reminder_job(pool: DbPool) {
         for mut emer in emergency_access_list {
             if (emer.recovery_initiated_at.is_some()
                 && Utc::now().naive_utc()
-                    >= emer.recovery_initiated_at.unwrap() + Duration::days((emer.wait_time_days as i64) - 1))
+                    >= emer.recovery_initiated_at.unwrap() + Duration::days((i64::from(emer.wait_time_days)) - 1))
                 && (emer.last_notification_at.is_none()
                     || (emer.last_notification_at.is_some()
                         && Utc::now().naive_utc() >= emer.last_notification_at.unwrap() + Duration::days(1)))
             {
-                emer.save(&conn).expect("Cannot save emergency access on job");
+                emer.save(&conn).await.expect("Cannot save emergency access on job");

                 if CONFIG.mail_enabled() {
                     // get grantor user to send Accepted email
-                    let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found.");
+                    let grantor_user =
+                        User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found.");

                     // get grantee user to send Accepted email
                     let grantee_user =
                         User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
+                            .await
                             .expect("Grantee user not found.");

                     mail::send_emergency_access_recovery_reminder(
@@ -794,6 +836,7 @@ pub fn emergency_notification_reminder_job(pool: DbPool) {
                         emer.get_type_as_str(),
                         &emer.wait_time_days.to_string(), // TODO(jjlin): This should be the number of days left.
                     )
+                    .await
                     .expect("Error on sending email");
                 }
             }

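A small pattern recurs across both jobs: `emer.wait_time_days as i64` becomes `i64::from(emer.wait_time_days)`. Both are lossless here, but `i64::from` only exists for conversions that can never truncate, so it fails to compile, rather than silently truncating, if the field's type ever changes unfavourably. In short:

    let days: i32 = 7;
    let a = days as i64;     // always compiles; could silently truncate after a type change
    let b = i64::from(days); // only compiles while the conversion is provably lossless
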
@@ -1,4 +1,4 @@
-use rocket_contrib::json::Json;
+use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
@@ -12,9 +12,8 @@ pub fn routes() -> Vec<rocket::Route> {
 }

 #[get("/folders")]
-fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
-    let folders = Folder::find_by_user(&headers.user.uuid, &conn);
-
+async fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
+    let folders = Folder::find_by_user(&headers.user.uuid, &conn).await;
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

     Json(json!({
@@ -25,8 +24,8 @@ fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
 }

 #[get("/folders/<uuid>")]
-fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
-    let folder = match Folder::find_by_uuid(&uuid, &conn) {
+async fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
+    let folder = match Folder::find_by_uuid(&uuid, &conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -45,27 +44,39 @@ pub struct FolderData {
 }

 #[post("/folders", data = "<data>")]
-fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     let data: FolderData = data.into_inner().data;

     let mut folder = Folder::new(headers.user.uuid, data.Name);

-    folder.save(&conn)?;
-    nt.send_folder_update(UpdateType::FolderCreate, &folder);
+    folder.save(&conn).await?;
+    nt.send_folder_update(UpdateType::FolderCreate, &folder).await;

     Ok(Json(folder.to_json()))
 }

 #[post("/folders/<uuid>", data = "<data>")]
-fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    put_folder(uuid, data, headers, conn, nt)
+async fn post_folder(
+    uuid: String,
+    data: JsonUpcase<FolderData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    put_folder(uuid, data, headers, conn, nt).await
 }

 #[put("/folders/<uuid>", data = "<data>")]
-fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+async fn put_folder(
+    uuid: String,
+    data: JsonUpcase<FolderData>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
     let data: FolderData = data.into_inner().data;

-    let mut folder = match Folder::find_by_uuid(&uuid, &conn) {
+    let mut folder = match Folder::find_by_uuid(&uuid, &conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -76,20 +87,20 @@ fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn

     folder.name = data.Name;

-    folder.save(&conn)?;
-    nt.send_folder_update(UpdateType::FolderUpdate, &folder);
+    folder.save(&conn).await?;
+    nt.send_folder_update(UpdateType::FolderUpdate, &folder).await;

     Ok(Json(folder.to_json()))
 }

 #[post("/folders/<uuid>/delete")]
-fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    delete_folder(uuid, headers, conn, nt)
+async fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    delete_folder(uuid, headers, conn, nt).await
 }

 #[delete("/folders/<uuid>")]
-fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    let folder = match Folder::find_by_uuid(&uuid, &conn) {
+async fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    let folder = match Folder::find_by_uuid(&uuid, &conn).await {
         Some(folder) => folder,
         _ => err!("Invalid folder"),
     };
@@ -99,8 +110,8 @@ fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> Em
     }

     // Delete the actual folder entry
-    folder.delete(&conn)?;
+    folder.delete(&conn).await?;

-    nt.send_folder_update(UpdateType::FolderDelete, &folder);
+    nt.send_folder_update(UpdateType::FolderDelete, &folder).await;
     Ok(())
 }

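The other mechanical change in this file is `nt: Notify` becoming `nt: Notify<'_>`: the notifier type now carries a lifetime parameter, and `'_` lets each handler elide it rather than name it. A sketch of the general shape (the struct here is a stand-in, not the real `Notify`):

    struct Notify<'a> {
        tag: &'a str, // borrowed state is what gives the type its lifetime parameter
    }

    // `Notify<'_>` in argument position elides the lifetime instead of naming it.
    async fn handler(nt: Notify<'_>) {
        println!("notifying {}", nt.tag);
    }
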
@@ -1,4 +1,4 @@
-mod accounts;
+pub mod accounts;
 mod ciphers;
 mod emergency_access;
 mod folders;
@@ -7,13 +7,16 @@ mod sends;
 pub mod two_factor;

 pub use ciphers::purge_trashed_ciphers;
+pub use ciphers::{CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use sends::purge_sends;
 pub use two_factor::send_incomplete_2fa_notifications;

 pub fn routes() -> Vec<Route> {
-    let mut mod_routes =
-        routes![clear_device_token, put_device_token, get_eq_domains, post_eq_domains, put_eq_domains, hibp_breach,];
+    let mut device_token_routes = routes![clear_device_token, put_device_token];
+    let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
+    let mut hibp_routes = routes![hibp_breach];
+    let mut meta_routes = routes![alive, now, version, config];

     let mut routes = Vec::new();
     routes.append(&mut accounts::routes());
@@ -23,7 +26,10 @@ pub fn routes() -> Vec<Route> {
     routes.append(&mut organizations::routes());
     routes.append(&mut two_factor::routes());
     routes.append(&mut sends::routes());
-    routes.append(&mut mod_routes);
+    routes.append(&mut device_token_routes);
+    routes.append(&mut eq_domains_routes);
+    routes.append(&mut hibp_routes);
+    routes.append(&mut meta_routes);

     routes
 }
@@ -31,8 +37,9 @@ pub fn routes() -> Vec<Route> {
 //
 // Move this somewhere else
 //
+use rocket::serde::json::Json;
+use rocket::Catcher;
 use rocket::Route;
-use rocket_contrib::json::Json;
 use serde_json::Value;

 use crate::{
@@ -121,7 +128,7 @@ struct EquivDomainData {
 }

 #[post("/settings/domains", data = "<data>")]
-fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: EquivDomainData = data.into_inner().data;

     let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
@@ -133,18 +140,18 @@ fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: Db
     user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string());
     user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string());

-    user.save(&conn)?;
+    user.save(&conn).await?;

     Ok(Json(json!({})))
 }

 #[put("/settings/domains", data = "<data>")]
-fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
-    post_eq_domains(data, headers, conn)
+async fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
+    post_eq_domains(data, headers, conn).await
 }

 #[get("/hibp/breach?<username>")]
-fn hibp_breach(username: String) -> JsonResult {
+async fn hibp_breach(username: String) -> JsonResult {
     let url = format!(
         "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
         username
@@ -153,14 +160,14 @@ fn hibp_breach(username: String) -> JsonResult {
     if let Some(api_key) = crate::CONFIG.hibp_api_key() {
         let hibp_client = get_reqwest_client();

-        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send()?;
+        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;

         // If we get a 404, return a 404, it means no breached accounts
         if res.status() == 404 {
             return Err(Error::empty().with_code(404));
         }

-        let value: Value = res.error_for_status()?.json()?;
+        let value: Value = res.error_for_status()?.json().await?;
         Ok(Json(value))
     } else {
         Ok(Json(json!([{
@@ -178,3 +185,54 @@ fn hibp_breach(username: String) -> JsonResult {
         }])))
     }
 }
+
+// We use DbConn here to let the alive healthcheck also verify the database connection.
+#[get("/alive")]
+fn alive(_conn: DbConn) -> Json<String> {
+    now()
+}
+
+#[get("/now")]
+pub fn now() -> Json<String> {
+    Json(crate::util::format_date(&chrono::Utc::now().naive_utc()))
+}
+
+#[get("/version")]
+fn version() -> Json<&'static str> {
+    Json(crate::VERSION.unwrap_or_default())
+}
+
+#[get("/config")]
+fn config() -> Json<Value> {
+    let domain = crate::CONFIG.domain();
+    Json(json!({
+        "version": crate::VERSION,
+        "gitHash": option_env!("GIT_REV"),
+        "server": {
+            "name": "Vaultwarden",
+            "url": "https://github.com/dani-garcia/vaultwarden"
+        },
+        "environment": {
+            "vault": domain,
+            "api": format!("{domain}/api"),
+            "identity": format!("{domain}/identity"),
+            "notifications": format!("{domain}/notifications"),
+            "sso": "",
+        },
+    }))
+}
+
+pub fn catchers() -> Vec<Catcher> {
+    catchers![api_not_found]
+}
+
+#[catch(404)]
+fn api_not_found() -> Json<Value> {
+    Json(json!({
+        "error": {
+            "code": 404,
+            "reason": "Not Found",
+            "description": "The requested resource could not be found."
+        }
+    }))
+}

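Because `alive` takes a `DbConn` request guard, the route can only answer 200 once a database connection has actually been acquired, so an external monitor can treat any other outcome as unhealthy. A hedged client-side sketch (reqwest and the `/api` mount prefix are assumptions here, not part of the diff):

    // Returns true when both the web server and the database answer.
    async fn is_alive(base: &str) -> bool {
        match reqwest::get(format!("{base}/api/alive")).await {
            Ok(resp) => resp.status().is_success(),
            Err(_) => false,
        }
    }
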
File diff suppressed because it is too large
@@ -1,14 +1,15 @@
-use std::{io::Read, path::Path};
+use std::path::Path;

 use chrono::{DateTime, Duration, Utc};
-use multipart::server::{save::SavedData, Multipart, SaveResult};
-use rocket::{http::ContentType, response::NamedFile, Data};
-use rocket_contrib::json::Json;
+use rocket::form::Form;
+use rocket::fs::NamedFile;
+use rocket::fs::TempFile;
+use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
     api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
-    auth::{Headers, Host},
+    auth::{ClientIp, Headers, Host},
     db::{models::*, DbConn, DbPool},
     util::SafeString,
     CONFIG,
@@ -16,6 +17,9 @@ use crate::{

 const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

+// The max file size allowed by Bitwarden clients, plus an extra 5% to avoid issues
+const SIZE_525_MB: u64 = 550_502_400;
+
 pub fn routes() -> Vec<rocket::Route> {
     routes![
         get_sends,
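The constant is easier to audit written out: the client-side cap is 500 MiB, and the extra 5% headroom gives 524,288,000 x 1.05 = 550,502,400 bytes, which is exactly 525 MiB, hence the constant's name. As a compile-time restatement of that arithmetic:

    const SIZE_525_MB: u64 = 550_502_400;
    // 500 MiB plus 5% headroom is exactly 525 MiB:
    const _: () = assert!(SIZE_525_MB == 525 * 1024 * 1024);
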
@@ -27,14 +31,16 @@ pub fn routes() -> Vec<rocket::Route> {
         put_send,
         delete_send,
         put_remove_password,
-        download_send
+        download_send,
+        post_send_file_v2,
+        post_send_file_v2_data
     ]
 }

-pub fn purge_sends(pool: DbPool) {
+pub async fn purge_sends(pool: DbPool) {
     debug!("Purging sends");
-    if let Ok(conn) = pool.get() {
-        Send::purge(&conn);
+    if let Ok(conn) = pool.get().await {
+        Send::purge(&conn).await;
     } else {
         error!("Failed to get DB connection while purging sends")
     }
@@ -57,6 +63,7 @@ struct SendData {
     Notes: Option<String>,
     Text: Option<Value>,
     File: Option<Value>,
+    FileLength: Option<NumberOrString>,
 }

 /// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@@ -67,10 +74,11 @@ struct SendData {
 ///
 /// There is also a Vaultwarden-specific `sends_allowed` config setting that
 /// controls this policy globally.
-fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
+async fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
     let user_uuid = &headers.user.uuid;
-    let policy_type = OrgPolicyType::DisableSend;
-    if !CONFIG.sends_allowed() || OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
+    if !CONFIG.sends_allowed()
+        || OrgPolicy::is_applicable_to_user(user_uuid, OrgPolicyType::DisableSend, None, conn).await
+    {
         err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
     }
     Ok(())
@@ -82,10 +90,10 @@ fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult
 /// but is allowed to remove this option from an existing Send.
 ///
 /// Ref: https://bitwarden.com/help/article/policies/#send-options
-fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult {
+async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult {
     let user_uuid = &headers.user.uuid;
     let hide_email = data.HideEmail.unwrap_or(false);
-    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn) {
+    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
         err!(
             "Due to an Enterprise Policy, you are not allowed to hide your email address \
              from recipients when creating or editing a Send."
@@ -134,9 +142,9 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
 }

 #[get("/sends")]
-fn get_sends(headers: Headers, conn: DbConn) -> Json<Value> {
+async fn get_sends(headers: Headers, conn: DbConn) -> Json<Value> {
     let sends = Send::find_by_user(&headers.user.uuid, &conn);
-    let sends_json: Vec<Value> = sends.iter().map(|s| s.to_json()).collect();
+    let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();

     Json(json!({
       "Data": sends_json,
@@ -146,8 +154,8 @@ fn get_sends(headers: Headers, conn: DbConn) -> Json<Value> {
 }

 #[get("/sends/<uuid>")]
-fn get_send(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
-    let send = match Send::find_by_uuid(&uuid, &conn) {
+async fn get_send(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
+    let send = match Send::find_by_uuid(&uuid, &conn).await {
         Some(send) => send,
         None => err!("Send not found"),
     };
@@ -160,50 +168,53 @@ fn get_send(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
 }

 #[post("/sends", data = "<data>")]
-fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    enforce_disable_send_policy(&headers, &conn)?;
+async fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
+    enforce_disable_send_policy(&headers, &conn).await?;

     let data: SendData = data.into_inner().data;
-    enforce_disable_hide_email_policy(&data, &headers, &conn)?;
+    enforce_disable_hide_email_policy(&data, &headers, &conn).await?;

     if data.Type == SendType::File as i32 {
         err!("File sends should use /api/sends/file")
     }

     let mut send = create_send(data, headers.user.uuid)?;
-    send.save(&conn)?;
-    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&conn));
+    send.save(&conn).await?;
+    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&conn).await).await;

     Ok(Json(send.to_json()))
 }

+#[derive(FromForm)]
+struct UploadData<'f> {
+    model: Json<crate::util::UpCase<SendData>>,
+    data: TempFile<'f>,
+}
+
+#[derive(FromForm)]
+struct UploadDataV2<'f> {
+    data: TempFile<'f>,
+}
+
+// @deprecated Mar 25 2021: This method has been deprecated in favor of direct uploads (v2).
+// This method still exists to support older clients, probably need to remove it sometime.
+// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L164-L167
 #[post("/sends/file", format = "multipart/form-data", data = "<data>")]
-fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    enforce_disable_send_policy(&headers, &conn)?;
+async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
+    enforce_disable_send_policy(&headers, &conn).await?;

-    let boundary = content_type.params().next().expect("No boundary provided").1;
+    let UploadData {
+        model,
+        mut data,
+    } = data.into_inner();
+    let model = model.into_inner().data;

-    let mut mpart = Multipart::with_body(data.open(), boundary);
-
-    // First entry is the SendData JSON
-    let mut model_entry = match mpart.read_entry()? {
-        Some(e) if &*e.headers.name == "model" => e,
-        Some(_) => err!("Invalid entry name"),
-        None => err!("No model entry present"),
-    };
-
-    let mut buf = String::new();
-    model_entry.data.read_to_string(&mut buf)?;
-    let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;
-    enforce_disable_hide_email_policy(&data.data, &headers, &conn)?;
-
-    // Get the file length and add an extra 5% to avoid issues
-    const SIZE_525_MB: u64 = 550_502_400;
+    enforce_disable_hide_email_policy(&model, &headers, &conn).await?;

     let size_limit = match CONFIG.user_attachment_limit() {
         Some(0) => err!("File uploads are disabled"),
         Some(limit_kb) => {
-            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
+            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn).await;
             if left <= 0 {
                 err!("Attachment storage limit reached! Delete some attachments to free up space")
             }
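The quota check above works in bytes: `user_attachment_limit` is configured in kibibytes, so the remaining allowance is `limit_kb * 1024` minus the bytes the user already stores, and a non-positive remainder rejects the upload before any file data is read. Worked through with hypothetical numbers:

    let limit_kb: i64 = 100_000;        // configured limit: 100,000 KiB (about 97.6 MiB)
    let already_used: i64 = 94_371_840; // bytes in use, as Attachment::size_by_user would report
    let left = (limit_kb * 1024) - already_used;
    assert_eq!(left, 8_028_160); // bytes still available; `left <= 0` would reject the upload
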
@@ -212,55 +223,152 @@ fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn
         None => SIZE_525_MB,
     };

-    // Create the Send
-    let mut send = create_send(data.data, headers.user.uuid)?;
-    let file_id = crate::crypto::generate_send_id();
-
+    let mut send = create_send(model, headers.user.uuid)?;
     if send.atype != SendType::File as i32 {
         err!("Send content is not a file");
     }

-    let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id);
+    // There is a bug regarding uploading attachments/sends using the Mobile clients
+    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
+    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
+    // On the vaultwarden side this is temporarily fixed by using a custom multer library
+    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
+    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
+    if let TempFile::Buffered {
+        content: _,
+    } = &data
+    {
+        err!("Error reading send file data. Please try another client.");
+    }

-    // Read the data entry and save the file
-    let mut data_entry = match mpart.read_entry()? {
-        Some(e) if &*e.headers.name == "data" => e,
-        Some(_) => err!("Invalid entry name"),
-        None => err!("No model entry present"),
-    };
+    let size = data.len();
+    if size > size_limit {
+        err!("Attachment storage limit exceeded with this file");
+    }

-    let size = match data_entry.data.save().memory_threshold(0).size_limit(size_limit).with_path(&file_path) {
-        SaveResult::Full(SavedData::File(_, size)) => size as i32,
-        SaveResult::Full(other) => {
-            std::fs::remove_file(&file_path).ok();
-            err!(format!("Attachment is not a file: {:?}", other));
-        }
-        SaveResult::Partial(_, reason) => {
-            std::fs::remove_file(&file_path).ok();
-            err!(format!("Attachment storage limit exceeded with this file: {:?}", reason));
-        }
-        SaveResult::Error(e) => {
-            std::fs::remove_file(&file_path).ok();
-            err!(format!("Error: {:?}", e));
-        }
-    };
+    let file_id = crate::crypto::generate_send_id();
+    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
+    let file_path = folder_path.join(&file_id);
+    tokio::fs::create_dir_all(&folder_path).await?;
+
+    if let Err(_err) = data.persist_to(&file_path).await {
+        data.move_copy_to(file_path).await?
+    }

     // Set ID and sizes
     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
         o.insert(String::from("Id"), Value::String(file_id));
         o.insert(String::from("Size"), Value::Number(size.into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
+        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
     }
     send.data = serde_json::to_string(&data_value)?;

     // Save the changes in the database
-    send.save(&conn)?;
-    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn));
+    send.save(&conn).await?;
+    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&conn).await).await;

     Ok(Json(send.to_json()))
 }

+// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
+#[post("/sends/file/v2", data = "<data>")]
+async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn) -> JsonResult {
+    enforce_disable_send_policy(&headers, &conn).await?;
+
+    let data = data.into_inner().data;
+
+    if data.Type != SendType::File as i32 {
+        err!("Send content is not a file");
+    }
+
+    enforce_disable_hide_email_policy(&data, &headers, &conn).await?;
+
+    let file_length = match &data.FileLength {
+        Some(m) => Some(m.into_i32()?),
+        _ => None,
+    };
+
+    let size_limit = match CONFIG.user_attachment_limit() {
+        Some(0) => err!("File uploads are disabled"),
+        Some(limit_kb) => {
+            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn).await;
+            if left <= 0 {
+                err!("Attachment storage limit reached! Delete some attachments to free up space")
+            }
+            std::cmp::Ord::max(left as u64, SIZE_525_MB)
+        }
+        None => SIZE_525_MB,
+    };
+
+    if file_length.is_some() && file_length.unwrap() as u64 > size_limit {
+        err!("Attachment storage limit exceeded with this file");
+    }
+
+    let mut send = create_send(data, headers.user.uuid)?;
+
+    let file_id = crate::crypto::generate_send_id();
+
+    let mut data_value: Value = serde_json::from_str(&send.data)?;
+    if let Some(o) = data_value.as_object_mut() {
+        o.insert(String::from("Id"), Value::String(file_id.clone()));
+        o.insert(String::from("Size"), Value::Number(file_length.unwrap().into()));
+        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap())));
+    }
+    send.data = serde_json::to_string(&data_value)?;
+    send.save(&conn).await?;
+
+    Ok(Json(json!({
+        "fileUploadType": 0, // 0 == Direct | 1 == Azure
+        "object": "send-fileUpload",
+        "url": format!("/sends/{}/file/{}", send.uuid, file_id),
+        "sendResponse": send.to_json()
+    })))
+}
+
+// https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L243
+#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
+async fn post_send_file_v2_data(
+    send_uuid: String,
+    file_id: String,
+    data: Form<UploadDataV2<'_>>,
+    headers: Headers,
+    conn: DbConn,
+    nt: Notify<'_>,
+) -> EmptyResult {
+    enforce_disable_send_policy(&headers, &conn).await?;
+
+    let mut data = data.into_inner();
+
+    // There is a bug regarding uploading attachments/sends using the Mobile clients
+    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
+    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
+    // On the vaultwarden side this is temporarily fixed by using a custom multer library
+    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
+    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
+    if let TempFile::Buffered {
+        content: _,
+    } = &data.data
+    {
+        err!("Error reading attachment data. Please try another client.");
+    }
+
+    if let Some(send) = Send::find_by_uuid(&send_uuid, &conn).await {
+        let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send_uuid);
+        let file_path = folder_path.join(&file_id);
+        tokio::fs::create_dir_all(&folder_path).await?;
+
+        if let Err(_err) = data.data.persist_to(&file_path).await {
+            data.data.move_copy_to(file_path).await?
+        }
+
+        nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&conn).await).await;
+    } else {
+        err!("Send not found. Unable to save the file.");
+    }
+
+    Ok(())
+}
+
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 pub struct SendAccessData {
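Taken together, the two new routes implement Bitwarden's direct-upload handshake: the client first POSTs the Send metadata (including `FileLength`) to `/sends/file/v2`, receives the server-chosen `url`, then POSTs the multipart file body to that URL. A hedged sketch of the client's half of the exchange (reqwest usage, the `/api` prefix, and auth handling are illustrative assumptions, not a verbatim client):

    use serde_json::Value;

    async fn upload_send(base: &str, metadata: Value, bytes: Vec<u8>) -> Result<(), reqwest::Error> {
        let client = reqwest::Client::new();
        // Step 1: create the Send and learn where to put the file (auth headers omitted).
        let created: Value = client
            .post(format!("{base}/api/sends/file/v2"))
            .json(&metadata) // must carry Type = 1 (file) and FileLength
            .send()
            .await?
            .json()
            .await?;
        let url = created["url"].as_str().unwrap_or_default();
        // Step 2: upload the bytes as the "data" part of a multipart form.
        let form = reqwest::multipart::Form::new().part("data", reqwest::multipart::Part::bytes(bytes));
        client.post(format!("{base}/api{url}")).multipart(form).send().await?;
        Ok(())
    }
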
@@ -268,8 +376,8 @@ pub struct SendAccessData {
 }

 #[post("/sends/access/<access_id>", data = "<data>")]
-fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn) -> JsonResult {
-    let mut send = match Send::find_by_access_id(&access_id, &conn) {
+async fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn, ip: ClientIp) -> JsonResult {
+    let mut send = match Send::find_by_access_id(&access_id, &conn).await {
         Some(s) => s,
         None => err_code!(SEND_INACCESSIBLE_MSG, 404),
     };
@@ -297,8 +405,8 @@ fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn
     if send.password_hash.is_some() {
         match data.into_inner().data.Password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
-            Some(_) => err!("Invalid password."),
-            None => err_code!("Password not provided", 401),
+            Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
+            None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),
         }
     }
|
||||
@@ -307,20 +415,20 @@ fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn
|
||||
send.access_count += 1;
|
||||
}
|
||||
|
||||
send.save(&conn)?;
|
||||
send.save(&conn).await?;
|
||||
|
||||
Ok(Json(send.to_json_access(&conn)))
|
||||
Ok(Json(send.to_json_access(&conn).await))
|
||||
}
|
||||
|
||||
#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
|
||||
fn post_access_file(
|
||||
async fn post_access_file(
|
||||
send_id: String,
|
||||
file_id: String,
|
||||
data: JsonUpcase<SendAccessData>,
|
||||
host: Host,
|
||||
conn: DbConn,
|
||||
) -> JsonResult {
|
||||
let mut send = match Send::find_by_uuid(&send_id, &conn) {
|
||||
let mut send = match Send::find_by_uuid(&send_id, &conn).await {
|
||||
Some(s) => s,
|
||||
None => err_code!(SEND_INACCESSIBLE_MSG, 404),
|
||||
};
|
||||
@@ -355,7 +463,7 @@ fn post_access_file(
|
||||
|
||||
send.access_count += 1;
|
||||
|
||||
send.save(&conn)?;
|
||||
send.save(&conn).await?;
|
||||
|
||||
let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
|
||||
let token = crate::auth::encode_jwt(&token_claims);
|
||||
@@ -367,23 +475,29 @@ fn post_access_file(
|
||||
}
|
||||
|
||||
#[get("/sends/<send_id>/<file_id>?<t>")]
|
||||
fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
|
||||
async fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
|
||||
if let Ok(claims) = crate::auth::decode_send(&t) {
|
||||
if claims.sub == format!("{}/{}", send_id, file_id) {
|
||||
return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).ok();
|
||||
return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok();
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
#[put("/sends/<id>", data = "<data>")]
|
||||
fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
|
||||
enforce_disable_send_policy(&headers, &conn)?;
|
||||
async fn put_send(
|
||||
id: String,
|
||||
data: JsonUpcase<SendData>,
|
||||
headers: Headers,
|
||||
conn: DbConn,
|
||||
nt: Notify<'_>,
|
||||
) -> JsonResult {
|
||||
enforce_disable_send_policy(&headers, &conn).await?;
|
||||
|
||||
let data: SendData = data.into_inner().data;
|
||||
enforce_disable_hide_email_policy(&data, &headers, &conn)?;
|
||||
enforce_disable_hide_email_policy(&data, &headers, &conn).await?;
|
||||
|
||||
let mut send = match Send::find_by_uuid(&id, &conn) {
|
||||
let mut send = match Send::find_by_uuid(&id, &conn).await {
|
||||
Some(s) => s,
|
||||
None => err!("Send not found"),
|
||||
};
|
||||
@@ -430,15 +544,15 @@ fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbCo
|
||||
send.set_password(Some(&password));
|
||||
}
|
||||
|
||||
send.save(&conn)?;
|
||||
nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn));
|
||||
send.save(&conn).await?;
|
||||
nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn).await).await;
|
||||
|
||||
Ok(Json(send.to_json()))
|
||||
}
|
||||
|
||||
#[delete("/sends/<id>")]
|
||||
fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
|
||||
let send = match Send::find_by_uuid(&id, &conn) {
|
||||
async fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
|
||||
let send = match Send::find_by_uuid(&id, &conn).await {
|
||||
Some(s) => s,
|
||||
None => err!("Send not found"),
|
||||
};
|
||||
@@ -447,17 +561,17 @@ fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyR
|
||||
err!("Send is not owned by user")
|
||||
}
|
||||
|
||||
send.delete(&conn)?;
|
||||
nt.send_send_update(UpdateType::SyncSendDelete, &send, &send.update_users_revision(&conn));
|
||||
send.delete(&conn).await?;
|
||||
nt.send_send_update(UpdateType::SyncSendDelete, &send, &send.update_users_revision(&conn).await).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[put("/sends/<id>/remove-password")]
|
||||
fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
|
||||
enforce_disable_send_policy(&headers, &conn)?;
|
||||
async fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||
enforce_disable_send_policy(&headers, &conn).await?;
|
||||
|
||||
let mut send = match Send::find_by_uuid(&id, &conn) {
|
||||
let mut send = match Send::find_by_uuid(&id, &conn).await {
|
||||
Some(s) => s,
|
||||
None => err!("Send not found"),
|
||||
};
|
||||
@@ -467,8 +581,8 @@ fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -
|
||||
}
|
||||
|
||||
send.set_password(None);
|
||||
send.save(&conn)?;
|
||||
nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn));
|
||||
send.save(&conn).await?;
|
||||
nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn).await).await;
|
||||
|
||||
Ok(Json(send.to_json()))
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
 use data_encoding::BASE32;
+use rocket::serde::json::Json;
 use rocket::Route;
-use rocket_contrib::json::Json;

 use crate::{
     api::{
@@ -21,7 +21,7 @@ pub fn routes() -> Vec<Route> {
 }

 #[post("/two-factor/get-authenticator", data = "<data>")]
-fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
     let data: PasswordData = data.into_inner().data;
     let user = headers.user;

@@ -30,7 +30,7 @@ fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn
     }

     let type_ = TwoFactorType::Authenticator as i32;
-    let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn);
+    let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await;

     let (enabled, key) = match twofactor {
         Some(tf) => (true, tf.data),
@@ -53,7 +53,7 @@ struct EnableAuthenticatorData {
 }

 #[post("/two-factor/authenticator", data = "<data>")]
-fn activate_authenticator(
+async fn activate_authenticator(
     data: JsonUpcase<EnableAuthenticatorData>,
     headers: Headers,
     ip: ClientIp,
@@ -81,9 +81,9 @@ fn activate_authenticator(
     }

     // Validate the token provided with the key, and save new twofactor
-    validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &conn)?;
+    validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &conn).await?;

-    _generate_recover_code(&mut user, &conn);
+    _generate_recover_code(&mut user, &conn).await;

     Ok(Json(json!({
         "Enabled": true,
@@ -93,16 +93,16 @@ fn activate_authenticator(
 }

 #[put("/two-factor/authenticator", data = "<data>")]
-fn activate_authenticator_put(
+async fn activate_authenticator_put(
     data: JsonUpcase<EnableAuthenticatorData>,
     headers: Headers,
     ip: ClientIp,
     conn: DbConn,
 ) -> JsonResult {
-    activate_authenticator(data, headers, ip, conn)
+    activate_authenticator(data, headers, ip, conn).await
 }

-pub fn validate_totp_code_str(
+pub async fn validate_totp_code_str(
     user_uuid: &str,
     totp_code: &str,
     secret: &str,
@@ -113,10 +113,16 @@ pub fn validate_totp_code_str(
|
||||
err!("TOTP code is not a number");
|
||||
}
|
||||
|
||||
validate_totp_code(user_uuid, totp_code, secret, ip, conn)
|
||||
validate_totp_code(user_uuid, totp_code, secret, ip, conn).await
|
||||
}
|
||||
|
||||
pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
|
||||
pub async fn validate_totp_code(
|
||||
user_uuid: &str,
|
||||
totp_code: &str,
|
||||
secret: &str,
|
||||
ip: &ClientIp,
|
||||
conn: &DbConn,
|
||||
) -> EmptyResult {
|
||||
use totp_lite::{totp_custom, Sha1};
|
||||
|
||||
let decoded_secret = match BASE32.decode(secret.as_bytes()) {
|
||||
@@ -124,15 +130,16 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &C
|
||||
Err(_) => err!("Invalid TOTP secret"),
|
||||
};
|
||||
|
||||
let mut twofactor = match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn) {
|
||||
Some(tf) => tf,
|
||||
_ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
|
||||
};
|
||||
let mut twofactor =
|
||||
match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await {
|
||||
Some(tf) => tf,
|
||||
_ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
|
||||
};
|
||||
|
||||
// The amount of steps back and forward in time
|
||||
// Also check if we need to disable time drifted TOTP codes.
|
||||
// If that is the case, we set the steps to 0 so only the current TOTP is valid.
|
||||
let steps = !CONFIG.authenticator_disable_time_drift() as i64;
|
||||
let steps = i64::from(!CONFIG.authenticator_disable_time_drift());
|
||||
|
||||
// Get the current system time in UNIX Epoch (UTC)
|
||||
let current_time = chrono::Utc::now();
|
||||
@@ -147,7 +154,7 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &C
|
||||
let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);
|
||||
|
||||
// Check the the given code equals the generated and if the time_step is larger then the one last used.
|
||||
if generated == totp_code && time_step > twofactor.last_used as i64 {
|
||||
if generated == totp_code && time_step > i64::from(twofactor.last_used) {
|
||||
// If the step does not equals 0 the time is drifted either server or client side.
|
||||
if step != 0 {
|
||||
warn!("TOTP Time drift detected. The step offset is {}", step);
|
||||
@@ -156,9 +163,9 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &C
|
||||
// Save the last used time step so only totp time steps higher then this one are allowed.
|
||||
// This will also save a newly created twofactor if the code is correct.
|
||||
twofactor.last_used = time_step as i32;
|
||||
twofactor.save(conn)?;
|
||||
twofactor.save(conn).await?;
|
||||
return Ok(());
|
||||
} else if generated == totp_code && time_step <= twofactor.last_used as i64 {
|
||||
} else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
|
||||
warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
|
||||
err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
|
||||
}
|
||||
|
||||
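
The drift handling above allows one 30-second step on either side of the current time unless `AUTHENTICATOR_DISABLE_TIME_DRIFT` is set, and it rejects any step at or below the last one consumed so a code cannot be replayed. A standalone sketch of the same window check (`totp_custom` from totp_lite is the call used above; the wrapper function itself is hypothetical):

use totp_lite::{totp_custom, Sha1};

// Returns the accepted time step, or None if the code is wrong or replayed.
fn totp_step_match(code: &str, secret: &[u8], now_secs: u64, last_used: i64, allow_drift: bool) -> Option<i64> {
    let steps: i64 = if allow_drift { 1 } else { 0 };
    let current_step = (now_secs / 30) as i64;
    for offset in -steps..=steps {
        let step = current_step + offset;
        // totp_custom divides the supplied time by the 30-second period itself.
        let generated = totp_custom::<Sha1>(30, 6, secret, (step * 30) as u64);
        if generated == code && step > last_used {
            return Some(step); // the caller persists this as the new last_used
        }
    }
    None
}
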
@@ -1,7 +1,7 @@
use chrono::Utc;
use data_encoding::BASE64;
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::{
api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData},
@@ -89,14 +89,14 @@ impl DuoStatus {
const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";

#[post("/two-factor/get-duo", data = "<data>")]
fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;

if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}

let data = get_user_duo_data(&headers.user.uuid, &conn);
let data = get_user_duo_data(&headers.user.uuid, &conn).await;

let (enabled, data) = match data {
DuoStatus::Global(_) => (true, Some(DuoData::secret())),
@@ -152,7 +152,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
}

#[post("/two-factor/duo", data = "<data>")]
fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableDuoData = data.into_inner().data;
let mut user = headers.user;

@@ -163,7 +163,7 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)
let (data, data_str) = if check_duo_fields_custom(&data) {
let data_req: DuoData = data.into();
let data_str = serde_json::to_string(&data_req)?;
duo_api_request("GET", "/auth/v2/check", "", &data_req).map_res("Failed to validate Duo credentials")?;
duo_api_request("GET", "/auth/v2/check", "", &data_req).await.map_res("Failed to validate Duo credentials")?;
(data_req.obscure(), data_str)
} else {
(DuoData::secret(), String::new())
@@ -171,9 +171,9 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)

let type_ = TwoFactorType::Duo;
let twofactor = TwoFactor::new(user.uuid.clone(), type_, data_str);
twofactor.save(&conn)?;
twofactor.save(&conn).await?;

_generate_recover_code(&mut user, &conn);
_generate_recover_code(&mut user, &conn).await;

Ok(Json(json!({
"Enabled": true,
@@ -185,11 +185,11 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)
}

#[put("/two-factor/duo", data = "<data>")]
fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_duo(data, headers, conn)
async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_duo(data, headers, conn).await
}

fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
use reqwest::{header, Method};
use std::str::FromStr;

@@ -209,7 +209,8 @@ fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> Em
.basic_auth(username, Some(password))
.header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
.header(header::DATE, date)
.send()?
.send()
.await?
.error_for_status()?;

Ok(())
@@ -222,11 +223,11 @@ const AUTH_PREFIX: &str = "AUTH";
const DUO_PREFIX: &str = "TX";
const APP_PREFIX: &str = "APP";

fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
async fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
let type_ = TwoFactorType::Duo as i32;

// If the user doesn't have an entry, disabled
let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn) {
let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await {
Some(t) => t,
None => return DuoStatus::Disabled(DuoData::global().is_some()),
};
@@ -246,19 +247,20 @@ fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
}

// let (ik, sk, ak, host) = get_duo_keys();
fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
let data = User::find_by_mail(email, conn)
.and_then(|u| get_user_duo_data(&u.uuid, conn).data())
.or_else(DuoData::global)
.map_res("Can't fetch Duo keys")?;
async fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
let data = match User::find_by_mail(email, conn).await {
Some(u) => get_user_duo_data(&u.uuid, conn).await.data(),
_ => DuoData::global(),
}
.map_res("Can't fetch Duo Keys")?;

Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
}

pub fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> {
pub async fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> {
let now = Utc::now().timestamp();

let (ik, sk, ak, host) = get_duo_keys_email(email, conn)?;
let (ik, sk, ak, host) = get_duo_keys_email(email, conn).await?;

let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE);
let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE);
@@ -273,7 +275,7 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
}

pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
pub async fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
// email is as entered by the user, so it needs to be normalized before
// comparison with auth_user below.
let email = &email.to_lowercase();
@@ -288,7 +290,7 @@ pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyRe

let now = Utc::now().timestamp();

let (ik, sk, ak, _host) = get_duo_keys_email(email, conn)?;
let (ik, sk, ak, _host) = get_duo_keys_email(email, conn).await?;

let auth_user = parse_duo_values(&sk, auth_sig, &ik, AUTH_PREFIX, now)?;
let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?;
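
`sign_duo_values`, partially visible above, builds each half of the Duo v2 signed request: a prefixed, base64-encoded cookie followed by an HMAC over that cookie. A sketch using the same helpers this file imports (`BASE64` from data_encoding and the crate's own `crypto::hmac_sign`); the exact `email|ikey|expire` payload layout is an inference from the prefixes and the `format!` line shown above, not quoted from the PR:

fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
    // "TX" (DUO_PREFIX) or "APP" (APP_PREFIX), then the base64 payload.
    let val = format!("{}|{}|{}", email, ikey, expire);
    let cookie = format!("{}|{}", prefix, BASE64.encode(val.as_bytes()));
    // The signed value is the cookie plus an HMAC keyed with sk or ak.
    format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
}
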
@@ -1,6 +1,6 @@
use chrono::{Duration, NaiveDateTime, Utc};
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::{
api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
@@ -28,13 +28,13 @@ struct SendEmailLoginData {
/// User is trying to log in and wants to use email 2FA.
/// Does not require Bearer token
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> EmptyResult {
async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> EmptyResult {
let data: SendEmailLoginData = data.into_inner().data;

use crate::db::models::User;

// Get the user
let user = match User::find_by_mail(&data.Email, &conn) {
let user = match User::find_by_mail(&data.Email, &conn).await {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
@@ -48,31 +48,32 @@ fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> Empty
err!("Email 2FA is disabled")
}

send_token(&user.uuid, &conn)?;
send_token(&user.uuid, &conn).await?;

Ok(())
}

/// Generate the token, save the data for later verification, and send an email to the user
pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
pub async fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
let type_ = TwoFactorType::Email as i32;
let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, conn).map_res("Two factor not found")?;
let mut twofactor =
TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await.map_res("Two factor not found")?;

let generated_token = crypto::generate_email_token(CONFIG.email_token_size());

let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?;
twofactor_data.set_token(generated_token);
twofactor.data = twofactor_data.to_json();
twofactor.save(conn)?;
twofactor.save(conn).await?;

mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?).await?;

Ok(())
}

/// When the user clicks on Manage email 2FA, show the user the related information
#[post("/two-factor/get-email", data = "<data>")]
fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;
let user = headers.user;

@@ -80,13 +81,14 @@ fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) ->
err!("Invalid password");
}

let (enabled, mfa_email) = match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &conn) {
Some(x) => {
let twofactor_data = EmailTokenData::from_json(&x.data)?;
(true, json!(twofactor_data.email))
}
_ => (false, json!(null)),
};
let (enabled, mfa_email) =
match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &conn).await {
Some(x) => {
let twofactor_data = EmailTokenData::from_json(&x.data)?;
(true, json!(twofactor_data.email))
}
_ => (false, json!(null)),
};

Ok(Json(json!({
"Email": mfa_email,
@@ -105,7 +107,7 @@ struct SendEmailData {

/// Send a verification email to the specified email address to check whether it exists/belongs to the user.
#[post("/two-factor/send-email", data = "<data>")]
fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
let data: SendEmailData = data.into_inner().data;
let user = headers.user;

@@ -119,8 +121,8 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -

let type_ = TwoFactorType::Email as i32;

if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
tf.delete(&conn)?;
if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await {
tf.delete(&conn).await?;
}

let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
@@ -128,9 +130,9 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -

// Uses EmailVerificationChallenge as type to show that it's not verified yet.
let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
twofactor.save(&conn)?;
twofactor.save(&conn).await?;

mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?).await?;

Ok(())
}
@@ -145,7 +147,7 @@ struct EmailData {

/// Verify that the email belongs to the user and can be used for 2FA email codes.
#[put("/two-factor/email", data = "<data>")]
fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EmailData = data.into_inner().data;
let mut user = headers.user;

@@ -154,7 +156,8 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
}

let type_ = TwoFactorType::EmailVerificationChallenge as i32;
let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?;
let mut twofactor =
TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await.map_res("Two factor not found")?;

let mut email_data = EmailTokenData::from_json(&twofactor.data)?;

@@ -170,9 +173,9 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
email_data.reset_token();
twofactor.atype = TwoFactorType::Email as i32;
twofactor.data = email_data.to_json();
twofactor.save(&conn)?;
twofactor.save(&conn).await?;

_generate_recover_code(&mut user, &conn);
_generate_recover_code(&mut user, &conn).await;

Ok(Json(json!({
"Email": email_data.email,
@@ -182,9 +185,10 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
}

/// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
let mut email_data = EmailTokenData::from_json(data)?;
let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn)
.await
.map_res("Two factor not found")?;
let issued_token = match &email_data.last_token {
Some(t) => t,
@@ -197,14 +201,14 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &
email_data.reset_token();
}
twofactor.data = email_data.to_json();
twofactor.save(conn)?;
twofactor.save(conn).await?;

err!("Token is invalid")
}

email_data.reset_token();
twofactor.data = email_data.to_json();
twofactor.save(conn)?;
twofactor.save(conn).await?;

let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
let max_time = CONFIG.email_expiration_time() as i64;
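
The expiry check that continues past the truncated hunk above compares `token_sent` against `email_expiration_time`. A small sketch of that comparison, using the same chrono calls the file imports (the helper name is hypothetical):

use chrono::{Duration, NaiveDateTime, Utc};

// A token is stale once `max_secs` have passed since it was sent.
fn email_token_expired(token_sent: i64, max_secs: i64) -> bool {
    let sent = NaiveDateTime::from_timestamp(token_sent, 0);
    Utc::now().naive_utc() > sent + Duration::seconds(max_secs)
}
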
@@ -1,7 +1,7 @@
use chrono::{Duration, Utc};
use data_encoding::BASE32;
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
@@ -15,17 +15,22 @@ use crate::{
pub mod authenticator;
pub mod duo;
pub mod email;
pub mod u2f;
pub mod webauthn;
pub mod yubikey;

pub fn routes() -> Vec<Route> {
let mut routes = routes![get_twofactor, get_recover, recover, disable_twofactor, disable_twofactor_put,];
let mut routes = routes![
get_twofactor,
get_recover,
recover,
disable_twofactor,
disable_twofactor_put,
get_device_verification_settings,
];

routes.append(&mut authenticator::routes());
routes.append(&mut duo::routes());
routes.append(&mut email::routes());
routes.append(&mut u2f::routes());
routes.append(&mut webauthn::routes());
routes.append(&mut yubikey::routes());

@@ -33,8 +38,8 @@ pub fn routes() -> Vec<Route> {
}

#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
async fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn).await;
let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

Json(json!({
@@ -68,13 +73,13 @@ struct RecoverTwoFactor {
}

#[post("/two-factor/recover", data = "<data>")]
fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
async fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner().data;

use crate::db::models::User;

// Get the user
let mut user = match User::find_by_mail(&data.Email, &conn) {
let mut user = match User::find_by_mail(&data.Email, &conn).await {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
@@ -90,19 +95,19 @@ fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
}

// Remove all twofactors from the user
TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
TwoFactor::delete_all_by_user(&user.uuid, &conn).await?;

// Remove the recovery code, not needed without twofactors
user.totp_recover = None;
user.save(&conn)?;
user.save(&conn).await?;
Ok(Json(json!({})))
}

fn _generate_recover_code(user: &mut User, conn: &DbConn) {
async fn _generate_recover_code(user: &mut User, conn: &DbConn) {
if user.totp_recover.is_none() {
let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
user.totp_recover = Some(totp_recover);
user.save(conn).ok();
user.save(conn).await.ok();
}
}

@@ -114,7 +119,7 @@ struct DisableTwoFactorData {
}

#[post("/two-factor/disable", data = "<data>")]
fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let password_hash = data.MasterPasswordHash;
let user = headers.user;
@@ -125,23 +130,24 @@ fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, c

let type_ = data.Type.into_i32()?;

if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
twofactor.delete(&conn)?;
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await {
twofactor.delete(&conn).await?;
}

let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &conn).is_empty();
let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &conn).await.is_empty();

if twofactor_disabled {
let policy_type = OrgPolicyType::TwoFactorAuthentication;
let org_list = UserOrganization::find_by_user_and_policy(&user.uuid, policy_type, &conn);

for user_org in org_list.into_iter() {
for user_org in
UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, &conn)
.await
.into_iter()
{
if user_org.atype < UserOrgType::Admin {
if CONFIG.mail_enabled() {
let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).unwrap();
mail::send_2fa_removed_from_org(&user.email, &org.name)?;
let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).await.unwrap();
mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
}
user_org.delete(&conn)?;
user_org.delete(&conn).await?;
}
}
}
@@ -154,18 +160,18 @@ fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, c
}

#[put("/two-factor/disable", data = "<data>")]
fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
disable_twofactor(data, headers, conn)
async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
disable_twofactor(data, headers, conn).await
}

pub fn send_incomplete_2fa_notifications(pool: DbPool) {
pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
debug!("Sending notifications for incomplete 2FA logins");

if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
return;
}

let conn = match pool.get() {
let conn = match pool.get().await {
Ok(conn) => conn,
_ => {
error!("Failed to get DB connection in send_incomplete_2fa_notifications()");
@@ -175,15 +181,35 @@ pub fn send_incomplete_2fa_notifications(pool: DbPool) {

let now = Utc::now().naive_utc();
let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
let incomplete_logins = TwoFactorIncomplete::find_logins_before(&(now - time_limit), &conn);
let time_before = now - time_limit;
let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &conn).await;
for login in incomplete_logins {
let user = User::find_by_uuid(&login.user_uuid, &conn).expect("User not found");
let user = User::find_by_uuid(&login.user_uuid, &conn).await.expect("User not found");
info!(
"User {} did not complete a 2FA login within the configured time limit. IP: {}",
user.email, login.ip_address
);
mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name)
.await
.expect("Error sending incomplete 2FA email");
login.delete(&conn).expect("Error deleting incomplete 2FA record");
login.delete(&conn).await.expect("Error deleting incomplete 2FA record");
}
}

// This function is currently just a dummy; the actual functionality is not implemented yet.
// It also prevents 404 errors.
//
// See the following Bitwarden PRs regarding this feature:
// https://github.com/bitwarden/clients/pull/2843
// https://github.com/bitwarden/clients/pull/2839
// https://github.com/bitwarden/server/pull/2016
//
// The HTML part is hidden via the CSS patches applied in the bw_web_build repo.
#[get("/two-factor/get-device-verification-settings")]
fn get_device_verification_settings(_headers: Headers, _conn: DbConn) -> Json<Value> {
Json(json!({
"isDeviceVerificationSectionEnabled":false,
"unknownDeviceVerificationEnabled":false,
"object":"deviceVerificationSettings"
}))
}
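
`send_incomplete_2fa_notifications` is now async and fetches its own pooled connection; vaultwarden wires it into a scheduler elsewhere, outside this hunk. A sketch of how a caller might drive it periodically (the tokio interval wiring here is an assumption, not the PR's actual scheduler):

// Hypothetical driver: run the notifier every 10 minutes.
// Assumes the pool handle is cheaply cloneable.
async fn run_incomplete_2fa_sweep(pool: DbPool) {
    let mut interval = tokio::time::interval(std::time::Duration::from_secs(600));
    loop {
        interval.tick().await;
        send_incomplete_2fa_notifications(pool.clone()).await;
    }
}
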
@@ -1,352 +0,0 @@
use once_cell::sync::Lazy;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use u2f::{
messages::{RegisterResponse, SignResponse, U2fSignRequest},
protocol::{Challenge, U2f},
register::Registration,
};

use crate::{
api::{
core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString,
PasswordData,
},
auth::Headers,
db::{
models::{TwoFactor, TwoFactorType},
DbConn,
},
error::Error,
CONFIG,
};

const U2F_VERSION: &str = "U2F_V2";

static APP_ID: Lazy<String> = Lazy::new(|| format!("{}/app-id.json", &CONFIG.domain()));
static U2F: Lazy<U2f> = Lazy::new(|| U2f::new(APP_ID.clone()));

pub fn routes() -> Vec<Route> {
routes![generate_u2f, generate_u2f_challenge, activate_u2f, activate_u2f_put, delete_u2f,]
}

#[post("/two-factor/get-u2f", data = "<data>")]
fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
if !CONFIG.domain_set() {
err!("`DOMAIN` environment variable is not set. U2F disabled")
}
let data: PasswordData = data.into_inner().data;

if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}

let (enabled, keys) = get_u2f_registrations(&headers.user.uuid, &conn)?;
let keys_json: Vec<Value> = keys.iter().map(U2FRegistration::to_json).collect();

Ok(Json(json!({
"Enabled": enabled,
"Keys": keys_json,
"Object": "twoFactorU2f"
})))
}

#[post("/two-factor/get-u2f-challenge", data = "<data>")]
fn generate_u2f_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: PasswordData = data.into_inner().data;

if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}

let _type = TwoFactorType::U2fRegisterChallenge;
let challenge = _create_u2f_challenge(&headers.user.uuid, _type, &conn).challenge;

Ok(Json(json!({
"UserId": headers.user.uuid,
"AppId": APP_ID.to_string(),
"Challenge": challenge,
"Version": U2F_VERSION,
})))
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableU2FData {
Id: NumberOrString,
// 1..5
Name: String,
MasterPasswordHash: String,
DeviceResponse: String,
}

// This struct is referenced from the U2F lib
// because it doesn't implement Deserialize
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[serde(remote = "Registration")]
struct RegistrationDef {
key_handle: Vec<u8>,
pub_key: Vec<u8>,
attestation_cert: Option<Vec<u8>>,
device_name: Option<String>,
}

#[derive(Serialize, Deserialize)]
pub struct U2FRegistration {
pub id: i32,
pub name: String,
#[serde(with = "RegistrationDef")]
pub reg: Registration,
pub counter: u32,
compromised: bool,
pub migrated: Option<bool>,
}

impl U2FRegistration {
fn to_json(&self) -> Value {
json!({
"Id": self.id,
"Name": self.name,
"Compromised": self.compromised,
})
}
}

// This struct is copied from the U2F lib
// to add an optional error code
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RegisterResponseCopy {
pub registration_data: String,
pub version: String,
pub client_data: String,

pub error_code: Option<NumberOrString>,
}

impl From<RegisterResponseCopy> for RegisterResponse {
fn from(r: RegisterResponseCopy) -> RegisterResponse {
RegisterResponse {
registration_data: r.registration_data,
version: r.version,
client_data: r.client_data,
}
}
}

#[post("/two-factor/u2f", data = "<data>")]
fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableU2FData = data.into_inner().data;
let mut user = headers.user;

if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}

let tf_type = TwoFactorType::U2fRegisterChallenge as i32;
let tf_challenge = match TwoFactor::find_by_user_and_type(&user.uuid, tf_type, &conn) {
Some(c) => c,
None => err!("Can't recover challenge"),
};

let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(&conn)?;

let response: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;

let error_code = response.error_code.clone().map_or("0".into(), NumberOrString::into_string);

if error_code != "0" {
err!("Error registering U2F token")
}

let registration = U2F.register_response(challenge, response.into())?;
let full_registration = U2FRegistration {
id: data.Id.into_i32()?,
name: data.Name,
reg: registration,
compromised: false,
counter: 0,
migrated: None,
};

let mut regs = get_u2f_registrations(&user.uuid, &conn)?.1;

// TODO: Check that there is no repeat Id
regs.push(full_registration);
save_u2f_registrations(&user.uuid, &regs, &conn)?;

_generate_recover_code(&mut user, &conn);

let keys_json: Vec<Value> = regs.iter().map(U2FRegistration::to_json).collect();
Ok(Json(json!({
"Enabled": true,
"Keys": keys_json,
"Object": "twoFactorU2f"
})))
}

#[put("/two-factor/u2f", data = "<data>")]
fn activate_u2f_put(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_u2f(data, headers, conn)
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct DeleteU2FData {
Id: NumberOrString,
MasterPasswordHash: String,
}

#[delete("/two-factor/u2f", data = "<data>")]
fn delete_u2f(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: DeleteU2FData = data.into_inner().data;

let id = data.Id.into_i32()?;

if !headers.user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password");
}

let type_ = TwoFactorType::U2f as i32;
let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, type_, &conn) {
Some(tf) => tf,
None => err!("U2F data not found!"),
};

let mut data: Vec<U2FRegistration> = match serde_json::from_str(&tf.data) {
Ok(d) => d,
Err(_) => err!("Error parsing U2F data"),
};

data.retain(|r| r.id != id);

let new_data_str = serde_json::to_string(&data)?;

tf.data = new_data_str;
tf.save(&conn)?;

let keys_json: Vec<Value> = data.iter().map(U2FRegistration::to_json).collect();

Ok(Json(json!({
"Enabled": true,
"Keys": keys_json,
"Object": "twoFactorU2f"
})))
}

fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge {
let challenge = U2F.generate_challenge().unwrap();

TwoFactor::new(user_uuid.into(), type_, serde_json::to_string(&challenge).unwrap())
.save(conn)
.expect("Error saving challenge");

challenge
}

fn save_u2f_registrations(user_uuid: &str, regs: &[U2FRegistration], conn: &DbConn) -> EmptyResult {
TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(conn)
}

fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<U2FRegistration>), Error> {
let type_ = TwoFactorType::U2f as i32;
let (enabled, regs) = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
Some(tf) => (tf.enabled, tf.data),
None => return Ok((false, Vec::new())), // If no data, return empty list
};

let data = match serde_json::from_str(&regs) {
Ok(d) => d,
Err(_) => {
// If error, try old format
let mut old_regs = _old_parse_registrations(&regs);

if old_regs.len() != 1 {
err!("The old U2F format only allows one device")
}

// Convert to new format
let new_regs = vec![U2FRegistration {
id: 1,
name: "Unnamed U2F key".into(),
reg: old_regs.remove(0),
compromised: false,
counter: 0,
migrated: None,
}];

// Save new format
save_u2f_registrations(user_uuid, &new_regs, conn)?;

new_regs
}
};

Ok((enabled, data))
}

fn _old_parse_registrations(registations: &str) -> Vec<Registration> {
#[derive(Deserialize)]
struct Helper(#[serde(with = "RegistrationDef")] Registration);

let regs: Vec<Value> = serde_json::from_str(registations).expect("Can't parse Registration data");

regs.into_iter().map(|r| serde_json::from_value(r).unwrap()).map(|Helper(r)| r).collect()
}

pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);

let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.reg).collect();

if registrations.is_empty() {
err!("No U2F devices registered")
}

Ok(U2F.sign_request(challenge, registrations))
}

pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, conn);

let challenge = match tf_challenge {
Some(tf_challenge) => {
let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
tf_challenge.delete(conn)?;
challenge
}
None => err!("Can't recover login challenge"),
};
let response: SignResponse = serde_json::from_str(response)?;
let mut registrations = get_u2f_registrations(user_uuid, conn)?.1;
if registrations.is_empty() {
err!("No U2F devices registered")
}

for reg in &mut registrations {
let response = U2F.sign_response(challenge.clone(), reg.reg.clone(), response.clone(), reg.counter);
match response {
Ok(new_counter) => {
reg.counter = new_counter;
save_u2f_registrations(user_uuid, &registrations, conn)?;

return Ok(());
}
Err(u2f::u2ferror::U2fError::CounterTooLow) => {
reg.compromised = true;
save_u2f_registrations(user_uuid, &registrations, conn)?;

err!("This device might be compromised!");
}
Err(e) => {
warn!("E {:#}", e);
// break;
}
}
}
err!("error verifying response")
}
@@ -1,5 +1,5 @@
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use url::Url;
use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn};
@@ -21,6 +21,28 @@ pub fn routes() -> Vec<Route> {
routes![get_webauthn, generate_webauthn_challenge, activate_webauthn, activate_webauthn_put, delete_webauthn,]
}

// Some old u2f structs still needed for migrating from u2f to WebAuthn
// Both `struct Registration` and `struct U2FRegistration` can be removed if we remove the u2f to WebAuthn migration
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Registration {
pub key_handle: Vec<u8>,
pub pub_key: Vec<u8>,
pub attestation_cert: Option<Vec<u8>>,
pub device_name: Option<String>,
}

#[derive(Serialize, Deserialize)]
pub struct U2FRegistration {
pub id: i32,
pub name: String,
#[serde(with = "Registration")]
pub reg: Registration,
pub counter: u32,
compromised: bool,
pub migrated: Option<bool>,
}

struct WebauthnConfig {
url: String,
origin: Url,
@@ -80,7 +102,7 @@ impl WebauthnRegistration {
}

#[post("/two-factor/get-webauthn", data = "<data>")]
fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
if !CONFIG.domain_set() {
err!("`DOMAIN` environment variable is not set. Webauthn disabled")
}
@@ -89,7 +111,7 @@ fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn)
err!("Invalid password");
}

let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &conn)?;
let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &conn).await?;
let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

Ok(Json(json!({
@@ -100,12 +122,13 @@ fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn)
}

#[post("/two-factor/get-webauthn-challenge", data = "<data>")]
fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
err!("Invalid password");
}

let registrations = get_webauthn_registrations(&headers.user.uuid, &conn)?
let registrations = get_webauthn_registrations(&headers.user.uuid, &conn)
.await?
.1
.into_iter()
.map(|r| r.credential.cred_id) // We return the credentialIds to the clients to avoid double registering
@@ -121,7 +144,7 @@ fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers,
)?;

let type_ = TwoFactorType::WebauthnRegisterChallenge;
TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&conn)?;
TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&conn).await?;

let mut challenge_value = serde_json::to_value(challenge.public_key)?;
challenge_value["status"] = "ok".into();
@@ -218,7 +241,7 @@ impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
}

#[post("/two-factor/webauthn", data = "<data>")]
fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableWebauthnData = data.into_inner().data;
let mut user = headers.user;

@@ -228,10 +251,10 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con

// Retrieve and delete the saved challenge state
let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await {
Some(tf) => {
let state: RegistrationState = serde_json::from_str(&tf.data)?;
tf.delete(&conn)?;
tf.delete(&conn).await?;
state
}
None => err!("Can't recover challenge"),
@@ -241,7 +264,7 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con
let (credential, _data) =
WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;

let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn)?.1;
let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn).await?.1;
// TODO: Check for repeated ID's
registrations.push(WebauthnRegistration {
id: data.Id.into_i32()?,
@@ -252,8 +275,10 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con
});

// Save the registrations and return them
TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?).save(&conn)?;
_generate_recover_code(&mut user, &conn);
TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
.save(&conn)
.await?;
_generate_recover_code(&mut user, &conn).await;

let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({
@@ -264,8 +289,8 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con
}

#[put("/two-factor/webauthn", data = "<data>")]
fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_webauthn(data, headers, conn)
async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_webauthn(data, headers, conn).await
}

#[derive(Deserialize, Debug)]
@@ -276,13 +301,14 @@ struct DeleteU2FData {
}

#[delete("/two-factor/webauthn", data = "<data>")]
fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
let id = data.data.Id.into_i32()?;
if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
err!("Invalid password");
}

let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &conn) {
let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &conn).await
{
Some(tf) => tf,
None => err!("Webauthn data not found!"),
};
@@ -296,12 +322,12 @@ fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbCo

let removed_item = data.remove(item_pos);
tf.data = serde_json::to_string(&data)?;
tf.save(&conn)?;
tf.save(&conn).await?;
drop(tf);

// If entry is migrated from u2f, delete the u2f entry as well
if let Some(mut u2f) = TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &conn) {
use crate::api::core::two_factor::u2f::U2FRegistration;
if let Some(mut u2f) = TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &conn).await
{
let mut data: Vec<U2FRegistration> = match serde_json::from_str(&u2f.data) {
Ok(d) => d,
Err(_) => err!("Error parsing U2F data"),
@@ -311,7 +337,7 @@ fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbCo
let new_data_str = serde_json::to_string(&data)?;

u2f.data = new_data_str;
u2f.save(&conn)?;
u2f.save(&conn).await?;
}

let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();
@@ -323,18 +349,21 @@ fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbCo
})))
}

pub fn get_webauthn_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
pub async fn get_webauthn_registrations(
user_uuid: &str,
conn: &DbConn,
) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
let type_ = TwoFactorType::Webauthn as i32;
match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)),
None => Ok((false, Vec::new())), // If no data, return empty list
}
}

pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {
pub async fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {
// Load saved credentials
let creds: Vec<Credential> =
get_webauthn_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.credential).collect();
get_webauthn_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.credential).collect();

if creds.is_empty() {
err!("No Webauthn devices registered")
@@ -346,18 +375,19 @@ pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {

// Save the challenge state for later validation
TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
.save(conn)?;
.save(conn)
.await?;

// Return challenge to the clients
Ok(Json(serde_json::to_value(response.public_key)?))
}

pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
let type_ = TwoFactorType::WebauthnLoginChallenge as i32;
let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
Some(tf) => {
let state: AuthenticationState = serde_json::from_str(&tf.data)?;
tf.delete(conn)?;
tf.delete(conn).await?;
state
}
None => err!("Can't recover login challenge"),
@@ -366,7 +396,7 @@ pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -
let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
let rsp: PublicKeyCredential = rsp.data.into();

let mut registrations = get_webauthn_registrations(user_uuid, conn)?.1;
let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;

// If the credential we received is migrated from U2F, enable the U2F compatibility
//let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
@@ -377,7 +407,8 @@ pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -
reg.credential.counter = auth_data.counter;

TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
.save(conn)?;
.save(conn)
.await?;
return Ok(());
}
}
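
Every provider in this compare persists its state the same way: a Vec of registrations serialized to JSON in `TwoFactor.data`, deserialized on read, mutated, and saved back (now with `.await`). A sketch of that recurring round-trip factored into one helper (the function is hypothetical; the types are the crate's own):

async fn update_webauthn_registrations(
    mut tf: TwoFactor,
    conn: &DbConn,
    mutate: impl FnOnce(&mut Vec<WebauthnRegistration>),
) -> EmptyResult {
    // TwoFactor.data holds the provider state as a JSON string.
    let mut regs: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?;
    mutate(&mut regs);
    tf.data = serde_json::to_string(&regs)?;
    tf.save(conn).await
}
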
@@ -1,5 +1,5 @@
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use yubico::{config::Config, verify};

@@ -78,7 +78,7 @@ fn verify_yubikey_otp(otp: String) -> EmptyResult {
}

#[post("/two-factor/get-yubikey", data = "<data>")]
fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
// Make sure the credentials are set
get_yubico_credentials()?;

@@ -92,7 +92,7 @@ fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbCo
let user_uuid = &user.uuid;
let yubikey_type = TwoFactorType::YubiKey as i32;

let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn);
let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn).await;

if let Some(r) = r {
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
@@ -113,7 +113,7 @@ fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbCo
}

#[post("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: EnableYubikeyData = data.into_inner().data;
let mut user = headers.user;

@@ -122,10 +122,11 @@ fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn:
}

// Check if we already have some data
let mut yubikey_data = match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn) {
Some(data) => data,
None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, String::new()),
};
let mut yubikey_data =
match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn).await {
Some(data) => data,
None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, String::new()),
};

let yubikeys = parse_yubikeys(&data);

@@ -146,7 +147,7 @@ fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn:
verify_yubikey_otp(yubikey.to_owned()).map_res("Invalid Yubikey OTP provided")?;
}

let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (&x[..12]).to_owned()).collect();
let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect();

let yubikey_metadata = YubikeyMetadata {
Keys: yubikey_ids,
@@ -154,9 +155,9 @@ fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn:
};

yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();
yubikey_data.save(&conn)?;
yubikey_data.save(&conn).await?;

_generate_recover_code(&mut user, &conn);
_generate_recover_code(&mut user, &conn).await;

let mut result = jsonify_yubikeys(yubikey_metadata.Keys);

@@ -168,8 +169,8 @@ fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn:
}

#[put("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_yubikey(data, headers, conn)
async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_yubikey(data, headers, conn).await
}

pub fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult {
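
The `x[..12]` slice above works because a YubiKey OTP is 44 modhex characters, of which the first 12 are the key's stable public ID; only that ID is stored. A sketch of the same extraction with the length and alphabet checked explicitly (the helper is hypothetical):

// Modhex alphabet used by YubiKey OTPs.
const MODHEX: &str = "cbdefghijklnrtuv";

fn yubikey_public_id(otp: &str) -> Option<&str> {
    if otp.len() == 44 && otp.chars().all(|c| MODHEX.contains(c)) {
        Some(&otp[..12]) // the remaining 32 chars are the one-time part
    } else {
        None
    }
}
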
629
src/api/icons.rs
629
src/api/icons.rs
@@ -1,20 +1,25 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fs::{create_dir_all, remove_file, symlink_metadata, File},
|
||||
io::prelude::*,
|
||||
net::{IpAddr, ToSocketAddrs},
|
||||
sync::{Arc, RwLock},
|
||||
net::IpAddr,
|
||||
sync::Arc,
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures::{stream::StreamExt, TryFutureExt};
|
||||
use once_cell::sync::Lazy;
|
||||
use regex::Regex;
|
||||
use reqwest::{blocking::Client, blocking::Response, header};
|
||||
use rocket::{
|
||||
http::ContentType,
|
||||
response::{Content, Redirect},
|
||||
Route,
|
||||
use reqwest::{
|
||||
header::{self, HeaderMap, HeaderValue},
|
||||
Client, Response,
|
||||
};
|
||||
use rocket::{http::ContentType, response::Redirect, Route};
|
||||
use tokio::{
|
||||
fs::{create_dir_all, remove_file, symlink_metadata, File},
|
||||
io::{AsyncReadExt, AsyncWriteExt},
|
||||
net::lookup_host,
|
||||
};
|
||||
|
||||
use html5gum::{Emitter, EndTag, HtmlString, InfallibleTokenizer, Readable, StartTag, StringReader, Tokenizer};
|
||||
|
||||
use crate::{
|
||||
error::Error,
|
||||
@@ -25,48 +30,56 @@ use crate::{
|
||||
 pub fn routes() -> Vec<Route> {
     match CONFIG.icon_service().as_str() {
         "internal" => routes![icon_internal],
-        "bitwarden" => routes![icon_bitwarden],
-        "duckduckgo" => routes![icon_duckduckgo],
-        "google" => routes![icon_google],
-        _ => routes![icon_custom],
+        _ => routes![icon_external],
     }
 }

 static CLIENT: Lazy<Client> = Lazy::new(|| {
     // Generate the default headers
-    let mut default_headers = header::HeaderMap::new();
-    default_headers
-        .insert(header::USER_AGENT, header::HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)"));
-    default_headers
-        .insert(header::ACCEPT, header::HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1"));
-    default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en,*;q=0.1"));
-    default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache"));
-    default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache"));
+    let mut default_headers = HeaderMap::new();
+    default_headers.insert(header::USER_AGENT, HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)"));
+    default_headers.insert(header::ACCEPT, HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1"));
+    default_headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en,*;q=0.1"));
+    default_headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("no-cache"));
+    default_headers.insert(header::PRAGMA, HeaderValue::from_static("no-cache"));
+
+    // Generate the cookie store
+    let cookie_store = Arc::new(Jar::default());

     // Reuse the client between requests
-    get_reqwest_client_builder()
-        .cookie_provider(Arc::new(Jar::default()))
+    let client = get_reqwest_client_builder()
+        .cookie_provider(Arc::clone(&cookie_store))
         .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
-        .default_headers(default_headers)
-        .build()
-        .expect("Failed to build icon client")
+        .default_headers(default_headers.clone());
+
+    match client.build() {
+        Ok(client) => client,
+        Err(e) => {
+            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
+            get_reqwest_client_builder()
+                .cookie_provider(cookie_store)
+                .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
+                .default_headers(default_headers)
+                .trust_dns(false)
+                .build()
+                .expect("Failed to build client")
+        }
+    }
 });

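Editor's note: the two-stage build above guards against reqwest's trust-dns resolver failing at client-construction time; on error the client is rebuilt once with `trust_dns(false)`. A minimal standalone sketch of the same fallback pattern (the `make_builder` helper, timeout value, and log message are illustrative, not from the commit; `trust_dns()` requires reqwest's `trust-dns` feature):

```rust
use std::time::Duration;
use reqwest::{Client, ClientBuilder};

// Hypothetical stand-in for get_reqwest_client_builder().
fn make_builder() -> ClientBuilder {
    Client::builder().timeout(Duration::from_secs(10))
}

fn build_client() -> Client {
    // First attempt: default builder (trust-dns enabled via crate features).
    match make_builder().build() {
        Ok(client) => client,
        Err(e) => {
            // Fall back to the system resolver if construction failed.
            eprintln!("builder failed ({e}), retrying with trust_dns(false)");
            make_builder().trust_dns(false).build().expect("Failed to build client")
        }
    }
}
```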
 // Build Regex only once since this takes a lot of time.
 static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
 static ICON_REL_BLACKLIST: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)mask-icon").unwrap());
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

 // Special HashMap which holds the user defined Regex to speed up matching it.
-static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));
+static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);

-fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
+async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
     if !is_valid_domain(domain) {
         warn!("Invalid domain: {}", domain);
         return None;
     }

-    if is_domain_blacklisted(domain) {
+    if is_domain_blacklisted(domain).await {
         return None;
     }

@@ -84,47 +97,28 @@ fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
 }

 #[get("/<domain>/icon.png")]
-fn icon_custom(domain: String) -> Option<Redirect> {
-    icon_redirect(&domain, &CONFIG.icon_service())
-}
-
-#[get("/<domain>/icon.png")]
-fn icon_bitwarden(domain: String) -> Option<Redirect> {
-    icon_redirect(&domain, "https://icons.bitwarden.net/{}/icon.png")
-}
-
-#[get("/<domain>/icon.png")]
-fn icon_duckduckgo(domain: String) -> Option<Redirect> {
-    icon_redirect(&domain, "https://icons.duckduckgo.com/ip3/{}.ico")
-}
-
-#[get("/<domain>/icon.png")]
-fn icon_google(domain: String) -> Option<Redirect> {
-    icon_redirect(&domain, "https://www.google.com/s2/favicons?domain={}&sz=32")
-}
+async fn icon_external(domain: String) -> Option<Redirect> {
+    icon_redirect(&domain, &CONFIG._icon_service_url()).await
+}

 #[get("/<domain>/icon.png")]
-fn icon_internal(domain: String) -> Cached<Content<Vec<u8>>> {
+async fn icon_internal(domain: String) -> Cached<(ContentType, Vec<u8>)> {
     const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");

     if !is_valid_domain(&domain) {
         warn!("Invalid domain: {}", domain);
         return Cached::ttl(
-            Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
+            (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
             CONFIG.icon_cache_negttl(),
             true,
         );
     }

-    match get_icon(&domain) {
+    match get_icon(&domain).await {
         Some((icon, icon_type)) => {
-            Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
+            Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
         }
-        _ => Cached::ttl(
-            Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
-            CONFIG.icon_cache_negttl(),
-            true,
-        ),
+        _ => Cached::ttl((ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl(), true),
     }
 }

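Editor's note: dropping the `Content(...)` wrapper for a `(ContentType, Vec<u8>)` tuple follows Rocket 0.5, where a tuple of a `ContentType` and a body type already implements `Responder`. A minimal sketch of that pattern outside Vaultwarden (route, path, and placeholder bytes are illustrative):

```rust
use rocket::{get, http::ContentType, routes};

// In Rocket 0.5, (ContentType, Vec<u8>) implements Responder directly,
// so no wrapper is needed to set the MIME type of a raw body.
#[get("/pixel.png")]
fn pixel() -> (ContentType, Vec<u8>) {
    // Illustrative placeholder bytes, not a real PNG.
    (ContentType::PNG, vec![0x89, 0x50, 0x4E, 0x47])
}

#[rocket::launch]
fn rocket() -> _ {
    rocket::build().mount("/", routes![pixel])
}
```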
@@ -264,68 +258,57 @@ mod tests {
     }
 }

-fn is_domain_blacklisted(domain: &str) -> bool {
-    let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips()
-        && (domain, 0)
-            .to_socket_addrs()
-            .map(|x| {
-                for ip_port in x {
-                    if !is_global(ip_port.ip()) {
-                        warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain);
-                        return true;
-                    }
-                }
-                false
-            })
-            .unwrap_or(false);
-
-    // Skip the regex check if the previous one is true already
-    if !is_blacklisted {
-        if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
-            let mut regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
-
-            // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
-            let regex = if let Some(regex) = regex_hashmap.get(&blacklist) {
-                regex
-            } else {
-                drop(regex_hashmap);
-
-                let mut regex_hashmap_write = ICON_BLACKLIST_REGEX.write().unwrap();
-                // Clear the current list if the previous key doesn't exists.
-                // To prevent growing of the HashMap after someone has changed it via the admin interface.
-                if regex_hashmap_write.len() >= 1 {
-                    regex_hashmap_write.clear();
-                }
-
-                // Generate the regex to store in too the Lazy Static HashMap.
-                let blacklist_regex = Regex::new(&blacklist).unwrap();
-                regex_hashmap_write.insert(blacklist.to_string(), blacklist_regex);
-                drop(regex_hashmap_write);
-
-                regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
-                regex_hashmap.get(&blacklist).unwrap()
-            };
-
-            // Use the pre-generate Regex stored in a Lazy HashMap.
-            if regex.is_match(domain) {
-                debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
-                is_blacklisted = true;
-            }
-        }
-    }
-
-    is_blacklisted
+use cached::proc_macro::cached;
+#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
+#[allow(clippy::unused_async)] // This is needed because cached causes a false-positive here.
+async fn is_domain_blacklisted(domain: &str) -> bool {
+    if CONFIG.icon_blacklist_non_global_ips() {
+        if let Ok(s) = lookup_host((domain, 0)).await {
+            for addr in s {
+                if !is_global(addr.ip()) {
+                    debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
+                    return true;
+                }
+            }
+        }
+    }
+
+    if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
+        // Use the pre-generated Regex stored in a Lazy HashMap if there's one, else generate it.
+        let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
+            regex.is_match(domain)
+        } else {
+            // Clear the current list if the previous key doesn't exist,
+            // to prevent growing of the HashMap after someone has changed it via the admin interface.
+            if ICON_BLACKLIST_REGEX.len() >= 1 {
+                ICON_BLACKLIST_REGEX.clear();
+            }
+
+            // Generate the regex to store in the Lazy Static HashMap.
+            let blacklist_regex = Regex::new(&blacklist).unwrap();
+            let is_match = blacklist_regex.is_match(domain);
+            ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
+
+            is_match
+        };
+
+        if is_match {
+            debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
+            return true;
+        }
+    }
+    false
 }

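Editor's note: the `#[cached]` attribute above is the `cached` crate's proc-macro: it memoizes the function keyed by the `convert` expression, holding at most `size` entries for `time` seconds, which is why repeated icon requests skip the DNS lookup. A small self-contained sketch of the same mechanism (function name and the `.invalid` check are illustrative; assumes the `cached` crate with its async support and a tokio runtime):

```rust
use cached::proc_macro::cached;

// Memoize by the lowercased domain for 60 seconds, keeping up to 16 entries.
#[cached(key = "String", convert = r#"{ domain.to_lowercase() }"#, size = 16, time = 60)]
async fn is_probably_blacklisted(domain: &str) -> bool {
    // Stand-in for the real check; imagine a DNS lookup here.
    domain.ends_with(".invalid")
}

#[tokio::main]
async fn main() {
    // A second call within 60s that maps to the same key is served from the cache.
    assert!(is_probably_blacklisted("example.invalid").await);
    assert!(is_probably_blacklisted("EXAMPLE.invalid").await);
}
```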
-fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
+async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
     let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);

     // Check for expiration of negatively cached copy
-    if icon_is_negcached(&path) {
+    if icon_is_negcached(&path).await {
         return None;
     }

-    if let Some(icon) = get_cached_icon(&path) {
+    if let Some(icon) = get_cached_icon(&path).await {
         let icon_type = match get_icon_type(&icon) {
             Some(x) => x,
             _ => "x-icon",
@@ -338,31 +321,31 @@ fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
     }

     // Get the icon, or None in case of error
-    match download_icon(domain) {
+    match download_icon(domain).await {
         Ok((icon, icon_type)) => {
-            save_icon(&path, &icon);
-            Some((icon, icon_type.unwrap_or("x-icon").to_string()))
+            save_icon(&path, &icon).await;
+            Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
             warn!("Unable to download icon: {:?}", e);
             let miss_indicator = path + ".miss";
-            save_icon(&miss_indicator, &[]);
+            save_icon(&miss_indicator, &[]).await;
             None
         }
     }
 }

-fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
+async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
     // Check for expiration of successfully cached copy
-    if icon_is_expired(path) {
+    if icon_is_expired(path).await {
         return None;
     }

     // Try to read the cached icon, and return it if it exists
-    if let Ok(mut f) = File::open(path) {
+    if let Ok(mut f) = File::open(path).await {
         let mut buffer = Vec::new();

-        if f.read_to_end(&mut buffer).is_ok() {
+        if f.read_to_end(&mut buffer).await.is_ok() {
             return Some(buffer);
         }
     }
@@ -370,22 +353,22 @@ fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
     None
 }

-fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
-    let meta = symlink_metadata(path)?;
+async fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
+    let meta = symlink_metadata(path).await?;
     let modified = meta.modified()?;
     let age = SystemTime::now().duration_since(modified)?;

     Ok(ttl > 0 && ttl <= age.as_secs())
 }

-fn icon_is_negcached(path: &str) -> bool {
+async fn icon_is_negcached(path: &str) -> bool {
     let miss_indicator = path.to_owned() + ".miss";
-    let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl());
+    let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl()).await;

     match expired {
         // No longer negatively cached, drop the marker
         Ok(true) => {
-            if let Err(e) = remove_file(&miss_indicator) {
+            if let Err(e) = remove_file(&miss_indicator).await {
                 error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e);
             }
             false
@@ -397,8 +380,8 @@ fn icon_is_negcached(path: &str) -> bool {
     }
 }

-fn icon_is_expired(path: &str) -> bool {
-    let expired = file_is_expired(path, CONFIG.icon_cache_ttl());
+async fn icon_is_expired(path: &str) -> bool {
+    let expired = file_is_expired(path, CONFIG.icon_cache_ttl()).await;
     expired.unwrap_or(true)
 }

@@ -416,91 +399,62 @@ impl Icon {
     }
 }

-/// Iterates over the HTML document to find <base href="http://domain.tld">
-/// When found it will stop the iteration and the found base href will be shared deref via `base_href`.
-///
-/// # Arguments
-/// * `node` - A Parsed HTML document via html5ever::parse_document()
-/// * `base_href` - a mutable url::Url which will be overwritten when a base href tag has been found.
-///
-fn get_base_href(node: &std::rc::Rc<markup5ever_rcdom::Node>, base_href: &mut url::Url) -> bool {
-    if let markup5ever_rcdom::NodeData::Element {
-        name,
-        attrs,
-        ..
-    } = &node.data
-    {
-        if name.local.as_ref() == "base" {
-            let attrs = attrs.borrow();
-            for attr in attrs.iter() {
-                let attr_name = attr.name.local.as_ref();
-                let attr_value = attr.value.as_ref();
-
-                if attr_name == "href" {
-                    debug!("Found base href: {}", attr_value);
-                    *base_href = match base_href.join(attr_value) {
-                        Ok(href) => href,
-                        _ => base_href.clone(),
-                    };
-                    return true;
-                }
-            }
-            return true;
-        }
-    }
-
-    // TODO: Might want to limit the recursion depth?
-    for child in node.children.borrow().iter() {
-        // Check if we got a true back and stop the iter.
-        // This means we found a <base> tag and can stop processing the html.
-        if get_base_href(child, base_href) {
-            return true;
-        }
-    }
-    false
-}
-
-fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Vec<Icon>, url: &url::Url) {
-    if let markup5ever_rcdom::NodeData::Element {
-        name,
-        attrs,
-        ..
-    } = &node.data
-    {
-        if name.local.as_ref() == "link" {
-            let mut has_rel = false;
-            let mut href = None;
-            let mut sizes = None;
-
-            let attrs = attrs.borrow();
-            for attr in attrs.iter() {
-                let attr_name = attr.name.local.as_ref();
-                let attr_value = attr.value.as_ref();
-
-                if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) && !ICON_REL_BLACKLIST.is_match(attr_value)
-                {
-                    has_rel = true;
-                } else if attr_name == "href" {
-                    href = Some(attr_value);
-                } else if attr_name == "sizes" {
-                    sizes = Some(attr_value);
-                }
-            }
-
-            if has_rel {
-                if let Some(inner_href) = href {
-                    if let Ok(full_href) = url.join(inner_href).map(String::from) {
-                        let priority = get_icon_priority(&full_href, sizes);
-                        icons.push(Icon::new(priority, full_href));
-                    }
-                }
-            }
-        }
-    }
-
-    // TODO: Might want to limit the recursion depth?
-    for child in node.children.borrow().iter() {
-        get_favicons_node(child, icons, url);
-    }
-}
+fn get_favicons_node(
+    dom: InfallibleTokenizer<StringReader<'_>, FaviconEmitter>,
+    icons: &mut Vec<Icon>,
+    url: &url::Url,
+) {
+    const TAG_LINK: &[u8] = b"link";
+    const TAG_BASE: &[u8] = b"base";
+    const TAG_HEAD: &[u8] = b"head";
+    const ATTR_REL: &[u8] = b"rel";
+    const ATTR_HREF: &[u8] = b"href";
+    const ATTR_SIZES: &[u8] = b"sizes";
+
+    let mut base_url = url.clone();
+    let mut icon_tags: Vec<StartTag> = Vec::new();
+    for token in dom {
+        match token {
+            FaviconToken::StartTag(tag) => {
+                if *tag.name == TAG_LINK
+                    && tag.attributes.contains_key(ATTR_REL)
+                    && tag.attributes.contains_key(ATTR_HREF)
+                {
+                    let rel_value = std::str::from_utf8(tag.attributes.get(ATTR_REL).unwrap())
+                        .unwrap_or_default()
+                        .to_ascii_lowercase();
+                    if rel_value.contains("icon") && !rel_value.contains("mask-icon") {
+                        icon_tags.push(tag);
+                    }
+                } else if *tag.name == TAG_BASE && tag.attributes.contains_key(ATTR_HREF) {
+                    let href = std::str::from_utf8(tag.attributes.get(ATTR_HREF).unwrap()).unwrap_or_default();
+                    debug!("Found base href: {href}");
+                    base_url = match base_url.join(href) {
+                        Ok(inner_url) => inner_url,
+                        _ => url.clone(),
+                    };
+                }
+            }
+            FaviconToken::EndTag(tag) => {
+                if *tag.name == TAG_HEAD {
+                    break;
+                }
+            }
+        }
+    }
+
+    for icon_tag in icon_tags {
+        if let Some(icon_href) = icon_tag.attributes.get(ATTR_HREF) {
+            if let Ok(full_href) = base_url.join(std::str::from_utf8(icon_href).unwrap_or_default()) {
+                let sizes = if let Some(v) = icon_tag.attributes.get(ATTR_SIZES) {
+                    std::str::from_utf8(v).unwrap_or_default()
+                } else {
+                    ""
+                };
+                let priority = get_icon_priority(full_href.as_str(), sizes);
+                icons.push(Icon::new(priority, full_href.to_string()));
+            }
+        };
+    }
+}

@@ -518,16 +472,16 @@ struct IconUrlResult {
 ///
 /// # Example
 /// ```
-/// let icon_result = get_icon_url("github.com")?;
-/// let icon_result = get_icon_url("vaultwarden.discourse.group")?;
+/// let icon_result = get_icon_url("github.com").await?;
+/// let icon_result = get_icon_url("vaultwarden.discourse.group").await?;
 /// ```
-fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
+async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     // Default URL with secure and insecure schemes
-    let ssldomain = format!("https://{}", domain);
-    let httpdomain = format!("http://{}", domain);
+    let ssldomain = format!("https://{domain}");
+    let httpdomain = format!("http://{domain}");

     // First check the domain as given during the request for both HTTPS and HTTP.
-    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)) {
+    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
         Ok(c) => Ok(c),
         Err(e) => {
             let mut sub_resp = Err(e);
@@ -542,25 +496,24 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
                     base = domain_parts.next_back().unwrap()
                 );
                 if is_valid_domain(&base_domain) {
-                    let sslbase = format!("https://{}", base_domain);
-                    let httpbase = format!("http://{}", base_domain);
-                    debug!("[get_icon_url]: Trying without subdomains '{}'", base_domain);
+                    let sslbase = format!("https://{base_domain}");
+                    let httpbase = format!("http://{base_domain}");
+                    debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");

-                    sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase));
+                    sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
                 }

             // When the domain is not an IP, and has less than 2 dots, try to add www. in front of it.
             } else if is_ip.is_err() && domain.matches('.').count() < 2 {
-                let www_domain = format!("www.{}", domain);
+                let www_domain = format!("www.{domain}");
                 if is_valid_domain(&www_domain) {
-                    let sslwww = format!("https://{}", www_domain);
-                    let httpwww = format!("http://{}", www_domain);
-                    debug!("[get_icon_url]: Trying with www. prefix '{}'", www_domain);
+                    let sslwww = format!("https://{www_domain}");
+                    let httpwww = format!("http://{www_domain}");
+                    debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");

-                    sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww));
+                    sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
                 }
             }

             sub_resp
         }
     };
@@ -575,26 +528,23 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {

         // Set the referer to be used on the final request, some sites check this.
         // Mostly used to prevent direct linking and for other security reasons.
-        referer = url.as_str().to_string();
+        referer = url.to_string();

-        // Add the default favicon.ico to the list with the domain the content responded from.
+        // Add the fallback favicon.ico and apple-touch-icon.png to the list with the domain the content responded from.
         iconlist.push(Icon::new(35, String::from(url.join("/favicon.ico").unwrap())));
+        iconlist.push(Icon::new(40, String::from(url.join("/apple-touch-icon.png").unwrap())));

         // 384KB should be more than enough for the HTML, as we only really need the HTML header.
-        let mut limited_reader = content.take(384 * 1024);
+        let limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.to_vec();

-        use html5ever::tendril::TendrilSink;
-        let dom = html5ever::parse_document(markup5ever_rcdom::RcDom::default(), Default::default())
-            .from_utf8()
-            .read_from(&mut limited_reader)?;
-
-        let mut base_url: url::Url = url;
-        get_base_href(&dom.document, &mut base_url);
-        get_favicons_node(&dom.document, &mut iconlist, &base_url);
+        let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default()).infallible();
+        get_favicons_node(dom, &mut iconlist, &url);
     } else {
         // Add the default favicon.ico to the list with just the given domain
-        iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
-        iconlist.push(Icon::new(35, format!("{}/favicon.ico", httpdomain)));
+        iconlist.push(Icon::new(35, format!("{ssldomain}/favicon.ico")));
+        iconlist.push(Icon::new(40, format!("{ssldomain}/apple-touch-icon.png")));
+        iconlist.push(Icon::new(35, format!("{httpdomain}/favicon.ico")));
+        iconlist.push(Icon::new(40, format!("{httpdomain}/apple-touch-icon.png")));
     }

     // Sort the iconlist by priority
@@ -607,12 +557,12 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     })
 }

-fn get_page(url: &str) -> Result<Response, Error> {
-    get_page_with_referer(url, "")
+async fn get_page(url: &str) -> Result<Response, Error> {
+    get_page_with_referer(url, "").await
 }

-fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
-    if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()) {
+async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
+    if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
         warn!("Favicon '{}' resolves to a blacklisted domain or IP!", url);
     }

@@ -621,7 +571,7 @@ fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
         client = client.header("Referer", referer)
     }

-    match client.send() {
+    match client.send().await {
         Ok(c) => c.error_for_status().map_err(Into::into),
         Err(e) => err_silent!(format!("{}", e)),
     }
@@ -639,7 +589,7 @@ fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
 /// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
 /// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
 /// ```
-fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 {
+fn get_icon_priority(href: &str, sizes: &str) -> u8 {
     // Check if there is a dimension set
     let (width, height) = parse_sizes(sizes);

@@ -687,11 +637,11 @@ fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 {
 /// let (width, height) = parse_sizes("x128x128"); // (128, 128)
 /// let (width, height) = parse_sizes("32"); // (0, 0)
 /// ```
-fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
+fn parse_sizes(sizes: &str) -> (u16, u16) {
     let mut width: u16 = 0;
     let mut height: u16 = 0;

-    if let Some(sizes) = sizes {
+    if !sizes.is_empty() {
         match ICON_SIZE_REGEX.captures(sizes.trim()) {
             None => {}
             Some(dimensions) => {
@@ -706,14 +656,14 @@ fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
     (width, height)
 }

|
||||
fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
|
||||
if is_domain_blacklisted(domain) {
|
||||
async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
|
||||
if is_domain_blacklisted(domain).await {
|
||||
err_silent!("Domain is blacklisted", domain)
|
||||
}
|
||||
|
||||
let icon_result = get_icon_url(domain)?;
|
||||
let icon_result = get_icon_url(domain).await?;
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
let mut buffer = Bytes::new();
|
||||
let mut icon_type: Option<&str> = None;
|
||||
|
||||
use data_url::DataUrl;
|
||||
@@ -722,8 +672,12 @@ fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
|
||||
if icon.href.starts_with("data:image") {
|
||||
let datauri = DataUrl::process(&icon.href).unwrap();
|
||||
// Check if we are able to decode the data uri
|
||||
match datauri.decode_to_vec() {
|
||||
Ok((body, _fragment)) => {
|
||||
let mut body = BytesMut::new();
|
||||
match datauri.decode::<_, ()>(|bytes| {
|
||||
body.extend_from_slice(bytes);
|
||||
Ok(())
|
||||
}) {
|
||||
Ok(_) => {
|
||||
// Also check if the size is atleast 67 bytes, which seems to be the smallest png i could create
|
||||
if body.len() >= 67 {
|
||||
// Check if the icon type is allowed, else try an icon from the list.
|
||||
@@ -733,16 +687,17 @@ fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
|
||||
continue;
|
||||
}
|
||||
info!("Extracted icon from data:image uri for {}", domain);
|
||||
buffer = body;
|
||||
buffer = body.freeze();
|
||||
break;
|
||||
}
|
||||
}
|
||||
_ => debug!("Extracted icon from data:image uri is invalid"),
|
||||
};
|
||||
} else {
|
||||
match get_page_with_referer(&icon.href, &icon_result.referer) {
|
||||
Ok(mut res) => {
|
||||
res.copy_to(&mut buffer)?;
|
||||
match get_page_with_referer(&icon.href, &icon_result.referer).await {
|
||||
Ok(res) => {
|
||||
buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
|
||||
|
||||
// Check if the icon type is allowed, else try an icon from the list.
|
||||
icon_type = get_icon_type(&buffer);
|
||||
if icon_type.is_none() {
|
||||
@@ -765,13 +720,13 @@ fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
|
||||
Ok((buffer, icon_type))
|
||||
}
|
||||
|
||||
fn save_icon(path: &str, icon: &[u8]) {
|
||||
match File::create(path) {
|
||||
async fn save_icon(path: &str, icon: &[u8]) {
|
||||
match File::create(path).await {
|
||||
Ok(mut f) => {
|
||||
f.write_all(icon).expect("Error writing icon file");
|
||||
f.write_all(icon).await.expect("Error writing icon file");
|
||||
}
|
||||
Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
|
||||
create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache folder");
|
||||
create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder");
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Unable to save icon: {:?}", e);
|
||||
@@ -791,13 +746,30 @@ fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
|
||||
}
|
||||
}
|
||||
|
||||
+/// Minimize the amount of bytes to be parsed from a reqwest result.
+/// This prevents very long parsing and memory usage.
+async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result<Bytes, reqwest::Error> {
+    let mut stream = res.bytes_stream().take(max_size);
+    let mut buf = BytesMut::new();
+    let mut size = 0;
+    while let Some(chunk) = stream.next().await {
+        let chunk = &chunk?;
+        size += chunk.len();
+        buf.extend(chunk);
+        if size >= max_size {
+            break;
+        }
+    }
+    Ok(buf.freeze())
+}

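Editor's note: `StreamExt::take(max_size)` above limits the number of *chunks*, not bytes, so the explicit `size >= max_size` check is what actually bounds memory use. A self-contained sketch of the same limiting loop over an in-memory stream (names and sizes are illustrative):

```rust
use bytes::{Bytes, BytesMut};
use futures::{stream, StreamExt};

// Collect at most roughly `max_size` bytes from a stream of chunks.
// The byte counter, not take(), is what enforces the limit.
async fn take_bytes(chunks: Vec<Bytes>, max_size: usize) -> Bytes {
    let mut stream = stream::iter(chunks);
    let mut buf = BytesMut::new();
    let mut size = 0;
    while let Some(chunk) = stream.next().await {
        size += chunk.len();
        buf.extend_from_slice(&chunk);
        if size >= max_size {
            break; // Stop once the running total passes the limit.
        }
    }
    buf.freeze()
}

#[tokio::main]
async fn main() {
    let chunks = vec![Bytes::from(vec![0u8; 300]), Bytes::from(vec![1u8; 300])];
    // The first chunk already exceeds 256 bytes, so the second is never read.
    assert_eq!(take_bytes(chunks, 256).await.len(), 300);
}
```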
|
||||
/// This is an implementation of the default Cookie Jar from Reqwest and reqwest_cookie_store build by pfernie.
|
||||
/// The default cookie jar used by Reqwest keeps all the cookies based upon the Max-Age or Expires which could be a long time.
|
||||
/// That could be used for tracking, to prevent this we force the lifespan of the cookies to always be max two minutes.
|
||||
/// A Cookie Jar is needed because some sites force a redirect with cookies to verify if a request uses cookies or not.
|
||||
use cookie_store::CookieStore;
|
||||
#[derive(Default)]
|
||||
pub struct Jar(RwLock<CookieStore>);
|
||||
pub struct Jar(std::sync::RwLock<CookieStore>);
|
||||
|
||||
impl reqwest::cookie::CookieStore for Jar {
|
||||
fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &header::HeaderValue>, url: &url::Url) {
|
||||
@@ -820,8 +792,6 @@ impl reqwest::cookie::CookieStore for Jar {
|
||||
}
|
||||
|
||||
fn cookies(&self, url: &url::Url) -> Option<header::HeaderValue> {
|
||||
use bytes::Bytes;
|
||||
|
||||
let cookie_store = self.0.read().unwrap();
|
||||
let s = cookie_store
|
||||
.get_request_values(url)
|
||||
@@ -836,3 +806,158 @@ impl reqwest::cookie::CookieStore for Jar {
|
||||
header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
|
||||
}
|
||||
}
|
||||
|
||||
/// Custom FaviconEmitter for the html5gum parser.
|
||||
/// The FaviconEmitter is using an almost 1:1 copy of the DefaultEmitter with some small changes.
|
||||
/// This prevents emitting tags like comments, doctype and also strings between the tags.
|
||||
/// Therefor parsing the HTML content is faster.
|
||||
use std::collections::{BTreeSet, VecDeque};
|
||||
|
||||
#[derive(Debug)]
|
||||
enum FaviconToken {
|
||||
StartTag(StartTag),
|
||||
EndTag(EndTag),
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
struct FaviconEmitter {
|
||||
current_token: Option<FaviconToken>,
|
||||
last_start_tag: HtmlString,
|
||||
current_attribute: Option<(HtmlString, HtmlString)>,
|
||||
seen_attributes: BTreeSet<HtmlString>,
|
||||
emitted_tokens: VecDeque<FaviconToken>,
|
||||
}
|
||||
|
||||
impl FaviconEmitter {
|
||||
fn emit_token(&mut self, token: FaviconToken) {
|
||||
self.emitted_tokens.push_front(token);
|
||||
}
|
||||
|
||||
fn flush_current_attribute(&mut self) {
|
||||
if let Some((k, v)) = self.current_attribute.take() {
|
||||
match self.current_token {
|
||||
Some(FaviconToken::StartTag(ref mut tag)) => {
|
||||
tag.attributes.entry(k).and_modify(|_| {}).or_insert(v);
|
||||
}
|
||||
Some(FaviconToken::EndTag(_)) => {
|
||||
self.seen_attributes.insert(k);
|
||||
}
|
||||
_ => {
|
||||
debug_assert!(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Emitter for FaviconEmitter {
|
||||
type Token = FaviconToken;
|
||||
|
||||
fn set_last_start_tag(&mut self, last_start_tag: Option<&[u8]>) {
|
||||
self.last_start_tag.clear();
|
||||
self.last_start_tag.extend(last_start_tag.unwrap_or_default());
|
||||
}
|
||||
|
||||
fn pop_token(&mut self) -> Option<Self::Token> {
|
||||
self.emitted_tokens.pop_back()
|
||||
}
|
||||
|
||||
fn init_start_tag(&mut self) {
|
||||
self.current_token = Some(FaviconToken::StartTag(StartTag::default()));
|
||||
}
|
||||
|
||||
fn init_end_tag(&mut self) {
|
||||
self.current_token = Some(FaviconToken::EndTag(EndTag::default()));
|
||||
self.seen_attributes.clear();
|
||||
}
|
||||
|
||||
fn emit_current_tag(&mut self) -> Option<html5gum::State> {
|
||||
self.flush_current_attribute();
|
||||
let mut token = self.current_token.take().unwrap();
|
||||
let mut emit = false;
|
||||
match token {
|
||||
FaviconToken::EndTag(ref mut tag) => {
|
||||
// Always clean seen attributes
|
||||
self.seen_attributes.clear();
|
||||
|
||||
// Only trigger an emit for the </head> tag.
|
||||
// This is matched, and will break the for-loop.
|
||||
if *tag.name == b"head" {
|
||||
emit = true;
|
||||
}
|
||||
}
|
||||
FaviconToken::StartTag(ref mut tag) => {
|
||||
// Only trriger an emit for <link> and <base> tags.
|
||||
// These are the only tags we want to parse.
|
||||
if *tag.name == b"link" || *tag.name == b"base" {
|
||||
self.set_last_start_tag(Some(&tag.name));
|
||||
emit = true;
|
||||
} else {
|
||||
self.set_last_start_tag(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Only emit the tags we want to parse.
|
||||
if emit {
|
||||
self.emit_token(token);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn push_tag_name(&mut self, s: &[u8]) {
|
||||
match self.current_token {
|
||||
Some(
|
||||
FaviconToken::StartTag(StartTag {
|
||||
ref mut name,
|
||||
..
|
||||
})
|
||||
| FaviconToken::EndTag(EndTag {
|
||||
ref mut name,
|
||||
..
|
||||
}),
|
||||
) => {
|
||||
name.extend(s);
|
||||
}
|
||||
_ => debug_assert!(false),
|
||||
}
|
||||
}
|
||||
|
||||
fn init_attribute(&mut self) {
|
||||
self.flush_current_attribute();
|
||||
self.current_attribute = Some(Default::default());
|
||||
}
|
||||
|
||||
fn push_attribute_name(&mut self, s: &[u8]) {
|
||||
self.current_attribute.as_mut().unwrap().0.extend(s);
|
||||
}
|
||||
|
||||
fn push_attribute_value(&mut self, s: &[u8]) {
|
||||
self.current_attribute.as_mut().unwrap().1.extend(s);
|
||||
}
|
||||
|
||||
fn current_is_appropriate_end_tag_token(&mut self) -> bool {
|
||||
match self.current_token {
|
||||
Some(FaviconToken::EndTag(ref tag)) => !self.last_start_tag.is_empty() && self.last_start_tag == tag.name,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
// We do not want and need these parts of the HTML document
|
||||
// These will be skipped and ignored during the tokenization and iteration.
|
||||
fn emit_current_comment(&mut self) {}
|
||||
fn emit_current_doctype(&mut self) {}
|
||||
fn emit_eof(&mut self) {}
|
||||
fn emit_error(&mut self, _: html5gum::Error) {}
|
||||
fn emit_string(&mut self, _: &[u8]) {}
|
||||
fn init_comment(&mut self) {}
|
||||
fn init_doctype(&mut self) {}
|
||||
fn push_comment(&mut self, _: &[u8]) {}
|
||||
fn push_doctype_name(&mut self, _: &[u8]) {}
|
||||
fn push_doctype_public_identifier(&mut self, _: &[u8]) {}
|
||||
fn push_doctype_system_identifier(&mut self, _: &[u8]) {}
|
||||
fn set_doctype_public_identifier(&mut self, _: &[u8]) {}
|
||||
fn set_doctype_system_identifier(&mut self, _: &[u8]) {}
|
||||
fn set_force_quirks(&mut self) {}
|
||||
fn set_self_closing(&mut self) {}
|
||||
}
|
||||
|
||||
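Editor's note: an `Emitter` implementation like this is plugged into html5gum via `Tokenizer::new_with_emitter`, exactly as `get_icon_url()` does above. For reference, a minimal sketch of driving html5gum's stock tokenizer over the same kind of input (using the crate's default emitter so the snippet stands alone; the HTML string is illustrative):

```rust
use html5gum::{Token, Tokenizer};

fn main() {
    let html = r#"<head><link rel="icon" href="/favicon.ico"></head>"#;
    // Tokenizer::new uses the default emitter; infallible() unwraps the
    // error-free token stream, the same call chain the diff uses.
    for token in Tokenizer::new(html).infallible() {
        if let Token::StartTag(tag) = token {
            // Prints the raw tag names: "head", then "link".
            println!("start tag: {:?}", tag.name);
        }
    }
}
```

A custom emitter like `FaviconEmitter` simply skips producing tokens for everything except `<link>`, `<base>`, and `</head>`, which is where the parsing speedup comes from.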
src/api/identity.rs

@@ -1,16 +1,17 @@
 use chrono::Utc;
 use num_traits::FromPrimitive;
+use rocket::serde::json::Json;
 use rocket::{
-    request::{Form, FormItems, FromForm},
+    form::{Form, FromForm},
     Route,
 };
-use rocket_contrib::json::Json;
 use serde_json::Value;

 use crate::{
     api::{
         core::accounts::{PreloginData, _prelogin},
         core::two_factor::{duo, email, email::EmailTokenData, yubikey},
-        ApiResult, EmptyResult, JsonResult,
+        ApiResult, EmptyResult, JsonResult, JsonUpcase,
     },
     auth::ClientIp,
     db::{models::*, DbConn},
@@ -19,17 +20,17 @@ use crate::{
 };

 pub fn routes() -> Vec<Route> {
-    routes![login]
+    routes![login, prelogin]
 }

|
||||
#[post("/connect/token", data = "<data>")]
|
||||
fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
|
||||
async fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
|
||||
let data: ConnectData = data.into_inner();
|
||||
|
||||
match data.grant_type.as_ref() {
|
||||
"refresh_token" => {
|
||||
_check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
|
||||
_refresh_login(data, conn)
|
||||
_refresh_login(data, conn).await
|
||||
}
|
||||
"password" => {
|
||||
_check_is_some(&data.client_id, "client_id cannot be blank")?;
|
||||
@@ -41,34 +42,34 @@ fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
|
||||
_check_is_some(&data.device_name, "device_name cannot be blank")?;
|
||||
_check_is_some(&data.device_type, "device_type cannot be blank")?;
|
||||
|
||||
_password_login(data, conn, &ip)
|
||||
_password_login(data, conn, &ip).await
|
||||
}
|
||||
"client_credentials" => {
|
||||
_check_is_some(&data.client_id, "client_id cannot be blank")?;
|
||||
_check_is_some(&data.client_secret, "client_secret cannot be blank")?;
|
||||
_check_is_some(&data.scope, "scope cannot be blank")?;
|
||||
|
||||
_api_key_login(data, conn, &ip)
|
||||
_api_key_login(data, conn, &ip).await
|
||||
}
|
||||
t => err!("Invalid type", t),
|
||||
}
|
||||
}
|
||||
|
||||
fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
|
||||
async fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
|
||||
// Extract token
|
||||
let token = data.refresh_token.unwrap();
|
||||
|
||||
// Get device by refresh token
|
||||
let mut device = Device::find_by_refresh_token(&token, &conn).map_res("Invalid refresh token")?;
|
||||
let mut device = Device::find_by_refresh_token(&token, &conn).await.map_res("Invalid refresh token")?;
|
||||
|
||||
let scope = "api offline_access";
|
||||
let scope_vec = vec!["api".into(), "offline_access".into()];
|
||||
|
||||
// Common
|
||||
let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
|
||||
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn);
|
||||
let user = User::find_by_uuid(&device.user_uuid, &conn).await.unwrap();
|
||||
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn).await;
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
|
||||
device.save(&conn)?;
|
||||
device.save(&conn).await?;
|
||||
|
||||
Ok(Json(json!({
|
||||
"access_token": access_token,
|
||||
@@ -86,7 +87,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
|
||||
})))
|
||||
}
|
||||
|
||||
-fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
+async fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
     // Validate scope
     let scope = data.scope.as_ref().unwrap();
     if scope != "api offline_access" {
@@ -98,8 +99,8 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
     crate::ratelimit::check_limit_login(&ip.ip)?;

     // Get the user
-    let username = data.username.as_ref().unwrap();
-    let user = match User::find_by_mail(username, &conn) {
+    let username = data.username.as_ref().unwrap().trim();
+    let user = match User::find_by_mail(username, &conn).await {
         Some(user) => user,
         None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
     };
@@ -130,11 +131,11 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
         user.last_verifying_at = Some(now);
         user.login_verify_count += 1;

-        if let Err(e) = user.save(&conn) {
+        if let Err(e) = user.save(&conn).await {
             error!("Error updating user: {:#?}", e);
         }

-        if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
+        if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
             error!("Error auto-sending email verification email: {:#?}", e);
         }
     }
@@ -144,12 +145,12 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
         err!("Please verify your email before trying again.", format!("IP: {}. Username: {}.", ip.ip, username))
     }

-    let (mut device, new_device) = get_device(&data, &conn, &user);
+    let (mut device, new_device) = get_device(&data, &conn, &user).await;

-    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, &conn)?;
+    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, &conn).await?;

     if CONFIG.mail_enabled() && new_device {
-        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
+        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
             error!("Error sending new device email: {:#?}", e);

             if CONFIG.require_device_email() {
@@ -159,9 +160,9 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
     }

     // Common
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn);
+    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn).await;
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
-    device.save(&conn)?;
+    device.save(&conn).await?;

     let mut result = json!({
         "access_token": access_token,
@@ -187,7 +188,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
     Ok(Json(result))
 }

-fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
+async fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
     // Validate scope
     let scope = data.scope.as_ref().unwrap();
     if scope != "api" {
@@ -204,7 +205,7 @@ fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
         Some(uuid) => uuid,
         None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
     };
-    let user = match User::find_by_uuid(user_uuid, &conn) {
+    let user = match User::find_by_uuid(user_uuid, &conn).await {
         Some(user) => user,
         None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
     };
@@ -220,11 +221,11 @@ fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
         err!("Incorrect client_secret", format!("IP: {}. Username: {}.", ip.ip, user.email))
     }

-    let (mut device, new_device) = get_device(&data, &conn, &user);
+    let (mut device, new_device) = get_device(&data, &conn, &user).await;

     if CONFIG.mail_enabled() && new_device {
         let now = Utc::now().naive_utc();
-        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
+        if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
             error!("Error sending new device email: {:#?}", e);

             if CONFIG.require_device_email() {
@@ -234,9 +235,9 @@ fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
     }

     // Common
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn);
+    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn).await;
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
-    device.save(&conn)?;
+    device.save(&conn).await?;

     info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);

@@ -258,7 +259,7 @@ fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
 }

 /// Retrieves an existing device or creates a new device from ConnectData and the User
-fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) {
+async fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) {
     // On iOS, device_type sends "iOS", on others it sends a number
     let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
     let device_id = data.device_identifier.clone().expect("No device id provided");
@@ -266,17 +267,8 @@ fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool)

     let mut new_device = false;
     // Find device or create new
-    let device = match Device::find_by_uuid(&device_id, conn) {
-        Some(device) => {
-            // Check if owned device, and recreate if not
-            if device.user_uuid != user.uuid {
-                info!("Device exists but is owned by another user. The old device will be discarded");
-                new_device = true;
-                Device::new(device_id, user.uuid.clone(), device_name, device_type)
-            } else {
-                device
-            }
-        }
+    let device = match Device::find_by_uuid_and_user(&device_id, &user.uuid, conn).await {
+        Some(device) => device,
         None => {
             new_device = true;
             Device::new(device_id, user.uuid.clone(), device_name, device_type)
@@ -286,28 +278,28 @@ fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool)
     (device, new_device)
 }

-fn twofactor_auth(
+async fn twofactor_auth(
     user_uuid: &str,
     data: &ConnectData,
     device: &mut Device,
     ip: &ClientIp,
     conn: &DbConn,
 ) -> ApiResult<Option<String>> {
-    let twofactors = TwoFactor::find_by_user(user_uuid, conn);
+    let twofactors = TwoFactor::find_by_user(user_uuid, conn).await;

     // No twofactor token if twofactor is disabled
     if twofactors.is_empty() {
         return Ok(None);
     }

-    TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn)?;
+    TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?;

     let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
     let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one

     let twofactor_code = match data.two_factor_token {
         Some(ref code) => code,
-        None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA token not provided"),
+        None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, "2FA token not provided"),
     };

     let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);
@@ -320,16 +312,17 @@ fn twofactor_auth(

     match TwoFactorType::from_i32(selected_id) {
         Some(TwoFactorType::Authenticator) => {
-            _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?
+            _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn).await?
         }
-        Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
-        Some(TwoFactorType::Webauthn) => _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn)?,
+        Some(TwoFactorType::Webauthn) => {
+            _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await?
+        }
         Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
         Some(TwoFactorType::Duo) => {
-            _tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?
+            _tf::duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
         }
         Some(TwoFactorType::Email) => {
-            _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?
+            _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn).await?
         }

         Some(TwoFactorType::Remember) => {
@@ -338,14 +331,17 @@ fn twofactor_auth(
                     remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
                 }
                 _ => {
-                    err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA Remember token not provided")
+                    err_json!(
+                        _json_err_twofactor(&twofactor_ids, user_uuid, conn).await?,
+                        "2FA Remember token not provided"
+                    )
                 }
             }
        }
         _ => err!("Invalid two factor provider"),
     }

-    TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn)?;
+    TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?;

     if !CONFIG.disable_2fa_remember() && remember == 1 {
         Ok(Some(device.refresh_twofactor_remember()))
@@ -359,7 +355,7 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
     tf.map(|t| t.data).map_res("Two factor doesn't exist")
 }

-fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
+async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
     use crate::api::core::two_factor;

     let mut result = json!({
@@ -375,38 +371,18 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
         match TwoFactorType::from_i32(*provider) {
             Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }

-            Some(TwoFactorType::U2f) if CONFIG.domain_set() => {
-                let request = two_factor::u2f::generate_u2f_login(user_uuid, conn)?;
-                let mut challenge_list = Vec::new();
-
-                for key in request.registered_keys {
-                    challenge_list.push(json!({
-                        "appId": request.app_id,
-                        "challenge": request.challenge,
-                        "version": key.version,
-                        "keyHandle": key.key_handle,
-                    }));
-                }
-
-                let challenge_list_str = serde_json::to_string(&challenge_list).unwrap();
-
-                result["TwoFactorProviders2"][provider.to_string()] = json!({
-                    "Challenges": challenge_list_str,
-                });
-            }
-
             Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
-                let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn)?;
+                let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?;
                 result["TwoFactorProviders2"][provider.to_string()] = request.0;
             }

             Some(TwoFactorType::Duo) => {
-                let email = match User::find_by_uuid(user_uuid, conn) {
+                let email = match User::find_by_uuid(user_uuid, conn).await {
                     Some(u) => u.email,
                     None => err!("User does not exist"),
                 };

-                let (signature, host) = duo::generate_duo_signature(&email, conn)?;
+                let (signature, host) = duo::generate_duo_signature(&email, conn).await?;

                 result["TwoFactorProviders2"][provider.to_string()] = json!({
                     "Host": host,
@@ -415,7 +391,7 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
             }

             Some(tf_type @ TwoFactorType::YubiKey) => {
-                let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) {
+                let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
                     Some(tf) => tf,
                     None => err!("No YubiKey devices registered"),
                 };
@@ -430,14 +406,14 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
             Some(tf_type @ TwoFactorType::Email) => {
                 use crate::api::core::two_factor as _tf;

-                let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) {
+                let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
                     Some(tf) => tf,
                     None => err!("No twofactor email registered"),
                 };

                 // Send email immediately if email is the only 2FA option
                 if providers.len() == 1 {
-                    _tf::email::send_token(user_uuid, conn)?
+                    _tf::email::send_token(user_uuid, conn).await?
                 }

                 let email_data = EmailTokenData::from_json(&twofactor.data)?;
@@ -453,68 +429,65 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
     Ok(result)
 }

+#[post("/accounts/prelogin", data = "<data>")]
+async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
+    _prelogin(data, conn).await
+}

// https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
|
||||
// https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
|
||||
#[derive(Debug, Clone, Default)]
|
||||
#[derive(Debug, Clone, Default, FromForm)]
|
||||
#[allow(non_snake_case)]
|
||||
struct ConnectData {
|
||||
// refresh_token, password, client_credentials (API key)
|
||||
grant_type: String,
|
||||
#[field(name = uncased("grant_type"))]
|
||||
#[field(name = uncased("granttype"))]
|
||||
grant_type: String, // refresh_token, password, client_credentials (API key)
|
    // Needed for grant_type="refresh_token"
    #[field(name = uncased("refresh_token"))]
    #[field(name = uncased("refreshtoken"))]
    refresh_token: Option<String>,

    // Needed for grant_type = "password" | "client_credentials"
    client_id: Option<String>, // web, cli, desktop, browser, mobile
    client_secret: Option<String>, // API key login (cli only)
    #[field(name = uncased("client_id"))]
    #[field(name = uncased("clientid"))]
    client_id: Option<String>, // web, cli, desktop, browser, mobile
    #[field(name = uncased("client_secret"))]
    #[field(name = uncased("clientsecret"))]
    client_secret: Option<String>,
    #[field(name = uncased("password"))]
    password: Option<String>,
    #[field(name = uncased("scope"))]
    scope: Option<String>,
    #[field(name = uncased("username"))]
    username: Option<String>,

    #[field(name = uncased("device_identifier"))]
    #[field(name = uncased("deviceidentifier"))]
    device_identifier: Option<String>,
    #[field(name = uncased("device_name"))]
    #[field(name = uncased("devicename"))]
    device_name: Option<String>,
    #[field(name = uncased("device_type"))]
    #[field(name = uncased("devicetype"))]
    device_type: Option<String>,
    device_push_token: Option<String>, // Unused; mobile device push not yet supported.
    #[allow(unused)]
    #[field(name = uncased("device_push_token"))]
    #[field(name = uncased("devicepushtoken"))]
    _device_push_token: Option<String>, // Unused; mobile device push not yet supported.

    // Needed for two-factor auth
    #[field(name = uncased("two_factor_provider"))]
    #[field(name = uncased("twofactorprovider"))]
    two_factor_provider: Option<i32>,
    #[field(name = uncased("two_factor_token"))]
    #[field(name = uncased("twofactortoken"))]
    two_factor_token: Option<String>,
    #[field(name = uncased("two_factor_remember"))]
    #[field(name = uncased("twofactorremember"))]
    two_factor_remember: Option<i32>,
}

impl<'f> FromForm<'f> for ConnectData {
    type Error = String;

    fn from_form(items: &mut FormItems<'f>, _strict: bool) -> Result<Self, Self::Error> {
        let mut form = Self::default();
        for item in items {
            let (key, value) = item.key_value_decoded();
            let mut normalized_key = key.to_lowercase();
            normalized_key.retain(|c| c != '_'); // Remove '_'

            match normalized_key.as_ref() {
                "granttype" => form.grant_type = value,
                "refreshtoken" => form.refresh_token = Some(value),
                "clientid" => form.client_id = Some(value),
                "clientsecret" => form.client_secret = Some(value),
                "password" => form.password = Some(value),
                "scope" => form.scope = Some(value),
                "username" => form.username = Some(value),
                "deviceidentifier" => form.device_identifier = Some(value),
                "devicename" => form.device_name = Some(value),
                "devicetype" => form.device_type = Some(value),
                "devicepushtoken" => form.device_push_token = Some(value),
                "twofactorprovider" => form.two_factor_provider = value.parse().ok(),
                "twofactortoken" => form.two_factor_token = Some(value),
                "twofactorremember" => form.two_factor_remember = value.parse().ok(),
                key => warn!("Detected unexpected parameter during login: {}", key),
            }
        }

        Ok(form)
    }
}
fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
    if value.is_none() {
        err!(msg)
    }
    Ok(())
}
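The stack of `#[field(name = uncased(...))]` attributes is what replaces the hand-rolled `FromForm` impl above: Rocket 0.5's derive accepts multiple names per field, and `uncased` makes each one match case-insensitively, covering both the snake_case and the squashed-lowercase spellings the various clients send. A minimal sketch of the same technique, assuming a Rocket 0.5 dependency (the struct and field set here are illustrative, not part of the diff):

    use rocket::form::FromForm;

    // Accepts "client_id", "clientid", "CLIENT_ID", "ClientId", ... because
    // `uncased` makes the comparison case-insensitive and both aliases are listed.
    #[derive(FromForm, Debug)]
    struct LoginForm {
        #[field(name = uncased("client_id"))]
        #[field(name = uncased("clientid"))]
        client_id: Option<String>,
        #[field(name = uncased("grant_type"))]
        #[field(name = uncased("granttype"))]
        grant_type: String,
    }

With the derive doing the normalization, unknown parameters are simply ignored rather than logged, which is why the manual `warn!` arm disappears along with the impl.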
@@ -5,11 +5,12 @@ mod identity;
mod notifications;
mod web;

use rocket_contrib::json::Json;
use rocket::serde::json::Json;
use serde_json::Value;

pub use crate::api::{
    admin::routes as admin_routes,
    core::catchers as core_catchers,
    core::purge_sends,
    core::purge_trashed_ciphers,
    core::routes as core_routes,
@@ -19,6 +20,7 @@ pub use crate::api::{
    identity::routes as identity_routes,
    notifications::routes as notifications_routes,
    notifications::{start_notification_server, Notify, UpdateType},
    web::catchers as web_catchers,
    web::routes as web_routes,
};
use crate::util;
@@ -1,19 +1,41 @@
use std::sync::atomic::{AtomicBool, Ordering};
use std::{
    net::SocketAddr,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
    time::Duration,
};

use rocket::Route;
use rocket_contrib::json::Json;
use chrono::NaiveDateTime;
use futures::{SinkExt, StreamExt};
use rmpv::Value;
use rocket::{serde::json::Json, Route};
use serde_json::Value as JsonValue;
use tokio::{
    net::{TcpListener, TcpStream},
    sync::mpsc::Sender,
};
use tokio_tungstenite::{
    accept_hdr_async,
    tungstenite::{handshake, Message},
};

use crate::{api::EmptyResult, auth::Headers, Error, CONFIG};
use crate::{
    api::EmptyResult,
    auth::Headers,
    db::models::{Cipher, Folder, Send, User},
    Error, CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![negotiate, websockets_err]
}

static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);

#[get("/hub")]
fn websockets_err() -> EmptyResult {
    static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);

    if CONFIG.websocket_enabled()
        && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
    {
@@ -55,19 +77,6 @@ fn negotiate(_headers: Headers) -> Json<JsonValue> {
//
// Websockets server
//
use std::io;
use std::sync::Arc;
use std::thread;

use ws::{self, util::Token, Factory, Handler, Handshake, Message, Sender};

use chashmap::CHashMap;
use chrono::NaiveDateTime;
use serde_json::from_str;

use crate::db::models::{Cipher, Folder, Send, User};

use rmpv::Value;

fn serialize(val: Value) -> Vec<u8> {
    use rmpv::encode::write_value;
@@ -118,192 +127,49 @@ fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
    }
}

// Server WebSocket handler
pub struct WsHandler {
    out: Sender,
    user_uuid: Option<String>,
    users: WebSocketUsers,
}

const RECORD_SEPARATOR: u8 = 0x1e;
const INITIAL_RESPONSE: [u8; 3] = [0x7b, 0x7d, RECORD_SEPARATOR]; // {, }, <RS>

#[derive(Deserialize)]
struct InitialMessage {
    protocol: String,
#[derive(Deserialize, Copy, Clone, Eq, PartialEq)]
struct InitialMessage<'a> {
    protocol: &'a str,
    version: i32,
}
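Both the old and the new server speak SignalR's handshake framing: every JSON message is terminated by the 0x1E record separator, and `INITIAL_RESPONSE` is the bytes `{`, `}`, RS, i.e. an empty JSON object framed that way as the handshake acknowledgement. A small self-contained sketch of that framing, with no WebSocket library involved:

    const RECORD_SEPARATOR: u8 = 0x1e;

    // Frame a JSON payload the way SignalR expects: JSON bytes + 0x1E.
    fn frame(json: &str) -> Vec<u8> {
        let mut out = json.as_bytes().to_vec();
        out.push(RECORD_SEPARATOR);
        out
    }

    // Strip the trailing separator before deserializing, mirroring the
    // `strip_suffix(RECORD_SEPARATOR as char)` call in the new handler.
    fn unframe(raw: &str) -> &str {
        raw.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(raw)
    }

    fn main() {
        let framed = frame(r#"{"protocol":"messagepack","version":1}"#);
        assert_eq!(*framed.last().unwrap(), RECORD_SEPARATOR);
        assert_eq!(
            unframe(std::str::from_utf8(&framed).unwrap()),
            r#"{"protocol":"messagepack","version":1}"#
        );
    }

Making `InitialMessage` borrow a `&str` and derive `Eq`/`PartialEq` is what allows the new code to compare an incoming handshake against a `static INITIAL_MESSAGE` value instead of destructuring fields.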
const PING_MS: u64 = 15_000;
const PING: Token = Token(1);

const ACCESS_TOKEN_KEY: &str = "access_token=";

impl WsHandler {
    fn err(&self, msg: &'static str) -> ws::Result<()> {
        self.out.close(ws::CloseCode::Invalid)?;

        // We need to specifically return an IO error so ws closes the connection
        let io_error = io::Error::from(io::ErrorKind::InvalidData);
        Err(ws::Error::new(ws::ErrorKind::Io(io_error), msg))
    }

    fn get_request_token(&self, hs: Handshake) -> Option<String> {
        use std::str::from_utf8;

        // Verify we have a token header
        if let Some(header_value) = hs.request.header("Authorization") {
            if let Ok(converted) = from_utf8(header_value) {
                if let Some(token_part) = converted.split("Bearer ").nth(1) {
                    return Some(token_part.into());
                }
            }
        };

        // Otherwise verify the query parameter value
        let path = hs.request.resource();
        if let Some(params) = path.split('?').nth(1) {
            let params_iter = params.split('&').take(1);
            for val in params_iter {
                if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
                    return Some(stripped.into());
                }
            }
        };

        None
    }
}

impl Handler for WsHandler {
    fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
        // Path == "/notifications/hub?id=<id>==&access_token=<access_token>"
        //
        // We don't use `id`, and as of around 2020-03-25, the official clients
        // no longer seem to pass `id` (only `access_token`).

        // Get user token from header or query parameter
        let access_token = match self.get_request_token(hs) {
            Some(token) => token,
            _ => return self.err("Missing access token"),
        };

        // Validate the user
        use crate::auth;
        let claims = match auth::decode_login(access_token.as_str()) {
            Ok(claims) => claims,
            Err(_) => return self.err("Invalid access token provided"),
        };

        // Assign the user to the handler
        let user_uuid = claims.sub;
        self.user_uuid = Some(user_uuid.clone());

        // Add the current Sender to the user list
        let handler_insert = self.out.clone();
        let handler_update = self.out.clone();

        self.users.map.upsert(user_uuid, || vec![handler_insert], |ref mut v| v.push(handler_update));

        // Schedule a ping to keep the connection alive
        self.out.timeout(PING_MS, PING)
    }

    fn on_message(&mut self, msg: Message) -> ws::Result<()> {
        if let Message::Text(text) = msg.clone() {
            let json = &text[..text.len() - 1]; // Remove last char

            if let Ok(InitialMessage {
                protocol,
                version,
            }) = from_str::<InitialMessage>(json)
            {
                if &protocol == "messagepack" && version == 1 {
                    return self.out.send(&INITIAL_RESPONSE[..]); // Respond to initial message
                }
            }
        }

        // If it's not the initial message, just echo the message
        self.out.send(msg)
    }

    fn on_timeout(&mut self, event: Token) -> ws::Result<()> {
        if event == PING {
            // send ping
            self.out.send(create_ping())?;

            // reschedule the timeout
            self.out.timeout(PING_MS, PING)
        } else {
            Ok(())
        }
    }
}

struct WsFactory {
    pub users: WebSocketUsers,
}

impl WsFactory {
    pub fn init() -> Self {
        WsFactory {
            users: WebSocketUsers {
                map: Arc::new(CHashMap::new()),
            },
        }
    }
}

impl Factory for WsFactory {
    type Handler = WsHandler;

    fn connection_made(&mut self, out: Sender) -> Self::Handler {
        WsHandler {
            out,
            user_uuid: None,
            users: self.users.clone(),
        }
    }

    fn connection_lost(&mut self, handler: Self::Handler) {
        // Remove handler
        if let Some(user_uuid) = &handler.user_uuid {
            if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) {
                if let Some(pos) = user_conn.iter().position(|x| x == &handler.out) {
                    user_conn.remove(pos);
                }
            }
        }
    }
}
static INITIAL_MESSAGE: InitialMessage<'static> = InitialMessage {
    protocol: "messagepack",
    version: 1,
};
// We attach the UUID to the sender so we can differentiate them when we need to remove them from the Vec
type UserSenders = (uuid::Uuid, Sender<Message>);
#[derive(Clone)]
pub struct WebSocketUsers {
    map: Arc<CHashMap<String, Vec<Sender>>>,
    map: Arc<dashmap::DashMap<String, Vec<UserSenders>>>,
}

impl WebSocketUsers {
    fn send_update(&self, user_uuid: &str, data: &[u8]) -> ws::Result<()> {
        if let Some(user) = self.map.get(user_uuid) {
            for sender in user.iter() {
                sender.send(data)?;
    async fn send_update(&self, user_uuid: &str, data: &[u8]) {
        if let Some(user) = self.map.get(user_uuid).map(|v| v.clone()) {
            for (_, sender) in user.iter() {
                if sender.send(Message::binary(data)).await.is_err() {
                    // TODO: Delete from map here too?
                }
            }
        }
        Ok(())
    }

    // NOTE: The last modified date needs to be updated before calling these methods
    pub fn send_user_update(&self, ut: UpdateType, user: &User) {
    pub async fn send_user_update(&self, ut: UpdateType, user: &User) {
        let data = create_update(
            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
            ut,
        );

        self.send_update(&user.uuid, &data).ok();
        self.send_update(&user.uuid, &data).await;
    }

    pub fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
    pub async fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
        let data = create_update(
            vec![
                ("Id".into(), folder.uuid.clone().into()),
@@ -313,10 +179,10 @@ impl WebSocketUsers {
            ut,
        );

        self.send_update(&folder.user_uuid, &data).ok();
        self.send_update(&folder.user_uuid, &data).await;
    }

    pub fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &[String]) {
    pub async fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &[String]) {
        let user_uuid = convert_option(cipher.user_uuid.clone());
        let org_uuid = convert_option(cipher.organization_uuid.clone());

@@ -332,11 +198,11 @@ impl WebSocketUsers {
        );

        for uuid in user_uuids {
            self.send_update(uuid, &data).ok();
            self.send_update(uuid, &data).await;
        }
    }

    pub fn send_send_update(&self, ut: UpdateType, send: &Send, user_uuids: &[String]) {
    pub async fn send_send_update(&self, ut: UpdateType, send: &Send, user_uuids: &[String]) {
        let user_uuid = convert_option(send.user_uuid.clone());

        let data = create_update(
@@ -349,7 +215,7 @@ impl WebSocketUsers {
        );

        for uuid in user_uuids {
            self.send_update(uuid, &data).ok();
            self.send_update(uuid, &data).await;
        }
    }
}
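The data-structure swap above is the core of the rewrite: instead of `ws` senders in a `CHashMap`, each connection now registers a `(Uuid, mpsc::Sender)` pair in a `DashMap`, so a disconnecting client can remove exactly its own entry, and `send_update` clones the sender list before awaiting so no map shard lock is held across an await point. A minimal sketch of that registry pattern, with the `Msg` alias standing in for tungstenite's `Message` type:

    use std::sync::Arc;
    use dashmap::DashMap;
    use tokio::sync::mpsc;
    use uuid::Uuid;

    type Msg = Vec<u8>; // stand-in for tungstenite::Message

    #[derive(Clone)]
    struct Registry {
        map: Arc<DashMap<String, Vec<(Uuid, mpsc::Sender<Msg>)>>>,
    }

    impl Registry {
        // Register a connection; the returned id is used for precise cleanup.
        fn register(&self, user: &str, tx: mpsc::Sender<Msg>) -> Uuid {
            let id = Uuid::new_v4();
            self.map.entry(user.to_string()).or_default().push((id, tx));
            id
        }

        // Clone the sender list first so no map lock is held across `.await`.
        async fn send(&self, user: &str, data: Msg) {
            let senders = self.map.get(user).map(|v| v.clone());
            if let Some(senders) = senders {
                for (_, tx) in senders {
                    let _ = tx.send(data.clone()).await;
                }
            }
        }

        fn unregister(&self, user: &str, id: Uuid) {
            if let Some(mut v) = self.map.get_mut(user) {
                v.retain(|(entry, _)| *entry != id);
            }
        }
    }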
@@ -392,7 +258,7 @@ fn create_ping() -> Vec<u8> {
}

#[allow(dead_code)]
#[derive(PartialEq)]
#[derive(Eq, PartialEq)]
pub enum UpdateType {
    CipherUpdate = 0,
    CipherCreate = 1,
@@ -416,28 +282,145 @@ pub enum UpdateType {
    None = 100,
}

use rocket::State;
pub type Notify<'a> = State<'a, WebSocketUsers>;
pub type Notify<'a> = &'a rocket::State<WebSocketUsers>;
pub fn start_notification_server() -> WebSocketUsers {
    let factory = WsFactory::init();
    let users = factory.users.clone();
    let users = WebSocketUsers {
        map: Arc::new(dashmap::DashMap::new()),
    };

    if CONFIG.websocket_enabled() {
        thread::spawn(move || {
            let mut settings = ws::Settings::default();
            settings.max_connections = 500;
            settings.queue_size = 2;
            settings.panic_on_internal = false;
        let users2 = users.clone();
        tokio::spawn(async move {
            let addr = (CONFIG.websocket_address(), CONFIG.websocket_port());
            info!("Starting WebSockets server on {}:{}", addr.0, addr.1);
            let listener = TcpListener::bind(addr).await.expect("Can't listen on websocket port");

            ws::Builder::new()
                .with_settings(settings)
                .build(factory)
                .unwrap()
                .listen((CONFIG.websocket_address().as_str(), CONFIG.websocket_port()))
                .unwrap();
            let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>();
            CONFIG.set_ws_shutdown_handle(shutdown_tx);

            loop {
                tokio::select! {
                    Ok((stream, addr)) = listener.accept() => {
                        tokio::spawn(handle_connection(stream, users2.clone(), addr));
                    }

                    _ = &mut shutdown_rx => {
                        break;
                    }
                }
            }

            info!("Shutting down WebSockets server!")
        });
    }

    users
}
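The new server also owns its shutdown: the accept loop races `listener.accept()` against a oneshot receiver whose sender is stashed in the global config, so `Config::shutdown` (see the src/config.rs hunks further down) can stop the WebSocket task alongside Rocket. A condensed sketch of just that accept/shutdown skeleton (the address, port, and empty connection handler are placeholders):

    use tokio::net::TcpListener;
    use tokio::sync::oneshot;

    async fn run_ws_server(mut shutdown_rx: oneshot::Receiver<()>) -> std::io::Result<()> {
        let listener = TcpListener::bind(("127.0.0.1", 3012)).await?;
        loop {
            tokio::select! {
                Ok((stream, addr)) = listener.accept() => {
                    tokio::spawn(async move {
                        let _ = (stream, addr); // per-connection handling goes here
                    });
                }
                _ = &mut shutdown_rx => break, // fired by the stored sender
            }
        }
        Ok(())
    }

The caller keeps the matching `oneshot::Sender` (in vaultwarden it is stored through `CONFIG.set_ws_shutdown_handle`) and sends `()` to end the loop gracefully.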
async fn handle_connection(stream: TcpStream, users: WebSocketUsers, addr: SocketAddr) -> Result<(), Error> {
    let mut user_uuid: Option<String> = None;

    info!("Accepting WS connection from {addr}");

    // Accept connection, do initial handshake, validate auth token and get the user ID
    use handshake::server::{Request, Response};
    let mut stream = accept_hdr_async(stream, |req: &Request, res: Response| {
        if let Some(token) = get_request_token(req) {
            if let Ok(claims) = crate::auth::decode_login(&token) {
                user_uuid = Some(claims.sub);
                return Ok(res);
            }
        }
        Err(Response::builder().status(401).body(None).unwrap())
    })
    .await?;

    let user_uuid = user_uuid.expect("User UUID should be set after the handshake");

    // Add a channel to send messages to this client to the map
    let entry_uuid = uuid::Uuid::new_v4();
    let (tx, mut rx) = tokio::sync::mpsc::channel(100);
    users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx));

    let mut interval = tokio::time::interval(Duration::from_secs(15));
    loop {
        tokio::select! {
            res = stream.next() => {
                match res {
                    Some(Ok(message)) => {
                        // Respond to any pings
                        if let Message::Ping(ping) = message {
                            if stream.send(Message::Pong(ping)).await.is_err() {
                                break;
                            }
                            continue;
                        } else if let Message::Pong(_) = message {
                            /* Ignored */
                            continue;
                        }

                        // We should receive an initial message with the protocol and version, and we will reply to it
                        if let Message::Text(ref message) = message {
                            let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);

                            if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
                                stream.send(Message::binary(INITIAL_RESPONSE)).await?;
                                continue;
                            }
                        }

                        // Just echo anything else the client sends
                        if stream.send(message).await.is_err() {
                            break;
                        }
                    }
                    _ => break,
                }
            }

            res = rx.recv() => {
                match res {
                    Some(res) => {
                        if stream.send(res).await.is_err() {
                            break;
                        }
                    },
                    None => break,
                }
            }

            _ = interval.tick() => {
                if stream.send(Message::Ping(create_ping())).await.is_err() {
                    break;
                }
            }
        }
    }

    info!("Closing WS connection from {addr}");

    // Delete from map
    users.map.entry(user_uuid).or_default().retain(|(uuid, _)| uuid != &entry_uuid);
    Ok(())
}
fn get_request_token(req: &handshake::server::Request) -> Option<String> {
    const ACCESS_TOKEN_KEY: &str = "access_token=";

    if let Some(Ok(auth)) = req.headers().get("Authorization").map(|a| a.to_str()) {
        if let Some(token_part) = auth.strip_prefix("Bearer ") {
            return Some(token_part.to_owned());
        }
    }

    if let Some(params) = req.uri().query() {
        let params_iter = params.split('&').take(1);
        for val in params_iter {
            if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
                return Some(stripped.to_owned());
            }
        }
    }
    None
}
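Since `get_request_token` is now a free function over the handshake request, both extraction paths are easy to exercise in isolation. A hypothetical test sketch, assuming the `http` crate (tungstenite's server `Request` is an `http::Request<()>` re-export, so the builder below should produce the same type); note that the `take(1)` above means the query fallback only ever inspects the first parameter:

    #[cfg(test)]
    mod tests {
        use super::get_request_token;

        #[test]
        fn token_from_header_or_query() {
            // Bearer header wins when present.
            let with_header = http::Request::builder()
                .uri("/notifications/hub")
                .header("Authorization", "Bearer abc123")
                .body(())
                .unwrap();
            assert_eq!(get_request_token(&with_header), Some("abc123".to_string()));

            // Otherwise the token must be the first query parameter.
            let with_query = http::Request::builder()
                .uri("/notifications/hub?access_token=xyz789")
                .body(())
                .unwrap();
            assert_eq!(get_request_token(&with_query), Some("xyz789".to_string()));
        }
    }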
@@ -1,10 +1,11 @@
use std::path::{Path, PathBuf};

use rocket::{http::ContentType, response::content::Content, response::NamedFile, Route};
use rocket_contrib::json::Json;
use rocket::serde::json::Json;
use rocket::{fs::NamedFile, http::ContentType, Catcher, Route};
use serde_json::Value;

use crate::{
    api::core::now,
    error::Error,
    util::{Cached, SafeString},
    CONFIG,
@@ -20,17 +21,30 @@ pub fn routes() -> Vec<Route> {
    }
}

pub fn catchers() -> Vec<Catcher> {
    if CONFIG.web_vault_enabled() {
        catchers![not_found]
    } else {
        catchers![]
    }
}

#[catch(404)]
async fn not_found() -> Cached<Option<NamedFile>> {
    Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("404.html")).await.ok(), false)
}

#[get("/")]
fn web_index() -> Cached<Option<NamedFile>> {
    Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).ok(), false)
async fn web_index() -> Cached<Option<NamedFile>> {
    Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).await.ok(), false)
}

#[get("/app-id.json")]
fn app_id() -> Cached<Content<Json<Value>>> {
fn app_id() -> Cached<(ContentType, Json<Value>)> {
    let content_type = ContentType::new("application", "fido.trusted-apps+json");

    Cached::long(
        Content(
        (
            content_type,
            Json(json!({
                "trustedFacets": [
@@ -58,45 +72,37 @@ fn app_id() -> Cached<Content<Json<Value>>> {
}

#[get("/<p..>", rank = 10)] // Only match this if the other routes don't match
fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
    Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).ok(), true)
async fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
    Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true)
}

#[get("/attachments/<uuid>/<file_id>")]
fn attachments(uuid: SafeString, file_id: SafeString) -> Option<NamedFile> {
    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).ok()
async fn attachments(uuid: SafeString, file_id: SafeString) -> Option<NamedFile> {
    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).await.ok()
}

// We use DbConn here to let the alive healthcheck also verify the database connection.
use crate::db::DbConn;
#[get("/alive")]
fn alive(_conn: DbConn) -> Json<String> {
    use crate::util::format_date;
    use chrono::Utc;

    Json(format_date(&Utc::now().naive_utc()))
    now()
}

#[get("/vw_static/<filename>")]
fn static_files(filename: String) -> Result<Content<&'static [u8]>, Error> {
fn static_files(filename: String) -> Result<(ContentType, &'static [u8]), Error> {
    match filename.as_ref() {
        "mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
        "logo-gray.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
        "error-x.svg" => Ok(Content(ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
        "hibp.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/hibp.png"))),
        "vaultwarden-icon.png" => {
            Ok(Content(ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png")))
        }

        "bootstrap.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
        "bootstrap-native.js" => {
            Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js")))
        }
        "identicon.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))),
        "datatables.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
        "datatables.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
        "jquery-3.6.0.slim.js" => {
            Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.0.slim.js")))
        "mail-github.png" => Ok((ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
        "logo-gray.png" => Ok((ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
        "error-x.svg" => Ok((ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
        "hibp.png" => Ok((ContentType::PNG, include_bytes!("../static/images/hibp.png"))),
        "vaultwarden-icon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png"))),
        "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
        "bootstrap-native.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
        "identicon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))),
        "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
        "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
        "jquery-3.6.1.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.1.slim.js")))
        }
        _ => err!(format!("Static file not found: {}", filename)),
    }
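A recurring change in this file: Rocket 0.5 drops the `Content` wrapper type because a `(ContentType, R)` tuple is itself a responder, which is why `app_id` and `static_files` now return plain tuples. A minimal standalone route in the new style (the route path and body are illustrative):

    use rocket::http::ContentType;

    // In Rocket 0.5 a (ContentType, body) tuple implements Responder directly,
    // so no Content wrapper is needed.
    #[rocket::get("/ping")]
    fn ping() -> (ContentType, &'static str) {
        (ContentType::Plain, "pong")
    }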
323  src/auth.rs
@@ -1,19 +1,14 @@
//
// JWT Handling
//
use chrono::{Duration, Utc};
use num_traits::FromPrimitive;
use once_cell::sync::Lazy;

use jsonwebtoken::{self, Algorithm, DecodingKey, EncodingKey, Header};
use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
use serde::de::DeserializeOwned;
use serde::ser::Serialize;

use crate::{
    error::{Error, MapResult},
    util::read_file,
    CONFIG,
};
use crate::{error::Error, CONFIG};

const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

@@ -30,13 +25,13 @@ static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.
static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.domain_origin()));

static PRIVATE_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
    read_file(&CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key.\n{}", e))
    std::fs::read(&CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key.\n{}", e))
});
static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| {
    EncodingKey::from_rsa_pem(&PRIVATE_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{}", e))
});
static PUBLIC_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
    read_file(&CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key.\n{}", e))
    std::fs::read(&CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key.\n{}", e))
});
static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
    DecodingKey::from_rsa_pem(&PUBLIC_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{}", e))
@@ -55,18 +50,22 @@ pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
}

fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Error> {
    let validation = jsonwebtoken::Validation {
        leeway: 30, // 30 seconds
        validate_exp: true,
        validate_nbf: true,
        aud: None,
        iss: Some(issuer),
        sub: None,
        algorithms: vec![JWT_ALGORITHM],
    };
    let mut validation = jsonwebtoken::Validation::new(JWT_ALGORITHM);
    validation.leeway = 30; // 30 seconds
    validation.validate_exp = true;
    validation.validate_nbf = true;
    validation.set_issuer(&[issuer]);

    let token = token.replace(char::is_whitespace, "");
    jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation).map(|d| d.claims).map_res("Error decoding JWT")
    match jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation) {
        Ok(d) => Ok(d.claims),
        Err(err) => match *err.kind() {
            ErrorKind::InvalidToken => err!("Token is invalid"),
            ErrorKind::InvalidIssuer => err!("Issuer is invalid"),
            ErrorKind::ExpiredSignature => err!("Token has expired"),
            _ => err!("Error decoding JWT"),
        },
    }
}
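The new `decode_jwt` tracks jsonwebtoken's API change from a public-field `Validation` struct to `Validation::new` plus setters, and surfaces specific `ErrorKind`s as user-facing messages. A compact, self-contained usage sketch of that API (the claims struct, issuer string, and error type below are illustrative; vaultwarden itself decodes against its RS256 public key):

    use jsonwebtoken::{decode, errors::ErrorKind, Algorithm, DecodingKey, Validation};
    use serde::Deserialize;

    #[allow(dead_code)]
    #[derive(Deserialize)]
    struct Claims {
        exp: i64,
        iss: String,
        sub: String,
    }

    fn check(token: &str, key: &DecodingKey) -> Result<Claims, String> {
        let mut validation = Validation::new(Algorithm::RS256);
        validation.leeway = 30; // tolerate 30s of clock skew
        validation.set_issuer(&["example|login"]);

        match decode::<Claims>(token, key, &validation) {
            Ok(data) => Ok(data.claims),
            Err(err) => match err.kind() {
                ErrorKind::ExpiredSignature => Err("Token has expired".into()),
                ErrorKind::InvalidIssuer => Err("Issuer is invalid".into()),
                _ => Err("Error decoding JWT".into()),
            },
        }
    }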
pub fn decode_login(token: &str) -> Result<LoginJwtClaims, Error> {
@@ -153,9 +152,10 @@ pub fn generate_invite_claims(
    invited_by_email: Option<String>,
) -> InviteJwtClaims {
    let time_now = Utc::now().naive_utc();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    InviteJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
        iss: JWT_INVITE_ISSUER.to_string(),
        sub: uuid,
        email,
@@ -190,9 +190,10 @@ pub fn generate_emergency_access_invite_claims(
    grantor_email: Option<String>,
) -> EmergencyAccessInviteJwtClaims {
    let time_now = Utc::now().naive_utc();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    EmergencyAccessInviteJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
        iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
        sub: uuid,
        email,
@@ -216,9 +217,10 @@ pub struct BasicJwtClaims {

pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
    let time_now = Utc::now().naive_utc();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    BasicJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
        iss: JWT_DELETE_ISSUER.to_string(),
        sub: uuid,
    }
@@ -226,9 +228,10 @@ pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {

pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
    let time_now = Utc::now().naive_utc();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    BasicJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
        iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
        sub: uuid,
    }
@@ -257,7 +260,10 @@ pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
//
// Bearer token authentication
//
use rocket::request::{FromRequest, Outcome, Request};
use rocket::{
    outcome::try_outcome,
    request::{FromRequest, Outcome, Request},
};

use crate::db::{
    models::{CollectionUser, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
@@ -268,10 +274,11 @@ pub struct Host {
    pub host: String,
}

impl<'a, 'r> FromRequest<'a, 'r> for Host {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for Host {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = request.headers();

        // Get host
@@ -314,17 +321,14 @@ pub struct Headers {
    pub user: User,
}

impl<'a, 'r> FromRequest<'a, 'r> for Headers {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for Headers {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = request.headers();

        let host = match Host::from_request(request) {
            Outcome::Forward(_) => return Outcome::Forward(()),
            Outcome::Failure(f) => return Outcome::Failure(f),
            Outcome::Success(host) => host.host,
        };
        let host = try_outcome!(Host::from_request(request).await).host;

        // Get access_token
        let access_token: &str = match headers.get_one("Authorization") {
@@ -344,17 +348,17 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
        let device_uuid = claims.device;
        let user_uuid = claims.sub;

        let conn = match request.guard::<DbConn>() {
        let conn = match DbConn::from_request(request).await {
            Outcome::Success(conn) => conn,
            _ => err_handler!("Error getting DB"),
        };

        let device = match Device::find_by_uuid(&device_uuid, &conn) {
        let device = match Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &conn).await {
            Some(device) => device,
            None => err_handler!("Invalid device id"),
        };

        let user = match User::find_by_uuid(&user_uuid, &conn) {
        let user = match User::find_by_uuid(&user_uuid, &conn).await {
            Some(user) => user,
            None => err_handler!("Device has no user associated"),
        };
@@ -363,7 +367,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
        if let Some(stamp_exception) =
            user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
        {
            let current_route = match request.route().and_then(|r| r.name) {
            let current_route = match request.route().and_then(|r| r.name.as_deref()) {
                Some(name) => name,
                _ => err_handler!("Error getting current route for stamp exception"),
            };
@@ -376,7 +380,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
            // This prevents checking this stamp exception for new requests.
            let mut user = user;
            user.reset_stamp_exception();
            if let Err(e) = user.save(&conn) {
            if let Err(e) = user.save(&conn).await {
                error!("Error updating user: {:#?}", e);
            }
            err_handler!("Stamp exception is expired")
@@ -410,14 +414,14 @@ pub struct OrgHeaders {
// org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
fn get_org_id(request: &Request) -> Option<String> {
    if let Some(Ok(org_id)) = request.get_param::<String>(1) {
fn get_org_id(request: &Request<'_>) -> Option<String> {
    if let Some(Ok(org_id)) = request.param::<String>(1) {
        if uuid::Uuid::parse_str(&org_id).is_ok() {
            return Some(org_id);
        }
    }

    if let Some(Ok(org_id)) = request.get_query_value::<String>("organizationId") {
    if let Some(Ok(org_id)) = request.query_value::<String>("organizationId") {
        if uuid::Uuid::parse_str(&org_id).is_ok() {
            return Some(org_id);
        }
@@ -426,52 +430,48 @@ fn get_org_id(request: &Request) -> Option<String> {
    None
}

impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgHeaders {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<Headers>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                match get_org_id(request) {
                    Some(org_id) => {
                        let conn = match request.guard::<DbConn>() {
                            Outcome::Success(conn) => conn,
                            _ => err_handler!("Error getting DB"),
                        };
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(Headers::from_request(request).await);
        match get_org_id(request) {
            Some(org_id) => {
                let conn = match DbConn::from_request(request).await {
                    Outcome::Success(conn) => conn,
                    _ => err_handler!("Error getting DB"),
                };

                        let user = headers.user;
                        let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn) {
                            Some(user) => {
                                if user.status == UserOrgStatus::Confirmed as i32 {
                                    user
                                } else {
                                    err_handler!("The current user isn't confirmed member of the organization")
                                }
                            }
                            None => err_handler!("The current user isn't member of the organization"),
                        };

                        Outcome::Success(Self {
                            host: headers.host,
                            device: headers.device,
                            user,
                            org_user_type: {
                                if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
                                    org_usr_type
                                } else {
                                    // This should only happen if the DB is corrupted
                                    err_handler!("Unknown user type in the database")
                                }
                            },
                            org_user,
                            org_id,
                        })
                let user = headers.user;
                let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn).await {
                    Some(user) => {
                        if user.status == UserOrgStatus::Confirmed as i32 {
                            user
                        } else {
                            err_handler!("The current user isn't confirmed member of the organization")
                        }
                    }
                    _ => err_handler!("Error getting the organization id"),
                }
                    None => err_handler!("The current user isn't member of the organization"),
                };

                Outcome::Success(Self {
                    host: headers.host,
                    device: headers.device,
                    user,
                    org_user_type: {
                        if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
                            org_usr_type
                        } else {
                            // This should only happen if the DB is corrupted
                            err_handler!("Unknown user type in the database")
                        }
                    },
                    org_user,
                    org_id,
                })
            }
            _ => err_handler!("Error getting the organization id"),
        }
    }
}
@@ -483,25 +483,21 @@ pub struct AdminHeaders {
    pub org_user_type: UserOrgType,
}

impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for AdminHeaders {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<OrgHeaders>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                if headers.org_user_type >= UserOrgType::Admin {
                    Outcome::Success(Self {
                        host: headers.host,
                        device: headers.device,
                        user: headers.user,
                        org_user_type: headers.org_user_type,
                    })
                } else {
                    err_handler!("You need to be Admin or Owner to call this endpoint")
                }
            }
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(OrgHeaders::from_request(request).await);
        if headers.org_user_type >= UserOrgType::Admin {
            Outcome::Success(Self {
                host: headers.host,
                device: headers.device,
                user: headers.user,
                org_user_type: headers.org_user_type,
            })
        } else {
            err_handler!("You need to be Admin or Owner to call this endpoint")
        }
    }
}
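Every guard in this file gets the same mechanical translation: add `#[rocket::async_trait]`, move to the single-lifetime `FromRequest<'r>`, and collapse the nested match on the delegated guard with `try_outcome!`, which early-returns `Forward`/`Failure` the way `?` does for `Result`. A minimal sketch of a chained guard in the new style, written against the Rocket 0.5-rc API used in this diff (the header name and user model are illustrative):

    use rocket::outcome::try_outcome;
    use rocket::request::{FromRequest, Outcome, Request};

    struct User { name: String }
    struct AdminUser { user: User }

    #[rocket::async_trait]
    impl<'r> FromRequest<'r> for User {
        type Error = &'static str;
        async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
            match req.headers().get_one("X-User") {
                Some(name) => Outcome::Success(User { name: name.to_string() }),
                None => Outcome::Forward(()),
            }
        }
    }

    #[rocket::async_trait]
    impl<'r> FromRequest<'r> for AdminUser {
        type Error = &'static str;
        async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
            // try_outcome! forwards Forward/Failure from the inner guard.
            let user = try_outcome!(User::from_request(req).await);
            if user.name == "admin" {
                Outcome::Success(AdminUser { user })
            } else {
                Outcome::Failure((rocket::http::Status::Forbidden, "admins only"))
            }
        }
    }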
@@ -519,14 +515,14 @@ impl From<AdminHeaders> for Headers {
// col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"),
// but there could be cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
fn get_col_id(request: &Request) -> Option<String> {
    if let Some(Ok(col_id)) = request.get_param::<String>(3) {
fn get_col_id(request: &Request<'_>) -> Option<String> {
    if let Some(Ok(col_id)) = request.param::<String>(3) {
        if uuid::Uuid::parse_str(&col_id).is_ok() {
            return Some(col_id);
        }
    }

    if let Some(Ok(col_id)) = request.get_query_value::<String>("collectionId") {
    if let Some(Ok(col_id)) = request.query_value::<String>("collectionId") {
        if uuid::Uuid::parse_str(&col_id).is_ok() {
            return Some(col_id);
        }
@@ -545,46 +541,40 @@ pub struct ManagerHeaders {
    pub org_user_type: UserOrgType,
}

impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeaders {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for ManagerHeaders {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<OrgHeaders>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                if headers.org_user_type >= UserOrgType::Manager {
                    match get_col_id(request) {
                        Some(col_id) => {
                            let conn = match request.guard::<DbConn>() {
                                Outcome::Success(conn) => conn,
                                _ => err_handler!("Error getting DB"),
                            };
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(OrgHeaders::from_request(request).await);
        if headers.org_user_type >= UserOrgType::Manager {
            match get_col_id(request) {
                Some(col_id) => {
                    let conn = match DbConn::from_request(request).await {
                        Outcome::Success(conn) => conn,
                        _ => err_handler!("Error getting DB"),
                    };

                            if !headers.org_user.has_full_access() {
                                match CollectionUser::find_by_collection_and_user(
                                    &col_id,
                                    &headers.org_user.user_uuid,
                                    &conn,
                                ) {
                                    Some(_) => (),
                                    None => err_handler!("The current user isn't a manager for this collection"),
                                }
                            }
                    if !headers.org_user.has_full_access() {
                        match CollectionUser::find_by_collection_and_user(&col_id, &headers.org_user.user_uuid, &conn)
                            .await
                        {
                            Some(_) => (),
                            None => err_handler!("The current user isn't a manager for this collection"),
                        }
                    }
                }
                _ => err_handler!("Error getting the collection id"),
            }

            Outcome::Success(Self {
                host: headers.host,
                device: headers.device,
                user: headers.user,
                org_user_type: headers.org_user_type,
            })
        } else {
            err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
        }
                _ => err_handler!("Error getting the collection id"),
            }

            Outcome::Success(Self {
                host: headers.host,
                device: headers.device,
                user: headers.user,
                org_user_type: headers.org_user_type,
            })
        } else {
            err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
        }
    }
}
@@ -608,25 +598,21 @@ pub struct ManagerHeadersLoose {
    pub org_user_type: UserOrgType,
}

impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeadersLoose {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for ManagerHeadersLoose {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<OrgHeaders>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                if headers.org_user_type >= UserOrgType::Manager {
                    Outcome::Success(Self {
                        host: headers.host,
                        device: headers.device,
                        user: headers.user,
                        org_user_type: headers.org_user_type,
                    })
                } else {
                    err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
                }
            }
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(OrgHeaders::from_request(request).await);
        if headers.org_user_type >= UserOrgType::Manager {
            Outcome::Success(Self {
                host: headers.host,
                device: headers.device,
                user: headers.user,
                org_user_type: headers.org_user_type,
            })
        } else {
            err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
        }
    }
}
@@ -647,24 +633,20 @@ pub struct OwnerHeaders {
    pub user: User,
}

impl<'a, 'r> FromRequest<'a, 'r> for OwnerHeaders {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for OwnerHeaders {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<OrgHeaders>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                if headers.org_user_type == UserOrgType::Owner {
                    Outcome::Success(Self {
                        host: headers.host,
                        device: headers.device,
                        user: headers.user,
                    })
                } else {
                    err_handler!("You need to be Owner to call this endpoint")
                }
            }
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(OrgHeaders::from_request(request).await);
        if headers.org_user_type == UserOrgType::Owner {
            Outcome::Success(Self {
                host: headers.host,
                device: headers.device,
                user: headers.user,
            })
        } else {
            err_handler!("You need to be Owner to call this endpoint")
        }
    }
}
@@ -678,10 +660,11 @@ pub struct ClientIp {
    pub ip: IpAddr,
}

impl<'a, 'r> FromRequest<'a, 'r> for ClientIp {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for ClientIp {
    type Error = ();

    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let ip = if CONFIG._ip_header_enabled() {
            req.headers().get_one(&CONFIG.ip_header()).and_then(|ip| {
                match ip.find(',') {
125  src/config.rs
@@ -36,6 +36,9 @@ macro_rules! make_config {
pub struct Config { inner: RwLock<Inner> }

struct Inner {
    rocket_shutdown_handle: Option<rocket::Shutdown>,
    ws_shutdown_handle: Option<tokio::sync::oneshot::Sender<()>>,

    templates: Handlebars<'static>,
    config: ConfigItems,

@@ -56,13 +59,13 @@ macro_rules! make_config {
impl ConfigBuilder {
    #[allow(clippy::field_reassign_with_default)]
    fn from_env() -> Self {
        match dotenv::from_path(".env") {
        match dotenvy::from_path(get_env("ENV_FILE").unwrap_or_else(|| String::from(".env"))) {
            Ok(_) => (),
            Err(e) => match e {
                dotenv::Error::LineParse(msg, pos) => {
                dotenvy::Error::LineParse(msg, pos) => {
                    panic!("Error loading the .env file:\nNear {:?} on position {}\nPlease fix and restart!\n", msg, pos);
                },
                dotenv::Error::Io(ioerr) => match ioerr.kind() {
                dotenvy::Error::Io(ioerr) => match ioerr.kind() {
                    std::io::ErrorKind::NotFound => {
                        println!("[INFO] No .env file found.\n");
                    },
@@ -88,8 +91,7 @@ macro_rules! make_config {
    }

    fn from_file(path: &str) -> Result<Self, Error> {
        use crate::util::read_file_string;
        let config_str = read_file_string(path)?;
        let config_str = std::fs::read_to_string(path)?;
        serde_json::from_str(&config_str).map_err(Into::into)
    }
@@ -332,6 +334,8 @@ make_config! {
    attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments");
    /// Sends folder
    sends_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "sends");
    /// Temp folder |> Used for storing temporary file uploads
    tmp_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "tmp");
    /// Templates folder
    templates_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "templates");
    /// Session JWT key
@@ -426,11 +430,16 @@ make_config! {
    org_creation_users: String, true, def, "".to_string();
    /// Allow invitations |> Controls whether users can be invited by organization admins, even when signups are otherwise disabled
    invitations_allowed: bool, true, def, true;
    /// Invitation token expiration time (in hours) |> The number of hours after which an organization invite token, emergency access invite token,
    /// email verification token and deletion request token will expire (must be at least 1)
    invitation_expiration_hours: u32, false, def, 120;
    /// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
    emergency_access_allowed: bool, true, def, true;
    /// Password iterations |> Number of server-side password hashing iterations.
    /// The changes only apply when a user changes their password. Not recommended to lower the value
    password_iterations: i32, true, def, 100_000;
    /// Allow password hints |> Controls whether users can set password hints. This setting applies globally to all users.
    password_hints_allowed: bool, true, def, true;
    /// Show password hint |> Controls whether a password hint should be shown directly in the web page
    /// if SMTP service is not configured. Not recommended for publicly-accessible instances as this
    /// provides unauthenticated access to potentially sensitive data.
@@ -457,6 +466,10 @@ make_config! {
    /// service is set, an icon request to Vaultwarden will return an HTTP redirect to the
    /// corresponding icon at the external service.
    icon_service: String, false, def, "internal".to_string();
    /// Internal
    _icon_service_url: String, false, gen, |c| generate_icon_service_url(&c.icon_service);
    /// Internal
    _icon_service_csp: String, false, gen, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
    /// Icon redirect code |> The HTTP status code to use for redirects to an external icon service.
    /// The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
    /// Temporary redirects are useful while testing different icon services, but once a service
@@ -509,9 +522,15 @@ make_config! {
    /// Max database connection retries |> Number of times to retry the database connection during startup, with 1 second between each retry, set to 0 to retry indefinitely
    db_connection_retries: u32, false, def, 15;

    /// Timeout when acquiring a database connection
    database_timeout: u64, false, def, 30;

    /// Database connection pool size
    database_max_conns: u32, false, def, 10;

    /// Database connection init |> SQL statements to run when creating a new database connection, mainly useful for connection-scoped pragmas. If empty, a database-specific default is used.
    database_conn_init: String, false, def, "".to_string();

    /// Bypass admin page security (Know the risks!) |> Disables the Admin Token for the admin page so you may use your own auth in front
    disable_admin_token: bool, true, def, false;
@@ -561,12 +580,14 @@ make_config! {
    _enable_smtp: bool, true, def, true;
    /// Host
    smtp_host: String, true, option;
    /// Enable Secure SMTP |> (Explicit) - Enabling this by default would use STARTTLS (Standard ports 587 or 25)
    smtp_ssl: bool, true, def, true;
    /// Force TLS |> (Implicit) - Enabling this would force the use of an SSL/TLS connection, instead of upgrading an insecure one with STARTTLS (Standard port 465)
    smtp_explicit_tls: bool, true, def, false;
    /// DEPRECATED smtp_ssl |> DEPRECATED - Please use SMTP_SECURITY
    smtp_ssl: bool, false, option;
    /// DEPRECATED smtp_explicit_tls |> DEPRECATED - Please use SMTP_SECURITY
    smtp_explicit_tls: bool, false, option;
    /// Secure SMTP |> ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption
    smtp_security: String, true, auto, |c| smtp_convert_deprecated_ssl_options(c.smtp_ssl, c.smtp_explicit_tls); // TODO: After deprecation make it `def, "starttls".to_string()`
    /// Port
    smtp_port: u16, true, auto, |c| if c.smtp_explicit_tls {465} else if c.smtp_ssl {587} else {25};
    smtp_port: u16, true, auto, |c| if c.smtp_security == *"force_tls" {465} else if c.smtp_security == *"starttls" {587} else {25};
    /// From Address
    smtp_from: String, true, def, String::new();
    /// From Name
@@ -649,6 +670,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
    }

    if cfg._enable_smtp {
        match cfg.smtp_security.as_str() {
            "off" | "starttls" | "force_tls" => (),
            _ => err!(
                "`SMTP_SECURITY` is invalid. It needs to be one of the following options: starttls, force_tls or off"
            ),
        }

        if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
            err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
        }
@@ -701,6 +729,10 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
        _ => err!("Only HTTP 301/302 and 307/308 redirects are supported"),
    }

    if cfg.invitation_expiration_hours < 1 {
        err!("`INVITATION_EXPIRATION_HOURS` has a minimum duration of 1 hour")
    }

    Ok(())
}
/// Generate the correct URL for the icon service.
/// This will be used within icons.rs to call the external icon service.
fn generate_icon_service_url(icon_service: &str) -> String {
    match icon_service {
        "internal" => "".to_string(),
        "bitwarden" => "https://icons.bitwarden.net/{}/icon.png".to_string(),
        "duckduckgo" => "https://icons.duckduckgo.com/ip3/{}.ico".to_string(),
        "google" => "https://www.google.com/s2/favicons?domain={}&sz=32".to_string(),
        _ => icon_service.to_string(),
    }
}

/// Generate the CSP string needed to allow redirected icon fetching
fn generate_icon_service_csp(icon_service: &str, icon_service_url: &str) -> String {
    // We split on the first '{', since that is the variable delimiter for an icon service URL.
    // Everything up until the first '{' should be fixed and can be used as a CSP string.
    let csp_string = match icon_service_url.split_once('{') {
        Some((c, _)) => c.to_string(),
        None => "".to_string(),
    };

    // Because Google does a second redirect to their gstatic.com domain, we need to add an extra CSP string.
    match icon_service {
        "google" => csp_string + " https://*.gstatic.com/favicon",
        _ => csp_string,
    }
}

/// Convert the old SMTP_SSL and SMTP_EXPLICIT_TLS options
fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls: Option<bool>) -> String {
    if smtp_explicit_tls.is_some() || smtp_ssl.is_some() {
        println!("[DEPRECATED]: `SMTP_SSL` or `SMTP_EXPLICIT_TLS` is set. Please use `SMTP_SECURITY` instead.");
    }
    if smtp_explicit_tls.is_some() && smtp_explicit_tls.unwrap() {
        return "force_tls".to_string();
    } else if smtp_ssl.is_some() && !smtp_ssl.unwrap() {
        return "off".to_string();
    }
    // Return the default `starttls` in all other cases
    "starttls".to_string()
}
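The conversion shim gives the deprecated boolean pair a defined mapping onto the new tri-state `SMTP_SECURITY`: an explicit-TLS request wins and becomes "force_tls", an explicit `SMTP_SSL=false` becomes "off", and everything else falls back to "starttls". A small test sketch spelling out that table (the test module and case list are mine, not part of the diff):

    #[cfg(test)]
    mod smtp_security_tests {
        use super::smtp_convert_deprecated_ssl_options;

        #[test]
        fn maps_old_flags_to_smtp_security() {
            // (smtp_ssl, smtp_explicit_tls) -> expected SMTP_SECURITY
            let cases = [
                (None, None, "starttls"),               // nothing set: default
                (Some(true), None, "starttls"),         // old default spelled out
                (Some(false), None, "off"),             // SSL explicitly disabled
                (None, Some(true), "force_tls"),        // implicit TLS requested
                (Some(false), Some(true), "force_tls"), // explicit TLS wins
            ];
            for (ssl, explicit, expected) in cases {
                assert_eq!(smtp_convert_deprecated_ssl_options(ssl, explicit), expected);
            }
        }
    }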
impl Config {
    pub fn load() -> Result<Self, Error> {
        // Loading from env and file
@@ -743,6 +817,8 @@ impl Config {

        Ok(Config {
            inner: RwLock::new(Inner {
                rocket_shutdown_handle: None,
                ws_shutdown_handle: None,
                templates: load_templates(&config.templates_folder),
                config,
                _env,
@@ -907,6 +983,26 @@ impl Config {
        hb.render(name, data).map_err(Into::into)
    }
}

    pub fn set_rocket_shutdown_handle(&self, handle: rocket::Shutdown) {
        self.inner.write().unwrap().rocket_shutdown_handle = Some(handle);
    }

    pub fn set_ws_shutdown_handle(&self, handle: tokio::sync::oneshot::Sender<()>) {
        self.inner.write().unwrap().ws_shutdown_handle = Some(handle);
    }

    pub fn shutdown(&self) {
        if let Ok(mut c) = self.inner.write() {
            if let Some(handle) = c.ws_shutdown_handle.take() {
                handle.send(()).ok();
            }

            if let Some(handle) = c.rocket_shutdown_handle.take() {
                handle.notify();
            }
        }
    }
}
use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, Renderable};
@@ -980,7 +1076,7 @@

fn case_helper<'reg, 'rc>(
    h: &Helper<'reg, 'rc>,
    r: &'reg Handlebars,
    r: &'reg Handlebars<'_>,
    ctx: &'rc Context,
    rc: &mut RenderContext<'reg, 'rc>,
    out: &mut dyn Output,
@@ -997,17 +1093,16 @@ fn case_helper<'reg, 'rc>(

fn js_escape_helper<'reg, 'rc>(
    h: &Helper<'reg, 'rc>,
    _r: &'reg Handlebars,
    _r: &'reg Handlebars<'_>,
    _ctx: &'rc Context,
    _rc: &mut RenderContext<'reg, 'rc>,
    out: &mut dyn Output,
) -> HelperResult {
    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;
    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"jsesc\""))?;

    let no_quote = h.param(1).is_some();

    let value =
        param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;
    let value = param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"jsesc\" is not a String"))?;

    let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
    if !no_quote {
230  src/db/mod.rs
@@ -1,8 +1,20 @@
|
||||
use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
use diesel::{
|
||||
connection::SimpleConnection,
|
||||
r2d2::{ConnectionManager, CustomizeConnection, Pool, PooledConnection},
|
||||
};
|
||||
|
||||
use rocket::{
|
||||
http::Status,
|
||||
outcome::IntoOutcome,
|
||||
request::{FromRequest, Outcome},
|
||||
Request, State,
|
||||
Request,
|
||||
};
|
||||
|
||||
use tokio::{
|
||||
sync::{Mutex, OwnedSemaphorePermit, Semaphore},
|
||||
time::timeout,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
@@ -22,6 +34,23 @@ pub mod __mysql_schema;
|
||||
#[path = "schemas/postgresql/schema.rs"]
|
||||
pub mod __postgresql_schema;
|
||||
|
||||
// These changes are based on Rocket 0.5-rc wrapper of Diesel: https://github.com/SergioBenitez/Rocket/blob/v0.5-rc/contrib/sync_db_pools
|
||||
|
||||
// A wrapper around spawn_blocking that propagates panics to the calling code.
|
||||
pub async fn run_blocking<F, R>(job: F) -> R
|
||||
where
|
||||
F: FnOnce() -> R + Send + 'static,
|
||||
R: Send + 'static,
|
||||
{
|
||||
match tokio::task::spawn_blocking(job).await {
|
||||
Ok(ret) => ret,
|
||||
Err(e) => match e.try_into_panic() {
|
||||
Ok(panic) => std::panic::resume_unwind(panic),
|
||||
Err(_) => unreachable!("spawn_blocking tasks are never cancelled"),
|
||||
},
|
||||
}
|
||||
}
|
||||
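run_blocking is the bridge used throughout this refactor: synchronous Diesel work hops onto tokio's blocking thread pool, and a panic inside the job resurfaces in the awaiting task instead of being swallowed by the JoinHandle. A hypothetical call site (expensive_sync_work is a stand-in name):

// Run a blocking job from async code; panics propagate to the caller.
let result = run_blocking(move || expensive_sync_work()).await;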

// This is used to generate the main DbConn and DbPool enums, which contain one variant for each database supported
macro_rules! generate_connections {
    ( $( $name:ident: $ty:ty ),+ ) => {

@@ -29,15 +58,74 @@ macro_rules! generate_connections {
        #[derive(Eq, PartialEq)]
        pub enum DbConnType { $( $name, )+ }

+        pub struct DbConn {
+            conn: Arc<Mutex<Option<DbConnInner>>>,
+            permit: Option<OwnedSemaphorePermit>,
+        }
+
        #[allow(non_camel_case_types)]
-        pub enum DbConn { $( #[cfg($name)] $name(PooledConnection<ConnectionManager< $ty >>), )+ }
+        pub enum DbConnInner { $( #[cfg($name)] $name(PooledConnection<ConnectionManager< $ty >>), )+ }

+        #[derive(Debug)]
+        pub struct DbConnOptions {
+            pub init_stmts: String,
+        }

+        $( // Based on <https://stackoverflow.com/a/57717533>.
+        #[cfg($name)]
+        impl CustomizeConnection<$ty, diesel::r2d2::Error> for DbConnOptions {
+            fn on_acquire(&self, conn: &mut $ty) -> Result<(), diesel::r2d2::Error> {
+                (|| {
+                    if !self.init_stmts.is_empty() {
+                        conn.batch_execute(&self.init_stmts)?;
+                    }
+                    Ok(())
+                })().map_err(diesel::r2d2::Error::QueryError)
+            }
+        })+

+        #[derive(Clone)]
+        pub struct DbPool {
+            // This is an 'Option' so that we can drop the pool in a 'spawn_blocking'.
+            pool: Option<DbPoolInner>,
+            semaphore: Arc<Semaphore>
+        }

        #[allow(non_camel_case_types)]
        #[derive(Clone)]
-        pub enum DbPool { $( #[cfg($name)] $name(Pool<ConnectionManager< $ty >>), )+ }
+        pub enum DbPoolInner { $( #[cfg($name)] $name(Pool<ConnectionManager< $ty >>), )+ }

+        impl Drop for DbConn {
+            fn drop(&mut self) {
+                let conn = self.conn.clone();
+                let permit = self.permit.take();
+
+                // Since connection can't be on the stack in an async fn during an
+                // await, we have to spawn a new blocking-safe thread...
+                tokio::task::spawn_blocking(move || {
+                    // And then re-enter the runtime to wait on the async mutex, but in a blocking fashion.
+                    let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned());
+
+                    if let Some(conn) = conn.take() {
+                        drop(conn);
+                    }
+
+                    // Drop permit after the connection is dropped
+                    drop(permit);
+                });
+            }
+        }

+        impl Drop for DbPool {
+            fn drop(&mut self) {
+                let pool = self.pool.take();
+                tokio::task::spawn_blocking(move || drop(pool));
+            }
+        }
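The Drop impls encode a general pattern: a resource guarded by an async mutex cannot be torn down synchronously on the executor thread, so the destructor clones the Arc, ships the teardown to spawn_blocking, and re-enters the runtime only to take the lock. A stripped-down sketch of the same shape, with a stand-in resource type in place of the pooled connection:

// Hypothetical stand-alone version of the drop-offload pattern above.
struct Guarded {
    inner: std::sync::Arc<tokio::sync::Mutex<Option<String>>>, // stand-in resource
}

impl Drop for Guarded {
    fn drop(&mut self) {
        let inner = self.inner.clone();
        tokio::task::spawn_blocking(move || {
            // Safe here: we are on a blocking thread, not the async executor.
            let mut slot = tokio::runtime::Handle::current().block_on(inner.lock_owned());
            drop(slot.take()); // the real teardown may block (e.g. closing a socket)
        });
    }
}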
        impl DbPool {
-            // For the given database URL, guess it's type, run migrations create pool and return it
+            // For the given database URL, guess its type, run migrations, create pool, and return it
            #[allow(clippy::diverging_sub_expression)]
            pub fn from_config() -> Result<Self, Error> {
                let url = CONFIG.database_url();
                let conn_type = DbConnType::from_url(&url)?;

@@ -50,9 +138,16 @@ macro_rules! generate_connections {
                let manager = ConnectionManager::new(&url);
                let pool = Pool::builder()
                    .max_size(CONFIG.database_max_conns())
                    .connection_timeout(Duration::from_secs(CONFIG.database_timeout()))
+                    .connection_customizer(Box::new(DbConnOptions{
+                        init_stmts: conn_type.get_init_stmts()
+                    }))
                    .build(manager)
                    .map_res("Failed to create pool")?;
-                return Ok(Self::$name(pool));
+                return Ok(DbPool {
+                    pool: Some(DbPoolInner::$name(pool)),
+                    semaphore: Arc::new(Semaphore::new(CONFIG.database_max_conns() as usize)),
+                });
                }
                #[cfg(not($name))]
                #[allow(unreachable_code)]

@@ -61,10 +156,26 @@ macro_rules! generate_connections {
            )+ }
        }

            // Get a connection from the pool
-            pub fn get(&self) -> Result<DbConn, Error> {
-                match self { $(
+            pub async fn get(&self) -> Result<DbConn, Error> {
+                let duration = Duration::from_secs(CONFIG.database_timeout());
+                let permit = match timeout(duration, self.semaphore.clone().acquire_owned()).await {
+                    Ok(p) => p.expect("Semaphore should be open"),
+                    Err(_) => {
+                        err!("Timeout waiting for database connection");
+                    }
+                };
+
+                match self.pool.as_ref().expect("DbPool.pool should always be Some()") { $(
                    #[cfg($name)]
-                    Self::$name(p) => Ok(DbConn::$name(p.get().map_res("Error retrieving connection from pool")?)),
+                    DbPoolInner::$name(p) => {
+                        let pool = p.clone();
+                        let c = run_blocking(move || pool.get_timeout(duration)).await.map_res("Error retrieving connection from pool")?;
+
+                        return Ok(DbConn {
+                            conn: Arc::new(Mutex::new(Some(DbConnInner::$name(c)))),
+                            permit: Some(permit)
+                        });
+                    },
                )+ }
            }
        }
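The new get() bounds concurrency twice: a semaphore permit reserves a slot (with a timeout on the wait), and only then is the r2d2 checkout attempted on the blocking pool. The permit travels inside DbConn, so the slot is released exactly when the connection drops. A minimal sketch of the reservation step alone, using the same tokio primitives; names are invented:

// Hypothetical bounded checkout: None means the wait timed out.
use std::sync::Arc;
use tokio::{sync::{OwnedSemaphorePermit, Semaphore}, time::{timeout, Duration}};

async fn reserve_slot(sem: Arc<Semaphore>, secs: u64) -> Option<OwnedSemaphorePermit> {
    match timeout(Duration::from_secs(secs), sem.acquire_owned()).await {
        Ok(Ok(permit)) => Some(permit), // hold the permit for the connection's lifetime
        _ => None,                      // timed out, or the semaphore was closed
    }
}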
@@ -104,6 +215,23 @@ impl DbConnType {
            err!("`DATABASE_URL` looks like a SQLite URL, but 'sqlite' feature is not enabled")
        }
    }

+    pub fn get_init_stmts(&self) -> String {
+        let init_stmts = CONFIG.database_conn_init();
+        if !init_stmts.is_empty() {
+            init_stmts
+        } else {
+            self.default_init_stmts()
+        }
+    }
+
+    pub fn default_init_stmts(&self) -> String {
+        match self {
+            Self::sqlite => "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;".to_string(),
+            Self::mysql => "".to_string(),
+            Self::postgresql => "".to_string(),
+        }
+    }
}

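The precedence is simple: an operator-supplied init string (the database_conn_init config value) wins outright; only an empty value falls back to the per-backend default, which for SQLite sets a 5-second busy timeout and NORMAL synchronous mode. Reduced to its core, with stand-in names:

// Sketch of the get_init_stmts() precedence rule.
fn pick_init_stmts(configured: String, backend_default: String) -> String {
    if !configured.is_empty() {
        configured       // explicit override, run verbatim on every new connection
    } else {
        backend_default  // e.g. the SQLite PRAGMAs above
    }
}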
#[macro_export]

@@ -113,42 +241,52 @@ macro_rules! db_run {
        db_run! { $conn: sqlite, mysql, postgresql $body }
    };

    // Different code for each db
-    ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
-        #[allow(unused)] use diesel::prelude::*;
-        match $conn {
-            $($(
-                #[cfg($db)]
-                crate::db::DbConn::$db(ref $conn) => {
-                    paste::paste! {
-                        #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *};
-                        #[allow(unused)] use [<__ $db _model>]::*;
-                        #[allow(unused)] use crate::db::FromDb;
-                    }
-                    $body
-                },
-            )+)+
-        }}
-    };

    // Same for all dbs
    ( @raw $conn:ident: $body:block ) => {
        db_run! { @raw $conn: sqlite, mysql, postgresql $body }
    };

    // Different code for each db
-    ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {
-        #[allow(unused)] use diesel::prelude::*;
-        #[allow(unused_variables)]
-        match $conn {
-            $($(
-                #[cfg($db)]
-                crate::db::DbConn::$db(ref $conn) => {
-                    $body
-                },
-            )+)+
-        }
-    };
+    ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
+        #[allow(unused)] use diesel::prelude::*;
+        #[allow(unused)] use $crate::db::FromDb;
+
+        let conn = $conn.conn.clone();
+        let mut conn = conn.lock_owned().await;
+        match conn.as_mut().expect("internal invariant broken: self.connection is Some") {
+            $($(
+                #[cfg($db)]
+                $crate::db::DbConnInner::$db($conn) => {
+                    paste::paste! {
+                        #[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
+                        #[allow(unused)] use [<__ $db _model>]::*;
+                    }
+
+                    tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead
+                },
+            )+)+
+        }
+    }};

+    ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
+        #[allow(unused)] use diesel::prelude::*;
+        #[allow(unused)] use $crate::db::FromDb;
+
+        let conn = $conn.conn.clone();
+        let mut conn = conn.lock_owned().await;
+        match conn.as_mut().expect("internal invariant broken: self.connection is Some") {
+            $($(
+                #[cfg($db)]
+                $crate::db::DbConnInner::$db($conn) => {
+                    paste::paste! {
+                        #[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
+                        // @ RAW: #[allow(unused)] use [<__ $db _model>]::*;
+                    }
+
+                    tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead
+                },
+            )+)+
+        }
+    }};
}
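Call sites change shape accordingly: the macro now locks the async mutex, so the surrounding function must be async, and the Diesel body runs under block_in_place. A hypothetical call site; the users table and query are stand-ins:

// Hypothetical query through the reworked macro.
pub async fn count_users(conn: &DbConn) -> i64 {
    db_run! { conn: {
        users::table.count().first::<i64>(conn).unwrap_or(0)
    }}
}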
pub trait FromDb {

@@ -201,7 +339,7 @@ macro_rules! db_object {
        paste::paste! {
            #[allow(unused)] use super::*;
            #[allow(unused)] use diesel::prelude::*;
-            #[allow(unused)] use crate::db::[<__ $db _schema>]::*;
+            #[allow(unused)] use $crate::db::[<__ $db _schema>]::*;

            $( #[$attr] )*
            pub struct [<$name Db>] { $(

@@ -213,7 +351,7 @@ macro_rules! db_object {
            #[inline(always)] pub fn to_db(x: &super::$name) -> Self { Self { $( $field: x.$field.clone(), )+ } }
        }

-        impl crate::db::FromDb for [<$name Db>] {
+        impl $crate::db::FromDb for [<$name Db>] {
            type Output = super::$name;
            #[allow(clippy::wrong_self_convention)]
            #[inline(always)] fn from_db(self) -> Self::Output { super::$name { $( $field: self.$field, )+ } }

@@ -227,9 +365,10 @@ pub mod models;

/// Creates a back-up of the sqlite database
/// MySQL/MariaDB and PostgreSQL are not supported.
-pub fn backup_database(conn: &DbConn) -> Result<(), Error> {
+pub async fn backup_database(conn: &DbConn) -> Result<(), Error> {
    db_run! {@raw conn:
        postgresql, mysql {
+            let _ = conn;
            err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
        }
        sqlite {

@@ -244,7 +383,7 @@ pub fn backup_database(conn: &DbConn) -> Result<(), Error> {
}

/// Get the SQL Server version
-pub fn get_sql_server_version(conn: &DbConn) -> String {
+pub async fn get_sql_server_version(conn: &DbConn) -> String {
    db_run! {@raw conn:
        postgresql, mysql {
            no_arg_sql_function!(version, diesel::sql_types::Text);

@@ -260,15 +399,14 @@ pub fn get_sql_server_version(conn: &DbConn) -> String {
/// Attempts to retrieve a single connection from the managed database pool. If
/// no pool is currently managed, fails with an `InternalServerError` status. If
/// no connections are available, fails with a `ServiceUnavailable` status.
-impl<'a, 'r> FromRequest<'a, 'r> for DbConn {
+#[rocket::async_trait]
+impl<'r> FromRequest<'r> for DbConn {
    type Error = ();

-    fn from_request(request: &'a Request<'r>) -> Outcome<DbConn, ()> {
-        // https://github.com/SergioBenitez/Rocket/commit/e3c1a4ad3ab9b840482ec6de4200d30df43e357c
-        let pool = try_outcome!(request.guard::<State<DbPool>>());
-        match pool.get() {
-            Ok(conn) => Outcome::Success(conn),
-            Err(_) => Outcome::Failure((Status::ServiceUnavailable, ())),
+    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
+        match request.rocket().state::<DbPool>() {
+            Some(p) => p.get().await.map_err(|_| ()).into_outcome(Status::ServiceUnavailable),
+            None => Outcome::Failure((Status::InternalServerError, ())),
        }
    }
}
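With the async guard in place, handlers never touch the pool directly: requesting a DbConn argument performs the checkout (or short-circuits with 503/500) before the handler body runs. A hypothetical route showing the guard in use; the path and handler are invented:

// Hypothetical Rocket route; the DbConn guard does the pool checkout.
#[get("/version")]
async fn version(conn: DbConn) -> String {
    get_sql_server_version(&conn).await
}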
src/db/models/attachment.rs

@@ -2,14 +2,12 @@ use std::io::ErrorKind;

use serde_json::Value;

-use super::Cipher;
use crate::CONFIG;

db_object! {
-    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "attachments"]
    #[changeset_options(treat_none_as_null="true")]
-    #[belongs_to(super::Cipher, foreign_key = "cipher_uuid")]
    #[primary_key(id)]
    pub struct Attachment {
        pub id: String,

@@ -60,7 +58,7 @@ use crate::error::MapResult;

/// Database methods
impl Attachment {
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
+    pub async fn save(&self, conn: &DbConn) -> EmptyResult {
        db_run! { conn:
            sqlite, mysql {
                match diesel::replace_into(attachments::table)

@@ -92,7 +90,7 @@ impl Attachment {
        }
    }

-    pub fn delete(&self, conn: &DbConn) -> EmptyResult {
+    pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            crate::util::retry(
                || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),

@@ -116,14 +114,14 @@ impl Attachment {
        }}
    }

-    pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for attachment in Attachment::find_by_cipher(cipher_uuid, conn) {
-            attachment.delete(conn)?;
+    pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await {
+            attachment.delete(conn).await?;
        }
        Ok(())
    }

-    pub fn find_by_id(id: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_id(id: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            attachments::table
                .filter(attachments::id.eq(id.to_lowercase()))

@@ -133,7 +131,7 @@ impl Attachment {
        }}
    }

-    pub fn find_by_cipher(cipher_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_cipher(cipher_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            attachments::table
                .filter(attachments::cipher_uuid.eq(cipher_uuid))

@@ -143,7 +141,7 @@ impl Attachment {
        }}
    }

-    pub fn size_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
+    pub async fn size_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
        db_run! { conn: {
            let result: Option<i64> = attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))

@@ -155,7 +153,7 @@ impl Attachment {
        }}
    }

-    pub fn count_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
+    pub async fn count_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
        db_run! { conn: {
            attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))

@@ -166,7 +164,7 @@ impl Attachment {
        }}
    }

-    pub fn size_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
+    pub async fn size_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
        db_run! { conn: {
            let result: Option<i64> = attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))

@@ -178,7 +176,7 @@ impl Attachment {
        }}
    }

-    pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
+    pub async fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
        db_run! { conn: {
            attachments::table
                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))

@@ -188,4 +186,15 @@ impl Attachment {
                .unwrap_or(0)
        }}
    }

+    pub async fn find_all_by_ciphers(cipher_uuids: &Vec<String>, conn: &DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            attachments::table
+                .filter(attachments::cipher_uuid.eq_any(cipher_uuids))
+                .select(attachments::all_columns)
+                .load::<AttachmentDb>(conn)
+                .expect("Error loading attachments")
+                .from_db()
+        }}
+    }
}
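find_all_by_ciphers exists for the sync path: one eq_any query replaces a per-cipher lookup. A hypothetical consumer that groups the batched result so each cipher gets an O(1) map hit afterwards:

// Hypothetical grouping of the batched result by cipher_uuid.
use std::collections::HashMap;

let uuids: Vec<String> = ciphers.iter().map(|c| c.uuid.clone()).collect();
let mut attachments_by_cipher: HashMap<String, Vec<Attachment>> = HashMap::new();
for a in Attachment::find_all_by_ciphers(&uuids, &conn).await {
    attachments_by_cipher.entry(a.cipher_uuid.clone()).or_default().push(a);
}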
src/db/models/cipher.rs

@@ -1,19 +1,17 @@
-use crate::CONFIG;
use chrono::{Duration, NaiveDateTime, Utc};
use serde_json::Value;

+use crate::CONFIG;
-use super::{Attachment, CollectionCipher, Favorite, FolderCipher, User, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{
+    Attachment, CollectionCipher, Favorite, FolderCipher, Organization, User, UserOrgStatus, UserOrgType,
+    UserOrganization,
+};
+use crate::api::core::CipherSyncData;

+use std::borrow::Cow;

db_object! {
-    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "ciphers"]
    #[changeset_options(treat_none_as_null="true")]
-    #[belongs_to(User, foreign_key = "user_uuid")]
-    #[belongs_to(Organization, foreign_key = "organization_uuid")]
    #[primary_key(uuid)]
    pub struct Cipher {
        pub uuid: String,

@@ -82,22 +80,32 @@ use crate::error::MapResult;

/// Database methods
impl Cipher {
-    pub fn to_json(&self, host: &str, user_uuid: &str, conn: &DbConn) -> Value {
+    pub async fn to_json(
+        &self,
+        host: &str,
+        user_uuid: &str,
+        cipher_sync_data: Option<&CipherSyncData>,
+        conn: &DbConn,
+    ) -> Value {
        use crate::util::format_date;

        // When there are no attachments use null instead of an empty array
-        let attachments = Attachment::find_by_cipher(&self.uuid, conn);
-        let attachments_json = if attachments.is_empty() {
-            Value::Null
-        } else {
-            attachments.iter().map(|c| c.to_json(host)).collect()
-        };
+        let mut attachments_json: Value = Value::Null;
+        if let Some(cipher_sync_data) = cipher_sync_data {
+            if let Some(attachments) = cipher_sync_data.cipher_attachments.get(&self.uuid) {
+                attachments_json = attachments.iter().map(|c| c.to_json(host)).collect();
+            }
+        } else {
+            let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
+            if !attachments.is_empty() {
+                attachments_json = attachments.iter().map(|c| c.to_json(host)).collect()
+            }
+        }

        let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
        let password_history_json =
            self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);

-        let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, conn) {
+        let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await {
            Some((ro, hp)) => (ro, hp),
            None => {
                error!("Cipher ownership assertion failure");

@@ -109,7 +117,7 @@ impl Cipher {
        // If not passing an empty object, mobile clients will crash.
        let mut type_data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({}));

-        // NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
+        // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
        // Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
        if self.atype == 1 {
            if type_data_json["Uris"].is_array() {

@@ -124,13 +132,23 @@ impl Cipher {
        // Clone the type_data and add some default value.
        let mut data_json = type_data_json.clone();

-        // NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
+        // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
        // data_json should always contain the following keys with every atype
        data_json["Fields"] = json!(fields_json);
        data_json["Name"] = json!(self.name);
        data_json["Notes"] = json!(self.notes);
        data_json["PasswordHistory"] = json!(password_history_json);

+        let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
+            if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
+                Cow::from(cipher_collections)
+            } else {
+                Cow::from(Vec::with_capacity(0))
+            }
+        } else {
+            Cow::from(self.get_collections(user_uuid, conn).await)
+        };

        // There are three types of cipher response models in upstream
        // Bitwarden: "cipherMini", "cipher", and "cipherDetails" (in order
        // of increasing level of detail). vaultwarden currently only

@@ -142,10 +160,11 @@ impl Cipher {
            "Object": "cipherDetails",
            "Id": self.uuid,
            "Type": self.atype,
+            "CreationDate": format_date(&self.created_at),
            "RevisionDate": format_date(&self.updated_at),
            "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
-            "FolderId": self.get_folder_uuid(user_uuid, conn),
-            "Favorite": self.is_favorite(user_uuid, conn),
+            "FolderId": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string() ) } else { self.get_folder_uuid(user_uuid, conn).await },
+            "Favorite": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_favorites.contains(&self.uuid) } else { self.is_favorite(user_uuid, conn).await },
            "Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
            "OrganizationId": self.organization_uuid,
            "Attachments": attachments_json,

@@ -154,7 +173,7 @@ impl Cipher {
            "OrganizationUseTotp": true,

            // This field is specific to the cipherDetails type.
-            "CollectionIds": self.get_collections(user_uuid, conn),
+            "CollectionIds": collection_ids,

            "Name": self.name,
            "Notes": self.notes,
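The CipherSyncData type threaded through to_json lives in src/api/core and its definition is not part of this diff. From its usage here, its shape is roughly the following; the field types are inferred, so treat this as a sketch rather than the actual definition:

// Inferred shape of the sync-time cache: one batch of queries fills these
// maps up front, then every cipher's to_json() is served from memory
// instead of issuing several queries per cipher.
use std::collections::{HashMap, HashSet};

pub struct CipherSyncData {
    pub cipher_attachments: HashMap<String, Vec<Attachment>>,  // cipher_uuid -> attachments
    pub cipher_folders: HashMap<String, String>,               // cipher_uuid -> folder_uuid
    pub cipher_favorites: HashSet<String>,                     // favorited cipher_uuids
    pub cipher_collections: HashMap<String, Vec<String>>,      // cipher_uuid -> collection_uuids
    pub user_organizations: HashMap<String, UserOrganization>, // org_uuid -> membership row
    pub user_collections: HashMap<String, CollectionUser>,     // collection_uuid -> access row
}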
@@ -189,28 +208,28 @@ impl Cipher {
        json_object
    }

-    pub fn update_users_revision(&self, conn: &DbConn) -> Vec<String> {
+    pub async fn update_users_revision(&self, conn: &DbConn) -> Vec<String> {
        let mut user_uuids = Vec::new();
        match self.user_uuid {
            Some(ref user_uuid) => {
-                User::update_uuid_revision(user_uuid, conn);
+                User::update_uuid_revision(user_uuid, conn).await;
                user_uuids.push(user_uuid.clone())
            }
            None => {
                // Belongs to Organization, need to update affected users
                if let Some(ref org_uuid) = self.organization_uuid {
-                    UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).iter().for_each(|user_org| {
-                        User::update_uuid_revision(&user_org.user_uuid, conn);
+                    for user_org in UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await.iter() {
+                        User::update_uuid_revision(&user_org.user_uuid, conn).await;
                        user_uuids.push(user_org.user_uuid.clone())
-                    });
+                    }
                }
            }
        };
        user_uuids
    }

-    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
-        self.update_users_revision(conn);
+    pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
+        self.update_users_revision(conn).await;
        self.updated_at = Utc::now().naive_utc();

        db_run! { conn:

@@ -244,13 +263,13 @@ impl Cipher {
        }
    }

-    pub fn delete(&self, conn: &DbConn) -> EmptyResult {
-        self.update_users_revision(conn);
+    pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
+        self.update_users_revision(conn).await;

-        FolderCipher::delete_all_by_cipher(&self.uuid, conn)?;
-        CollectionCipher::delete_all_by_cipher(&self.uuid, conn)?;
-        Attachment::delete_all_by_cipher(&self.uuid, conn)?;
-        Favorite::delete_all_by_cipher(&self.uuid, conn)?;
+        FolderCipher::delete_all_by_cipher(&self.uuid, conn).await?;
+        CollectionCipher::delete_all_by_cipher(&self.uuid, conn).await?;
+        Attachment::delete_all_by_cipher(&self.uuid, conn).await?;
+        Favorite::delete_all_by_cipher(&self.uuid, conn).await?;

        db_run! { conn: {
            diesel::delete(ciphers::table.filter(ciphers::uuid.eq(&self.uuid)))

@@ -259,54 +278,55 @@ impl Cipher {
        }}
    }

-    pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for cipher in Self::find_by_org(org_uuid, conn) {
-            cipher.delete(conn)?;
+    pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
+        // TODO: Optimize this by executing a DELETE directly on the database, instead of first fetching.
+        for cipher in Self::find_by_org(org_uuid, conn).await {
+            cipher.delete(conn).await?;
        }
        Ok(())
    }

-    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for cipher in Self::find_owned_by_user(user_uuid, conn) {
-            cipher.delete(conn)?;
+    pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for cipher in Self::find_owned_by_user(user_uuid, conn).await {
+            cipher.delete(conn).await?;
        }
        Ok(())
    }

    /// Purge all ciphers that are old enough to be auto-deleted.
-    pub fn purge_trash(conn: &DbConn) {
+    pub async fn purge_trash(conn: &DbConn) {
        if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
            let now = Utc::now().naive_utc();
            let dt = now - Duration::days(auto_delete_days);
-            for cipher in Self::find_deleted_before(&dt, conn) {
-                cipher.delete(conn).ok();
+            for cipher in Self::find_deleted_before(&dt, conn).await {
+                cipher.delete(conn).await.ok();
            }
        }
    }

-    pub fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(user_uuid, conn);
+    pub async fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        User::update_uuid_revision(user_uuid, conn).await;

-        match (self.get_folder_uuid(user_uuid, conn), folder_uuid) {
+        match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) {
            // No changes
            (None, None) => Ok(()),
            (Some(ref old), Some(ref new)) if old == new => Ok(()),

            // Add to folder
-            (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn),
+            (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn).await,

            // Remove from folder
-            (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn) {
-                Some(old) => old.delete(conn),
+            (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await {
+                Some(old) => old.delete(conn).await,
                None => err!("Couldn't move from previous folder"),
            },

            // Move to another folder
            (Some(old), Some(new)) => {
-                if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn) {
-                    old.delete(conn)?;
+                if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await {
+                    old.delete(conn).await?;
                }
-                FolderCipher::new(&new, &self.uuid).save(conn)
+                FolderCipher::new(&new, &self.uuid).save(conn).await
            }
        }
    }
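One mechanical change recurs across all of these model files: .iter().for_each(|x| ...) becomes a plain for loop wherever the body now awaits, because .await is illegal inside a synchronous closure. The before/after shape, with stand-in names:

// Before (sync): a closure is fine.
//     items.iter().for_each(|i| update(i));
// After (async): only a for loop keeps the enclosing async context.
for i in items.iter() {
    update(i).await;
}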
@@ -317,13 +337,21 @@ impl Cipher {
    }

    /// Returns whether this cipher is owned by an org in which the user has full access.
-    pub fn is_in_full_access_org(&self, user_uuid: &str, conn: &DbConn) -> bool {
+    pub async fn is_in_full_access_org(
+        &self,
+        user_uuid: &str,
+        cipher_sync_data: Option<&CipherSyncData>,
+        conn: &DbConn,
+    ) -> bool {
        if let Some(ref org_uuid) = self.organization_uuid {
-            if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {
+            if let Some(cipher_sync_data) = cipher_sync_data {
+                if let Some(cached_user_org) = cipher_sync_data.user_organizations.get(org_uuid) {
+                    return cached_user_org.has_full_access();
+                }
+            } else if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await {
                return user_org.has_full_access();
            }
        }

        false
    }

@@ -332,18 +360,62 @@ impl Cipher {
    /// not in any collection the user has access to. Otherwise, the user has
    /// access to this cipher, and Some(read_only, hide_passwords) represents
    /// the access restrictions.
-    pub fn get_access_restrictions(&self, user_uuid: &str, conn: &DbConn) -> Option<(bool, bool)> {
+    pub async fn get_access_restrictions(
+        &self,
+        user_uuid: &str,
+        cipher_sync_data: Option<&CipherSyncData>,
+        conn: &DbConn,
+    ) -> Option<(bool, bool)> {
        // Check whether this cipher is directly owned by the user, or is in
        // a collection that the user has full access to. If so, there are no
        // access restrictions.
-        if self.is_owned_by_user(user_uuid) || self.is_in_full_access_org(user_uuid, conn) {
+        if self.is_owned_by_user(user_uuid) || self.is_in_full_access_org(user_uuid, cipher_sync_data, conn).await {
            return Some((false, false));
        }

+        let rows = if let Some(cipher_sync_data) = cipher_sync_data {
+            let mut rows: Vec<(bool, bool)> = Vec::new();
+            if let Some(collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
+                for collection in collections {
+                    if let Some(uc) = cipher_sync_data.user_collections.get(collection) {
+                        rows.push((uc.read_only, uc.hide_passwords));
+                    }
+                }
+            }
+            rows
+        } else {
+            self.get_collections_access_flags(user_uuid, conn).await
+        };

+        if rows.is_empty() {
+            // This cipher isn't in any collections accessible to the user.
+            return None;
+        }

+        // A cipher can be in multiple collections with inconsistent access flags.
+        // For example, a cipher could be in one collection where the user has
+        // read-only access, but also in another collection where the user has
+        // read/write access. For a flag to be in effect for a cipher, upstream
+        // requires all collections the cipher is in to have that flag set.
+        // Therefore, we do a boolean AND of all values in each of the `read_only`
+        // and `hide_passwords` columns. This could ideally be done as part of the
+        // query, but Diesel doesn't support a min() or bool_and() function on
+        // booleans and this behavior isn't portable anyway.
+        let mut read_only = true;
+        let mut hide_passwords = true;
+        for (ro, hp) in rows.iter() {
+            read_only &= ro;
+            hide_passwords &= hp;
+        }

+        Some((read_only, hide_passwords))
    }

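The fold deserves a worked example. Suppose a cipher sits in three collections whose (read_only, hide_passwords) rows are (true, true), (false, true) and (true, false): the column-wise AND yields (false, false), i.e. one writable collection is enough to grant write access, and one password-revealing collection is enough to reveal passwords:

// The same boolean AND, written as a fold for clarity.
let rows = [(true, true), (false, true), (true, false)];
let (read_only, hide_passwords) =
    rows.iter().fold((true, true), |(ro, hp), &(r, h)| (ro && r, hp && h));
assert_eq!((read_only, hide_passwords), (false, false));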
    pub async fn get_collections_access_flags(&self, user_uuid: &str, conn: &DbConn) -> Vec<(bool, bool)> {
        db_run! {conn: {
            // Check whether this cipher is in any collections accessible to the
            // user. If so, retrieve the access flags for each collection.
-            let rows = ciphers::table
+            ciphers::table
                .filter(ciphers::uuid.eq(&self.uuid))
                .inner_join(ciphers_collections::table.on(
                    ciphers::uuid.eq(ciphers_collections::cipher_uuid)))

@@ -352,58 +424,35 @@ impl Cipher {
                    .and(users_collections::user_uuid.eq(user_uuid))))
                .select((users_collections::read_only, users_collections::hide_passwords))
                .load::<(bool, bool)>(conn)
-                .expect("Error getting access restrictions");
-
-            if rows.is_empty() {
-                // This cipher isn't in any collections accessible to the user.
-                return None;
-            }
-
-            // A cipher can be in multiple collections with inconsistent access flags.
-            // For example, a cipher could be in one collection where the user has
-            // read-only access, but also in another collection where the user has
-            // read/write access. For a flag to be in effect for a cipher, upstream
-            // requires all collections the cipher is in to have that flag set.
-            // Therefore, we do a boolean AND of all values in each of the `read_only`
-            // and `hide_passwords` columns. This could ideally be done as part of the
-            // query, but Diesel doesn't support a min() or bool_and() function on
-            // booleans and this behavior isn't portable anyway.
-            let mut read_only = true;
-            let mut hide_passwords = true;
-            for (ro, hp) in rows.iter() {
-                read_only &= ro;
-                hide_passwords &= hp;
-            }
-
-            Some((read_only, hide_passwords))
+                .expect("Error getting access restrictions")
        }}
    }

-    pub fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
-        match self.get_access_restrictions(user_uuid, conn) {
+    pub async fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        match self.get_access_restrictions(user_uuid, None, conn).await {
            Some((read_only, _hide_passwords)) => !read_only,
            None => false,
        }
    }

-    pub fn is_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
-        self.get_access_restrictions(user_uuid, conn).is_some()
+    pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        self.get_access_restrictions(user_uuid, None, conn).await.is_some()
    }

    // Returns whether this cipher is a favorite of the specified user.
-    pub fn is_favorite(&self, user_uuid: &str, conn: &DbConn) -> bool {
-        Favorite::is_favorite(&self.uuid, user_uuid, conn)
+    pub async fn is_favorite(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        Favorite::is_favorite(&self.uuid, user_uuid, conn).await
    }

    // Sets whether this cipher is a favorite of the specified user.
-    pub fn set_favorite(&self, favorite: Option<bool>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
+    pub async fn set_favorite(&self, favorite: Option<bool>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
        match favorite {
            None => Ok(()), // No change requested.
-            Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn),
+            Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn).await,
        }
    }

-    pub fn get_folder_uuid(&self, user_uuid: &str, conn: &DbConn) -> Option<String> {
+    pub async fn get_folder_uuid(&self, user_uuid: &str, conn: &DbConn) -> Option<String> {
        db_run! {conn: {
            folders_ciphers::table
                .inner_join(folders::table)

@@ -415,7 +464,7 @@ impl Cipher {
        }}
    }

-    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::uuid.eq(uuid))

@@ -437,7 +486,7 @@ impl Cipher {
    // true, then the non-interesting ciphers will not be returned. As a
    // result, those ciphers will not appear in "My Vault" for the org
    // owner/admin, but they can still be accessed via the org vault view.
-    pub fn find_by_user(user_uuid: &str, visible_only: bool, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            let mut query = ciphers::table
                .left_join(ciphers_collections::table.on(

@@ -472,12 +521,12 @@ impl Cipher {
    }

    // Find all ciphers visible to the specified user.
-    pub fn find_by_user_visible(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        Self::find_by_user(user_uuid, true, conn)
+    pub async fn find_by_user_visible(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        Self::find_by_user(user_uuid, true, conn).await
    }

    // Find all ciphers directly owned by the specified user.
-    pub fn find_owned_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_owned_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            ciphers::table
                .filter(

@@ -488,7 +537,7 @@ impl Cipher {
        }}
    }

-    pub fn count_owned_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
+    pub async fn count_owned_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::user_uuid.eq(user_uuid))

@@ -499,7 +548,7 @@ impl Cipher {
        }}
    }

-    pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::organization_uuid.eq(org_uuid))

@@ -507,7 +556,7 @@ impl Cipher {
        }}
    }

-    pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
+    pub async fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::organization_uuid.eq(org_uuid))

@@ -518,7 +567,7 @@ impl Cipher {
        }}
    }

-    pub fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            folders_ciphers::table.inner_join(ciphers::table)
                .filter(folders_ciphers::folder_uuid.eq(folder_uuid))

@@ -528,7 +577,7 @@ impl Cipher {
    }

    /// Find all ciphers that were deleted before the specified datetime.
-    pub fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::deleted_at.lt(dt))

@@ -536,7 +585,7 @@ impl Cipher {
        }}
    }

-    pub fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec<String> {
+    pub async fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec<String> {
        db_run! {conn: {
            ciphers_collections::table
                .inner_join(collections::table.on(

@@ -562,4 +611,32 @@ impl Cipher {
                .load::<String>(conn).unwrap_or_default()
        }}
    }

+    /// Return a Vec with (cipher_uuid, collection_uuid)
+    /// This is used during a full sync so we only need one query for all collections accessible.
+    pub async fn get_collections_with_cipher_by_user(user_id: &str, conn: &DbConn) -> Vec<(String, String)> {
+        db_run! {conn: {
+            ciphers_collections::table
+                .inner_join(collections::table.on(
+                    collections::uuid.eq(ciphers_collections::collection_uuid)
+                ))
+                .inner_join(users_organizations::table.on(
+                    users_organizations::org_uuid.eq(collections::org_uuid).and(
+                        users_organizations::user_uuid.eq(user_id)
+                    )
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
+                        users_collections::user_uuid.eq(user_id)
+                    )
+                ))
+                .filter(users_collections::user_uuid.eq(user_id).or( // User has access to collection
+                    users_organizations::access_all.eq(true).or( // User has access all
+                        users_organizations::atype.le(UserOrgType::Admin as i32) // User is admin or owner
+                    )
+                ))
+                .select(ciphers_collections::all_columns)
+                .load::<(String, String)>(conn).unwrap_or_default()
+        }}
+    }
}
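The (cipher_uuid, collection_uuid) pairs are meant to be grouped once on the caller's side. A hypothetical consumer building the cipher_collections map consumed by to_json above:

// Hypothetical one-query group-by for the sync path.
use std::collections::HashMap;

let mut cipher_collections: HashMap<String, Vec<String>> = HashMap::new();
for (cipher_uuid, collection_uuid) in
    Cipher::get_collections_with_cipher_by_user(&user.uuid, &conn).await
{
    cipher_collections.entry(cipher_uuid).or_default().push(collection_uuid);
}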
src/db/models/collection.rs

@@ -1,11 +1,10 @@
use serde_json::Value;

-use super::{Cipher, Organization, User, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{User, UserOrgStatus, UserOrgType, UserOrganization};

db_object! {
-    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "collections"]
-    #[belongs_to(Organization, foreign_key = "org_uuid")]
    #[primary_key(uuid)]
    pub struct Collection {
        pub uuid: String,

@@ -13,10 +12,8 @@ db_object! {
        pub name: String,
    }

-    #[derive(Identifiable, Queryable, Insertable, Associations)]
+    #[derive(Identifiable, Queryable, Insertable)]
    #[table_name = "users_collections"]
-    #[belongs_to(User, foreign_key = "user_uuid")]
-    #[belongs_to(Collection, foreign_key = "collection_uuid")]
    #[primary_key(user_uuid, collection_uuid)]
    pub struct CollectionUser {
        pub user_uuid: String,

@@ -25,10 +22,8 @@ db_object! {
        pub hide_passwords: bool,
    }

-    #[derive(Identifiable, Queryable, Insertable, Associations)]
+    #[derive(Identifiable, Queryable, Insertable)]
    #[table_name = "ciphers_collections"]
-    #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
-    #[belongs_to(Collection, foreign_key = "collection_uuid")]
    #[primary_key(cipher_uuid, collection_uuid)]
    pub struct CollectionCipher {
        pub cipher_uuid: String,

@@ -57,11 +52,32 @@ impl Collection {
        })
    }

-    pub fn to_json_details(&self, user_uuid: &str, conn: &DbConn) -> Value {
+    pub async fn to_json_details(
+        &self,
+        user_uuid: &str,
+        cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
+        conn: &DbConn,
+    ) -> Value {
+        let (read_only, hide_passwords) = if let Some(cipher_sync_data) = cipher_sync_data {
+            match cipher_sync_data.user_organizations.get(&self.org_uuid) {
+                Some(uo) if uo.has_full_access() => (false, false),
+                Some(_) => {
+                    if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
+                        (uc.read_only, uc.hide_passwords)
+                    } else {
+                        (false, false)
+                    }
+                }
+                _ => (true, true),
+            }
+        } else {
+            (!self.is_writable_by_user(user_uuid, conn).await, self.hide_passwords_for_user(user_uuid, conn).await)
+        };

        let mut json_object = self.to_json();
        json_object["Object"] = json!("collectionDetails");
-        json_object["ReadOnly"] = json!(!self.is_writable_by_user(user_uuid, conn));
-        json_object["HidePasswords"] = json!(self.hide_passwords_for_user(user_uuid, conn));
+        json_object["ReadOnly"] = json!(read_only);
+        json_object["HidePasswords"] = json!(hide_passwords);
        json_object
    }
}

@@ -73,8 +89,8 @@ use crate::error::MapResult;

/// Database methods
impl Collection {
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        self.update_users_revision(conn);
+    pub async fn save(&self, conn: &DbConn) -> EmptyResult {
+        self.update_users_revision(conn).await;

        db_run! { conn:
            sqlite, mysql {

@@ -107,10 +123,10 @@ impl Collection {
        }
    }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
-        self.update_users_revision(conn);
-        CollectionCipher::delete_all_by_collection(&self.uuid, conn)?;
-        CollectionUser::delete_all_by_collection(&self.uuid, conn)?;
+    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
+        self.update_users_revision(conn).await;
+        CollectionCipher::delete_all_by_collection(&self.uuid, conn).await?;
+        CollectionUser::delete_all_by_collection(&self.uuid, conn).await?;

        db_run! { conn: {
            diesel::delete(collections::table.filter(collections::uuid.eq(self.uuid)))

@@ -119,20 +135,20 @@ impl Collection {
        }}
    }

-    pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for collection in Self::find_by_organization(org_uuid, conn) {
-            collection.delete(conn)?;
+    pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for collection in Self::find_by_organization(org_uuid, conn).await {
+            collection.delete(conn).await?;
        }
        Ok(())
    }

-    pub fn update_users_revision(&self, conn: &DbConn) {
-        UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).iter().for_each(|user_org| {
-            User::update_uuid_revision(&user_org.user_uuid, conn);
-        });
+    pub async fn update_users_revision(&self, conn: &DbConn) {
+        for user_org in UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() {
+            User::update_uuid_revision(&user_org.user_uuid, conn).await;
+        }
    }

-    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            collections::table
                .filter(collections::uuid.eq(uuid))

@@ -142,7 +158,7 @@ impl Collection {
        }}
    }

-    pub fn find_by_user_uuid(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_user_uuid(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            collections::table
                .left_join(users_collections::table.on(

@@ -167,11 +183,11 @@ impl Collection {
        }}
    }

-    pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        Self::find_by_user_uuid(user_uuid, conn).into_iter().filter(|c| c.org_uuid == org_uuid).collect()
+    pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        Self::find_by_user_uuid(user_uuid, conn).await.into_iter().filter(|c| c.org_uuid == org_uuid).collect()
    }

-    pub fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            collections::table
                .filter(collections::org_uuid.eq(org_uuid))

@@ -181,7 +197,7 @@ impl Collection {
        }}
    }

-    pub fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            collections::table
                .filter(collections::uuid.eq(uuid))

@@ -193,7 +209,7 @@ impl Collection {
        }}
    }

-    pub fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            collections::table
                .left_join(users_collections::table.on(

@@ -219,8 +235,8 @@ impl Collection {
        }}
    }

-    pub fn is_writable_by_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
-        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn) {
+    pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
            None => false, // Not in Org
            Some(user_org) => {
                if user_org.has_full_access() {

@@ -241,8 +257,8 @@ impl Collection {
        }
    }

-    pub fn hide_passwords_for_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
-        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn) {
+    pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
            None => true, // Not in Org
            Some(user_org) => {
                if user_org.has_full_access() {

@@ -266,7 +282,7 @@ impl Collection {

/// Database methods
impl CollectionUser {
-    pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_collections::table
                .filter(users_collections::user_uuid.eq(user_uuid))

@@ -279,14 +295,14 @@ impl CollectionUser {
        }}
    }

-    pub fn save(
+    pub async fn save(
        user_uuid: &str,
        collection_uuid: &str,
        read_only: bool,
        hide_passwords: bool,
        conn: &DbConn,
    ) -> EmptyResult {
-        User::update_uuid_revision(user_uuid, conn);
+        User::update_uuid_revision(user_uuid, conn).await;

        db_run! { conn:
            sqlite, mysql {

@@ -337,8 +353,8 @@ impl CollectionUser {
        }
    }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&self.user_uuid, conn);
+    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
+        User::update_uuid_revision(&self.user_uuid, conn).await;

        db_run! { conn: {
            diesel::delete(

@@ -351,7 +367,7 @@ impl CollectionUser {
        }}
    }

-    pub fn find_by_collection(collection_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_collection(collection_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_collections::table
                .filter(users_collections::collection_uuid.eq(collection_uuid))

@@ -362,7 +378,7 @@ impl CollectionUser {
        }}
    }

-    pub fn find_by_collection_and_user(collection_uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_collection_and_user(collection_uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            users_collections::table
                .filter(users_collections::collection_uuid.eq(collection_uuid))

@@ -374,10 +390,21 @@ impl CollectionUser {
        }}
    }

+    pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            users_collections::table
+                .filter(users_collections::user_uuid.eq(user_uuid))
+                .select(users_collections::all_columns)
+                .load::<CollectionUserDb>(conn)
+                .expect("Error loading users_collections")
+                .from_db()
+        }}
+    }
+
-    pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
-        CollectionUser::find_by_collection(collection_uuid, conn).iter().for_each(|collection| {
-            User::update_uuid_revision(&collection.user_uuid, conn);
-        });
+    pub async fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() {
+            User::update_uuid_revision(&collection.user_uuid, conn).await;
+        }

        db_run! { conn: {
            diesel::delete(users_collections::table.filter(users_collections::collection_uuid.eq(collection_uuid)))

@@ -386,8 +413,8 @@ impl CollectionUser {
        }}
    }

-    pub fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> EmptyResult {
-        let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn);
+    pub async fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> EmptyResult {
+        let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;

        db_run! { conn: {
            for user in collectionusers {

@@ -405,8 +432,8 @@ impl CollectionUser {

/// Database methods
impl CollectionCipher {
-    pub fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
-        Self::update_users_revision(collection_uuid, conn);
+    pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
+        Self::update_users_revision(collection_uuid, conn).await;

        db_run! { conn:
            sqlite, mysql {

@@ -435,8 +462,8 @@ impl CollectionCipher {
        }
    }

-    pub fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
-        Self::update_users_revision(collection_uuid, conn);
+    pub async fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
+        Self::update_users_revision(collection_uuid, conn).await;

        db_run! { conn: {
            diesel::delete(

@@ -449,7 +476,7 @@ impl CollectionCipher {
        }}
    }

-    pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
+    pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid)))
                .execute(conn)

@@ -457,7 +484,7 @@ impl CollectionCipher {
        }}
    }

-    pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
+    pub async fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid)))
                .execute(conn)

@@ -465,9 +492,9 @@ impl CollectionCipher {
        }}
    }

-    pub fn update_users_revision(collection_uuid: &str, conn: &DbConn) {
-        if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn) {
-            collection.update_users_revision(conn);
+    pub async fn update_users_revision(collection_uuid: &str, conn: &DbConn) {
+        if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await {
+            collection.update_users_revision(conn).await;
        }
    }
}
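Note the fallback matrix in the cached branch of to_json_details: full access yields (false, false); a plain membership with no users_collections row also yields (false, false), i.e. writable with passwords visible; and no membership at all degrades to the most restrictive (true, true). A compact restatement of that decision, written as a free function purely for illustration:

// Hypothetical restatement of the cached branch in to_json_details.
fn flags(membership: Option<&UserOrganization>, access_row: Option<&CollectionUser>) -> (bool, bool) {
    match membership {
        Some(uo) if uo.has_full_access() => (false, false),
        Some(_) => access_row.map(|uc| (uc.read_only, uc.hide_passwords)).unwrap_or((false, false)),
        None => (true, true), // (read_only, hide_passwords)
    }
}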
@@ -1,14 +1,12 @@
use chrono::{NaiveDateTime, Utc};

-use super::User;
use crate::CONFIG;

db_object! {
-    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "devices"]
    #[changeset_options(treat_none_as_null="true")]
-    #[belongs_to(User, foreign_key = "user_uuid")]
-    #[primary_key(uuid)]
+    #[primary_key(uuid, user_uuid)]
    pub struct Device {
        pub uuid: String,
        pub created_at: NaiveDateTime,
@@ -89,11 +87,11 @@ impl Device {
            nbf: time_now.timestamp(),
            exp: (time_now + *DEFAULT_VALIDITY).timestamp(),
            iss: JWT_LOGIN_ISSUER.to_string(),
-            sub: user.uuid.to_string(),
+            sub: user.uuid.clone(),

            premium: true,
-            name: user.name.to_string(),
-            email: user.email.to_string(),
+            name: user.name.clone(),
+            email: user.email.clone(),
            email_verified: !CONFIG.mail_enabled() || user.verified_at.is_some(),

            orgowner,
@@ -101,8 +99,8 @@ impl Device {
            orguser,
            orgmanager,

-            sstamp: user.security_stamp.to_string(),
-            device: self.uuid.to_string(),
+            sstamp: user.security_stamp.clone(),
+            device: self.uuid.clone(),
            scope,
            amr: vec!["Application".into()],
        };
@@ -118,7 +116,7 @@ use crate::error::MapResult;

/// Database methods
impl Device {
-    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
+    pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
        self.updated_at = Utc::now().naive_utc();

        db_run! { conn:
@@ -131,39 +129,33 @@ impl Device {
            postgresql {
                let value = DeviceDb::to_db(self);
                crate::util::retry(
-                    || diesel::insert_into(devices::table).values(&value).on_conflict(devices::uuid).do_update().set(&value).execute(conn),
+                    || diesel::insert_into(devices::table).values(&value).on_conflict((devices::uuid, devices::user_uuid)).do_update().set(&value).execute(conn),
                    10,
                ).map_res("Error saving device")
            }
        }
    }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
-            diesel::delete(devices::table.filter(devices::uuid.eq(self.uuid)))
+            diesel::delete(devices::table.filter(devices::user_uuid.eq(user_uuid)))
                .execute(conn)
-                .map_res("Error removing device")
+                .map_res("Error removing devices for user")
        }}
    }

-    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for device in Self::find_by_user(user_uuid, conn) {
-            device.delete(conn)?;
-        }
-        Ok(())
-    }
-
-    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            devices::table
                .filter(devices::uuid.eq(uuid))
+                .filter(devices::user_uuid.eq(user_uuid))
                .first::<DeviceDb>(conn)
                .ok()
                .from_db()
        }}
    }

-    pub fn find_by_refresh_token(refresh_token: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_refresh_token(refresh_token: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            devices::table
                .filter(devices::refresh_token.eq(refresh_token))
@@ -173,17 +165,7 @@ impl Device {
        }}
    }

-    pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        db_run! { conn: {
-            devices::table
-                .filter(devices::user_uuid.eq(user_uuid))
-                .load::<DeviceDb>(conn)
-                .expect("Error loading devices")
-                .from_db()
-        }}
-    }
-
-    pub fn find_latest_active_by_user(user_uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_latest_active_by_user(user_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            devices::table
                .filter(devices::user_uuid.eq(user_uuid))

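Switching `devices` to a composite primary key of `(uuid, user_uuid)` means a device row is only addressable together with its owner, which is why `find_by_uuid` becomes `find_by_uuid_and_user` and the PostgreSQL upsert's conflict target grows to the pair. A hedged, self-contained sketch of the lookup semantics (plain Rust, not Diesel; the map is an illustrative stand-in for the table):

use std::collections::HashMap;

struct Device {
    name: String,
}

fn main() {
    // Keyed by (device uuid, user uuid), mirroring #[primary_key(uuid, user_uuid)].
    let mut devices: HashMap<(String, String), Device> = HashMap::new();
    devices.insert(("dev-1".to_string(), "user-a".to_string()), Device { name: "laptop".to_string() });

    // Lookups now need both halves of the key, like find_by_uuid_and_user.
    let ok = ("dev-1".to_string(), "user-a".to_string());
    let wrong_user = ("dev-1".to_string(), "user-b".to_string());
    assert!(devices.get(&ok).is_some());
    assert!(devices.get(&wrong_user).is_none());
    println!("{}", devices[&ok].name);
}
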
@@ -4,10 +4,9 @@ use serde_json::Value;
use super::User;

db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "emergency_access"]
    #[changeset_options(treat_none_as_null="true")]
-    #[belongs_to(User, foreign_key = "grantor_uuid")]
    #[primary_key(uuid)]
    pub struct EmergencyAccess {
        pub uuid: String,
@@ -73,8 +72,8 @@ impl EmergencyAccess {
        })
    }

-    pub fn to_json_grantor_details(&self, conn: &DbConn) -> Value {
-        let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).expect("Grantor user not found.");
+    pub async fn to_json_grantor_details(&self, conn: &DbConn) -> Value {
+        let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found.");

        json!({
            "Id": self.uuid,
@@ -89,11 +88,11 @@ impl EmergencyAccess {
    }

    #[allow(clippy::manual_map)]
-    pub fn to_json_grantee_details(&self, conn: &DbConn) -> Value {
+    pub async fn to_json_grantee_details(&self, conn: &DbConn) -> Value {
        let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
-            Some(User::find_by_uuid(grantee_uuid, conn).expect("Grantee user not found."))
+            Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
        } else if let Some(email) = self.email.as_deref() {
-            Some(User::find_by_mail(email, conn).expect("Grantee user not found."))
+            Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
        } else {
            None
        };
@@ -155,8 +154,8 @@ use crate::api::EmptyResult;
use crate::error::MapResult;

impl EmergencyAccess {
-    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&self.grantor_uuid, conn);
+    pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
+        User::update_uuid_revision(&self.grantor_uuid, conn).await;
        self.updated_at = Utc::now().naive_utc();

        db_run! { conn:
@@ -190,18 +189,18 @@ impl EmergencyAccess {
        }
    }

-    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for ea in Self::find_all_by_grantor_uuid(user_uuid, conn) {
-            ea.delete(conn)?;
+    pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await {
+            ea.delete(conn).await?;
        }
-        for ea in Self::find_all_by_grantee_uuid(user_uuid, conn) {
-            ea.delete(conn)?;
+        for ea in Self::find_all_by_grantee_uuid(user_uuid, conn).await {
+            ea.delete(conn).await?;
        }
        Ok(())
    }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&self.grantor_uuid, conn);
+    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
+        User::update_uuid_revision(&self.grantor_uuid, conn).await;

        db_run! { conn: {
            diesel::delete(emergency_access::table.filter(emergency_access::uuid.eq(self.uuid)))
@@ -210,7 +209,7 @@ impl EmergencyAccess {
        }}
    }

-    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::uuid.eq(uuid))
@@ -219,7 +218,7 @@ impl EmergencyAccess {
        }}
    }

-    pub fn find_by_grantor_uuid_and_grantee_uuid_or_email(
+    pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
        grantor_uuid: &str,
        grantee_uuid: &str,
        email: &str,
@@ -234,7 +233,7 @@ impl EmergencyAccess {
        }}
    }

-    pub fn find_all_recoveries(conn: &DbConn) -> Vec<Self> {
+    pub async fn find_all_recoveries(conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::status.eq(EmergencyAccessStatus::RecoveryInitiated as i32))
@@ -242,7 +241,7 @@ impl EmergencyAccess {
        }}
    }

-    pub fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::uuid.eq(uuid))
@@ -252,7 +251,7 @@ impl EmergencyAccess {
        }}
    }

-    pub fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::grantee_uuid.eq(grantee_uuid))
@@ -260,7 +259,7 @@ impl EmergencyAccess {
        }}
    }

-    pub fn find_invited_by_grantee_email(grantee_email: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_invited_by_grantee_email(grantee_email: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::email.eq(grantee_email))
@@ -270,7 +269,7 @@ impl EmergencyAccess {
        }}
    }

-    pub fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::grantor_uuid.eq(grantor_uuid))

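In `to_json_grantee_details` the grantee is resolved in priority order: by stored uuid first, then by invite email, otherwise none. A hedged, self-contained sketch of that resolution order (plain Rust; `Grantee` is an illustrative stand-in for the looked-up user):

struct Grantee(String);

// Mirrors the uuid-first, email-second fallback in to_json_grantee_details.
fn resolve_grantee(grantee_uuid: Option<&str>, email: Option<&str>) -> Option<Grantee> {
    if let Some(uuid) = grantee_uuid {
        Some(Grantee(format!("by-uuid:{uuid}")))
    } else if let Some(mail) = email {
        Some(Grantee(format!("by-mail:{mail}")))
    } else {
        None
    }
}

fn main() {
    // When both are present, the uuid branch wins.
    assert!(resolve_grantee(Some("abc"), Some("x@y.z")).is_some());
    assert!(resolve_grantee(None, None).is_none());
}
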
@@ -1,10 +1,8 @@
-use super::{Cipher, User};
+use super::User;

db_object! {
-    #[derive(Identifiable, Queryable, Insertable, Associations)]
+    #[derive(Identifiable, Queryable, Insertable)]
    #[table_name = "favorites"]
-    #[belongs_to(User, foreign_key = "user_uuid")]
-    #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
    #[primary_key(user_uuid, cipher_uuid)]
    pub struct Favorite {
        pub user_uuid: String,
@@ -19,7 +17,7 @@ use crate::error::MapResult;

impl Favorite {
    // Returns whether the specified cipher is a favorite of the specified user.
-    pub fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> bool {
+    pub async fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> bool {
        db_run! { conn: {
            let query = favorites::table
                .filter(favorites::cipher_uuid.eq(cipher_uuid))
@@ -31,11 +29,11 @@ impl Favorite {
    }

    // Sets whether the specified cipher is a favorite of the specified user.
-    pub fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn), favorite);
+    pub async fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn).await, favorite);
        match (old, new) {
            (false, true) => {
-                User::update_uuid_revision(user_uuid, conn);
+                User::update_uuid_revision(user_uuid, conn).await;
                db_run! { conn: {
                    diesel::insert_into(favorites::table)
                        .values((
@@ -47,7 +45,7 @@ impl Favorite {
                }}
            }
            (true, false) => {
-                User::update_uuid_revision(user_uuid, conn);
+                User::update_uuid_revision(user_uuid, conn).await;
                db_run! { conn: {
                    diesel::delete(
                        favorites::table
@@ -64,7 +62,7 @@ impl Favorite {
    }

    // Delete all favorite entries associated with the specified cipher.
-    pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
+    pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid)))
                .execute(conn)
@@ -73,11 +71,23 @@ impl Favorite {
    }

    // Delete all favorite entries associated with the specified user.
-    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid)))
                .execute(conn)
                .map_res("Error removing favorites by user")
        }}
    }
+
+    /// Return a vec with (cipher_uuid) this will only contain favorite flagged ciphers
+    /// This is used during a full sync so we only need one query for all favorite cipher matches.
+    pub async fn get_all_cipher_uuid_by_user(user_uuid: &str, conn: &DbConn) -> Vec<String> {
+        db_run! { conn: {
+            favorites::table
+                .filter(favorites::user_uuid.eq(user_uuid))
+                .select(favorites::cipher_uuid)
+                .load::<String>(conn)
+                .unwrap_or_default()
+        }}
+    }
}

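`get_all_cipher_uuid_by_user` exists so a full sync can flag favorites with a single query instead of one `is_favorite` round trip per cipher. A hedged sketch of how a sync loop might consume that result (plain Rust; the data is illustrative):

use std::collections::HashSet;

fn main() {
    // Pretend this came back from get_all_cipher_uuid_by_user in one query.
    let favorite_uuids: HashSet<String> =
        vec!["c1".to_string(), "c3".to_string()].into_iter().collect();

    let all_ciphers = ["c1", "c2", "c3"];
    for cipher in all_ciphers {
        // O(1) membership test per cipher instead of one DB query each.
        let is_favorite = favorite_uuids.contains(cipher);
        println!("{cipher}: favorite = {is_favorite}");
    }
}
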
@@ -1,12 +1,11 @@
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;

-use super::{Cipher, User};
+use super::User;

db_object! {
-    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "folders"]
-    #[belongs_to(User, foreign_key = "user_uuid")]
    #[primary_key(uuid)]
    pub struct Folder {
        pub uuid: String,
@@ -16,10 +15,8 @@ db_object! {
        pub name: String,
    }

-    #[derive(Identifiable, Queryable, Insertable, Associations)]
+    #[derive(Identifiable, Queryable, Insertable)]
    #[table_name = "folders_ciphers"]
-    #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
-    #[belongs_to(Folder, foreign_key = "folder_uuid")]
    #[primary_key(cipher_uuid, folder_uuid)]
    pub struct FolderCipher {
        pub cipher_uuid: String,
@@ -70,8 +67,8 @@ use crate::error::MapResult;

/// Database methods
impl Folder {
-    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&self.user_uuid, conn);
+    pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
+        User::update_uuid_revision(&self.user_uuid, conn).await;
        self.updated_at = Utc::now().naive_utc();

        db_run! { conn:
@@ -105,9 +102,9 @@ impl Folder {
        }
    }

-    pub fn delete(&self, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&self.user_uuid, conn);
-        FolderCipher::delete_all_by_folder(&self.uuid, conn)?;
+    pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
+        User::update_uuid_revision(&self.user_uuid, conn).await;
+        FolderCipher::delete_all_by_folder(&self.uuid, conn).await?;

        db_run! { conn: {
            diesel::delete(folders::table.filter(folders::uuid.eq(&self.uuid)))
@@ -116,14 +113,14 @@ impl Folder {
        }}
    }

-    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for folder in Self::find_by_user(user_uuid, conn) {
-            folder.delete(conn)?;
+    pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for folder in Self::find_by_user(user_uuid, conn).await {
+            folder.delete(conn).await?;
        }
        Ok(())
    }

-    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            folders::table
                .filter(folders::uuid.eq(uuid))
@@ -133,7 +130,7 @@ impl Folder {
        }}
    }

-    pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            folders::table
                .filter(folders::user_uuid.eq(user_uuid))
@@ -145,7 +142,7 @@ impl Folder {
}

impl FolderCipher {
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
+    pub async fn save(&self, conn: &DbConn) -> EmptyResult {
        db_run! { conn:
            sqlite, mysql {
                // Not checking for ForeignKey Constraints here.
@@ -167,7 +164,7 @@ impl FolderCipher {
        }
    }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
+    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(
                folders_ciphers::table
@@ -179,7 +176,7 @@ impl FolderCipher {
        }}
    }

-    pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
+    pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid)))
                .execute(conn)
@@ -187,7 +184,7 @@ impl FolderCipher {
        }}
    }

-    pub fn delete_all_by_folder(folder_uuid: &str, conn: &DbConn) -> EmptyResult {
+    pub async fn delete_all_by_folder(folder_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid)))
                .execute(conn)
@@ -195,7 +192,7 @@ impl FolderCipher {
        }}
    }

-    pub fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            folders_ciphers::table
                .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@@ -206,7 +203,7 @@ impl FolderCipher {
        }}
    }

-    pub fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            folders_ciphers::table
                .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@@ -215,4 +212,17 @@ impl FolderCipher {
                .from_db()
        }}
    }
+
+    /// Return a vec with (cipher_uuid, folder_uuid)
+    /// This is used during a full sync so we only need one query for all folder matches.
+    pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<(String, String)> {
+        db_run! { conn: {
+            folders_ciphers::table
+                .inner_join(folders::table)
+                .filter(folders::user_uuid.eq(user_uuid))
+                .select(folders_ciphers::all_columns)
+                .load::<(String, String)>(conn)
+                .unwrap_or_default()
+        }}
+    }
}

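Like the favorites helper, `FolderCipher::find_by_user` lets a full sync resolve every cipher-to-folder assignment in one joined query. A hedged sketch of turning those `(cipher_uuid, folder_uuid)` pairs into a lookup map (plain Rust, illustrative data):

use std::collections::HashMap;

fn main() {
    // Pretend this came back from FolderCipher::find_by_user in one query.
    let pairs = vec![
        ("cipher-1".to_string(), "folder-a".to_string()),
        ("cipher-2".to_string(), "folder-b".to_string()),
    ];

    // cipher_uuid -> folder_uuid; the sync code can then attach a FolderId
    // to each cipher with an O(1) lookup instead of a query per cipher.
    let folder_of: HashMap<String, String> = pairs.into_iter().collect();

    assert_eq!(folder_of.get("cipher-1").map(String::as_str), Some("folder-a"));
    assert!(folder_of.get("cipher-9").is_none());
}
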
@@ -19,7 +19,7 @@ pub use self::device::Device;
pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType};
pub use self::favorite::Favorite;
pub use self::folder::{Folder, FolderCipher};
-pub use self::org_policy::{OrgPolicy, OrgPolicyType};
+pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType};
pub use self::organization::{Organization, UserOrgStatus, UserOrgType, UserOrganization};
pub use self::send::{Send, SendType};
pub use self::two_factor::{TwoFactor, TwoFactorType};

@@ -6,12 +6,11 @@ use crate::db::DbConn;
use crate::error::MapResult;
use crate::util::UpCase;

-use super::{Organization, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};

db_object! {
-    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "org_policies"]
-    #[belongs_to(Organization, foreign_key = "org_uuid")]
    #[primary_key(uuid)]
    pub struct OrgPolicy {
        pub uuid: String,
@@ -22,25 +21,37 @@ db_object! {
    }
}

-#[derive(Copy, Clone, PartialEq, num_derive::FromPrimitive)]
+// https://github.com/bitwarden/server/blob/b86a04cef9f1e1b82cf18e49fc94e017c641130c/src/Core/Enums/PolicyType.cs
+#[derive(Copy, Clone, Eq, PartialEq, num_derive::FromPrimitive)]
pub enum OrgPolicyType {
    TwoFactorAuthentication = 0,
    MasterPassword = 1,
    PasswordGenerator = 2,
    SingleOrg = 3,
-    // RequireSso = 4, // Not currently supported.
+    // RequireSso = 4, // Not supported
    PersonalOwnership = 5,
    DisableSend = 6,
    SendOptions = 7,
+    // ResetPassword = 8, // Not supported
+    // MaximumVaultTimeout = 9, // Not supported (Not AGPLv3 Licensed)
+    // DisablePersonalVaultExport = 10, // Not supported (Not AGPLv3 Licensed)
}

-// https://github.com/bitwarden/server/blob/master/src/Core/Models/Data/SendOptionsPolicyData.cs
+// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendOptionsPolicyData {
    pub DisableHideEmail: bool,
}

+pub type OrgPolicyResult = Result<(), OrgPolicyErr>;
+
+#[derive(Debug)]
+pub enum OrgPolicyErr {
+    TwoFactorMissing,
+    SingleOrgEnforced,
+}
+
/// Local methods
impl OrgPolicy {
    pub fn new(org_uuid: String, atype: OrgPolicyType, data: String) -> Self {
@@ -72,7 +83,7 @@ impl OrgPolicy {

/// Database methods
impl OrgPolicy {
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
+    pub async fn save(&self, conn: &DbConn) -> EmptyResult {
        db_run! { conn:
            sqlite, mysql {
                match diesel::replace_into(org_policies::table)
@@ -115,7 +126,7 @@ impl OrgPolicy {
        }
    }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
+    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(org_policies::table.filter(org_policies::uuid.eq(self.uuid)))
                .execute(conn)
@@ -123,7 +134,7 @@ impl OrgPolicy {
        }}
    }

-    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            org_policies::table
                .filter(org_policies::uuid.eq(uuid))
@@ -133,7 +144,7 @@ impl OrgPolicy {
        }}
    }

-    pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            org_policies::table
                .filter(org_policies::org_uuid.eq(org_uuid))
@@ -143,7 +154,7 @@ impl OrgPolicy {
        }}
    }

-    pub fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            org_policies::table
                .inner_join(
@@ -161,18 +172,18 @@ impl OrgPolicy {
        }}
    }

-    pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_org_and_type(org_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            org_policies::table
                .filter(org_policies::org_uuid.eq(org_uuid))
-                .filter(org_policies::atype.eq(atype))
+                .filter(org_policies::atype.eq(policy_type as i32))
                .first::<OrgPolicyDb>(conn)
                .ok()
                .from_db()
        }}
    }

-    pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
+    pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid)))
                .execute(conn)
@@ -180,52 +191,132 @@ impl OrgPolicy {
        }}
    }

+    pub async fn find_accepted_and_confirmed_by_user_and_active_policy(
+        user_uuid: &str,
+        policy_type: OrgPolicyType,
+        conn: &DbConn,
+    ) -> Vec<Self> {
+        db_run! { conn: {
+            org_policies::table
+                .inner_join(
+                    users_organizations::table.on(
+                        users_organizations::org_uuid.eq(org_policies::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_uuid)))
+                )
+                .filter(
+                    users_organizations::status.eq(UserOrgStatus::Accepted as i32)
+                )
+                .or_filter(
+                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                )
+                .filter(org_policies::atype.eq(policy_type as i32))
+                .filter(org_policies::enabled.eq(true))
+                .select(org_policies::all_columns)
+                .load::<OrgPolicyDb>(conn)
+                .expect("Error loading org_policy")
+                .from_db()
+        }}
+    }
+
+    pub async fn find_confirmed_by_user_and_active_policy(
+        user_uuid: &str,
+        policy_type: OrgPolicyType,
+        conn: &DbConn,
+    ) -> Vec<Self> {
+        db_run! { conn: {
+            org_policies::table
+                .inner_join(
+                    users_organizations::table.on(
+                        users_organizations::org_uuid.eq(org_policies::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_uuid)))
+                )
+                .filter(
+                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                )
+                .filter(org_policies::atype.eq(policy_type as i32))
+                .filter(org_policies::enabled.eq(true))
+                .select(org_policies::all_columns)
+                .load::<OrgPolicyDb>(conn)
+                .expect("Error loading org_policy")
+                .from_db()
+        }}
+    }
+
    /// Returns true if the user belongs to an org that has enabled the specified policy type,
    /// and the user is not an owner or admin of that org. This is only useful for checking
    /// applicability of policy types that have these particular semantics.
-    pub fn is_applicable_to_user(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> bool {
-        // TODO: Should check confirmed and accepted users
-        for policy in OrgPolicy::find_confirmed_by_user(user_uuid, conn) {
-            if policy.enabled && policy.has_type(policy_type) {
-                let org_uuid = &policy.org_uuid;
-                if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {
-                    if user.atype < UserOrgType::Admin {
-                        return true;
-                    }
-                }
+    pub async fn is_applicable_to_user(
+        user_uuid: &str,
+        policy_type: OrgPolicyType,
+        exclude_org_uuid: Option<&str>,
+        conn: &DbConn,
+    ) -> bool {
+        for policy in
+            OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(user_uuid, policy_type, conn).await
+        {
+            // Check if we need to skip this organization.
+            if exclude_org_uuid.is_some() && exclude_org_uuid.unwrap() == policy.org_uuid {
+                continue;
+            }
+
+            if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
+                if user.atype < UserOrgType::Admin {
+                    return true;
+                }
+            }
        }
        false
    }

+    pub async fn is_user_allowed(
+        user_uuid: &str,
+        org_uuid: &str,
+        exclude_current_org: bool,
+        conn: &DbConn,
+    ) -> OrgPolicyResult {
+        // Enforce TwoFactor/TwoStep login
+        if TwoFactor::find_by_user(user_uuid, conn).await.is_empty() {
+            match Self::find_by_org_and_type(org_uuid, OrgPolicyType::TwoFactorAuthentication, conn).await {
+                Some(p) if p.enabled => {
+                    return Err(OrgPolicyErr::TwoFactorMissing);
+                }
+                _ => {}
+            };
+        }
+
+        // Enforce Single Organization Policy of other organizations user is a member of
+        // This check here needs to exclude this current org-id, else an accepted user can not be confirmed.
+        let exclude_org = if exclude_current_org {
+            Some(org_uuid)
+        } else {
+            None
+        };
+        if Self::is_applicable_to_user(user_uuid, OrgPolicyType::SingleOrg, exclude_org, conn).await {
+            return Err(OrgPolicyErr::SingleOrgEnforced);
+        }
+
+        Ok(())
+    }
+
    /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail`
    /// option of the `Send Options` policy, and the user is not an owner or admin of that org.
-    pub fn is_hide_email_disabled(user_uuid: &str, conn: &DbConn) -> bool {
-        for policy in OrgPolicy::find_confirmed_by_user(user_uuid, conn) {
-            if policy.enabled && policy.has_type(OrgPolicyType::SendOptions) {
-                let org_uuid = &policy.org_uuid;
-                if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {
-                    if user.atype < UserOrgType::Admin {
-                        match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) {
-                            Ok(opts) => {
-                                if opts.data.DisableHideEmail {
-                                    return true;
-                                }
+    pub async fn is_hide_email_disabled(user_uuid: &str, conn: &DbConn) -> bool {
+        for policy in
+            OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await
+        {
+            if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
+                if user.atype < UserOrgType::Admin {
+                    match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) {
+                        Ok(opts) => {
+                            if opts.data.DisableHideEmail {
+                                return true;
                            }
-                            _ => error!("Failed to deserialize policy data: {}", policy.data),
                        }
+                        _ => error!("Failed to deserialize SendOptionsPolicyData: {}", policy.data),
                    }
                }
            }
        }
        false
    }

    /*pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
                .execute(conn)
                .map_res("Error deleting twofactors")
        }}
    }*/
}

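The reworked `is_applicable_to_user` adds an `exclude_org_uuid` escape hatch so the SingleOrg enforcement in `is_user_allowed` can skip the org that is currently confirming the user. A hedged, self-contained sketch of that filtering logic (plain Rust; `Policy` is a stand-in for the loaded rows):

struct Policy {
    org_uuid: String,
    user_is_admin: bool,
}

// Mirrors the loop in is_applicable_to_user: a policy applies unless the org
// is explicitly excluded or the user is an admin/owner there.
fn applies(policies: &[Policy], exclude_org_uuid: Option<&str>) -> bool {
    for p in policies {
        if exclude_org_uuid == Some(p.org_uuid.as_str()) {
            continue; // skip the org being confirmed right now
        }
        if !p.user_is_admin {
            return true;
        }
    }
    false
}

fn main() {
    let ps = vec![Policy { org_uuid: "org-1".to_string(), user_is_admin: false }];
    assert!(applies(&ps, None));
    assert!(!applies(&ps, Some("org-1"))); // excluded, so no longer applicable
}
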
@@ -31,7 +31,9 @@ db_object! {
    }
}

+// https://github.com/bitwarden/server/blob/b86a04cef9f1e1b82cf18e49fc94e017c641130c/src/Core/Enums/OrganizationUserStatusType.cs
pub enum UserOrgStatus {
+    Revoked = -1,
    Invited = 0,
    Accepted = 1,
    Confirmed = 2,
@@ -133,26 +135,29 @@ impl Organization {
            public_key,
        }
    }

+    // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/Organizations/OrganizationResponseModel.cs
    pub fn to_json(&self) -> Value {
        json!({
            "Id": self.uuid,
            "Identifier": null, // not supported by us
            "Name": self.name,
            "Seats": 10, // The value doesn't matter, we don't check server-side
+            // "MaxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
            "Use2fa": true,
            "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
-            "UseEvents": false, // not supported by us
-            "UseGroups": false, // not supported by us
+            "UseEvents": false, // Not supported
+            "UseGroups": false, // Not supported
            "UseTotp": true,
            "UsePolicies": true,
-            "UseSso": false, // We do not support SSO
+            // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
+            "UseSso": false, // Not supported
+            // "UseKeyConnector": false, // Not supported
            "SelfHost": true,
-            "UseApi": false, // not supported by us
+            "UseApi": false, // Not supported
            "HasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
-            "ResetPasswordEnrolled": false, // not supported by us
+            "UseResetPassword": false, // Not supported

            "BusinessName": null,
            "BusinessAddress1": null,
@@ -170,6 +175,12 @@ impl Organization {
    }
}

+// Used to either subtract or add to the current status
+// The number 128 should be fine, it is well within the range of an i32
+// The same goes for the database where we only use INTEGER (the same as an i32)
+// It should also provide enough room for 100+ types, which I doubt will ever happen.
+static ACTIVATE_REVOKE_DIFF: i32 = 128;
+
impl UserOrganization {
    pub fn new(user_uuid: String, org_uuid: String) -> Self {
        Self {
@@ -184,6 +195,18 @@ impl UserOrganization {
            atype: UserOrgType::User as i32,
        }
    }
+
+    pub fn restore(&mut self) {
+        if self.status < UserOrgStatus::Accepted as i32 {
+            self.status += ACTIVATE_REVOKE_DIFF;
+        }
+    }
+
+    pub fn revoke(&mut self) {
+        if self.status > UserOrgStatus::Revoked as i32 {
+            self.status -= ACTIVATE_REVOKE_DIFF;
+        }
+    }
}

use crate::db::DbConn;

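The `ACTIVATE_REVOKE_DIFF` trick above encodes a revoked membership by shifting the stored status 128 below its real value, so the original status survives a revoke/restore round trip. A short sketch of that arithmetic in isolation (plain Rust, mirroring `revoke` and `restore`; the constants stand in for the `UserOrgStatus` variants):

const REVOKED: i32 = -1;
const ACCEPTED: i32 = 1;
const CONFIRMED: i32 = 2;
const ACTIVATE_REVOKE_DIFF: i32 = 128;

fn revoke(status: &mut i32) {
    if *status > REVOKED {
        *status -= ACTIVATE_REVOKE_DIFF; // e.g. Confirmed 2 -> -126
    }
}

fn restore(status: &mut i32) {
    if *status < ACCEPTED {
        *status += ACTIVATE_REVOKE_DIFF; // -126 -> 2, the pre-revoke value
    }
}

fn main() {
    let mut status = CONFIRMED;
    revoke(&mut status);
    assert!(status < REVOKED); // reported to clients as Revoked (-1)
    restore(&mut status);
    assert_eq!(status, CONFIRMED); // original state recovered
}
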
@@ -193,10 +216,10 @@ use crate::error::MapResult;

/// Database methods
impl Organization {
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        UserOrganization::find_by_org(&self.uuid, conn).iter().for_each(|user_org| {
-            User::update_uuid_revision(&user_org.user_uuid, conn);
-        });
+    pub async fn save(&self, conn: &DbConn) -> EmptyResult {
+        for user_org in UserOrganization::find_by_org(&self.uuid, conn).await.iter() {
+            User::update_uuid_revision(&user_org.user_uuid, conn).await;
+        }

        db_run! { conn:
            sqlite, mysql {
@@ -230,13 +253,13 @@ impl Organization {
        }
    }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
+    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
        use super::{Cipher, Collection};

-        Cipher::delete_all_by_organization(&self.uuid, conn)?;
-        Collection::delete_all_by_organization(&self.uuid, conn)?;
-        UserOrganization::delete_all_by_organization(&self.uuid, conn)?;
-        OrgPolicy::delete_all_by_organization(&self.uuid, conn)?;
+        Cipher::delete_all_by_organization(&self.uuid, conn).await?;
+        Collection::delete_all_by_organization(&self.uuid, conn).await?;
+        UserOrganization::delete_all_by_organization(&self.uuid, conn).await?;
+        OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?;

        db_run! { conn: {
            diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
@@ -245,7 +268,7 @@ impl Organization {
        }}
    }

-    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            organizations::table
                .filter(organizations::uuid.eq(uuid))
@@ -254,7 +277,7 @@ impl Organization {
        }}
    }

-    pub fn get_all(conn: &DbConn) -> Vec<Self> {
+    pub async fn get_all(conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            organizations::table.load::<OrganizationDb>(conn).expect("Error loading organizations").from_db()
        }}
@@ -262,12 +285,13 @@ impl Organization {
}

impl UserOrganization {
-    pub fn to_json(&self, conn: &DbConn) -> Value {
-        let org = Organization::find_by_uuid(&self.org_uuid, conn).unwrap();
+    pub async fn to_json(&self, conn: &DbConn) -> Value {
+        let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();

+        // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
        json!({
            "Id": self.org_uuid,
-            "Identifier": null, // not supported by us
+            "Identifier": null, // Not supported
            "Name": org.name,
            "Seats": 10, // The value doesn't matter, we don't check server-side
            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
@@ -275,44 +299,48 @@ impl UserOrganization {

            "Use2fa": true,
            "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
-            "UseEvents": false, // not supported by us
-            "UseGroups": false, // not supported by us
+            "UseEvents": false, // Not supported
+            "UseGroups": false, // Not supported
            "UseTotp": true,
+            // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
            "UsePolicies": true,
-            "UseApi": false, // not supported by us
+            "UseApi": false, // Not supported
            "SelfHost": true,
            "HasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
-            "ResetPasswordEnrolled": false, // not supported by us
-            "SsoBound": false, // We do not support SSO
-            "UseSso": false, // We do not support SSO
-            // TODO: Add support for Business Portal
-            // Upstream is moving Policies and SSO management outside of the web-vault to /portal
-            // For now they still have that code also in the web-vault, but they will remove it at some point.
-            // https://github.com/bitwarden/server/tree/master/bitwarden_license/src/
-            "UseBusinessPortal": false, // Disable BusinessPortal Button
+            "ResetPasswordEnrolled": false, // Not supported
+            "SsoBound": false, // Not supported
+            "UseSso": false, // Not supported
+            "ProviderId": null,
+            "ProviderName": null,
+            // "KeyConnectorEnabled": false,
+            // "KeyConnectorUrl": null,

            // TODO: Add support for Custom User Roles
            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
            // "Permissions": {
-            //     "AccessBusinessPortal": false,
-            //     "AccessEventLogs": false,
+            //     "AccessEventLogs": false, // Not supported
            //     "AccessImportExport": false,
            //     "AccessReports": false,
            //     "ManageAllCollections": false,
            //     "CreateNewCollections": false,
            //     "EditAnyCollection": false,
            //     "DeleteAnyCollection": false,
            //     "ManageAssignedCollections": false,
            //     "editAssignedCollections": false,
            //     "deleteAssignedCollections": false,
            //     "ManageCiphers": false,
-            //     "ManageGroups": false,
+            //     "ManageGroups": false, // Not supported
            //     "ManagePolicies": false,
-            //     "ManageResetPassword": false,
-            //     "ManageSso": false,
+            //     "ManageResetPassword": false, // Not supported
+            //     "ManageSso": false, // Not supported
            //     "ManageUsers": false,
+            //     "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
            // },

            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side

            // These are per user
            "UserId": self.user_uuid,
            "Key": self.akey,
            "Status": self.status,
            "Type": self.atype,
@@ -322,8 +350,16 @@ impl UserOrganization {
        })
    }

-    pub fn to_json_user_details(&self, conn: &DbConn) -> Value {
-        let user = User::find_by_uuid(&self.user_uuid, conn).unwrap();
+    pub async fn to_json_user_details(&self, conn: &DbConn) -> Value {
+        let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap();
+
+        // Because BitWarden wants the status to be -1 for revoked users we need to catch that here.
+        // We subtract/add a number so we can restore/activate the user to its previous state again.
+        let status = if self.status < UserOrgStatus::Revoked as i32 {
+            UserOrgStatus::Revoked as i32
+        } else {
+            self.status
+        };

        json!({
            "Id": self.uuid,
@@ -331,7 +367,7 @@ impl UserOrganization {
            "Name": user.name,
            "Email": user.email,

-            "Status": self.status,
+            "Status": status,
            "Type": self.atype,
            "AccessAll": self.access_all,

@@ -347,11 +383,12 @@ impl UserOrganization {
        })
    }

-    pub fn to_json_details(&self, conn: &DbConn) -> Value {
+    pub async fn to_json_details(&self, conn: &DbConn) -> Value {
        let coll_uuids = if self.access_all {
            vec![] // If we have complete access, no need to fill the array
        } else {
-            let collections = CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn);
+            let collections =
+                CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn).await;
            collections
                .iter()
                .map(|c| {
@@ -364,11 +401,19 @@ impl UserOrganization {
                .collect()
        };

+        // Because BitWarden wants the status to be -1 for revoked users we need to catch that here.
+        // We subtract/add a number so we can restore/activate the user to its previous state again.
+        let status = if self.status < UserOrgStatus::Revoked as i32 {
+            UserOrgStatus::Revoked as i32
+        } else {
+            self.status
+        };
+
        json!({
            "Id": self.uuid,
            "UserId": self.user_uuid,

-            "Status": self.status,
+            "Status": status,
            "Type": self.atype,
            "AccessAll": self.access_all,
            "Collections": coll_uuids,
@@ -376,8 +421,8 @@ impl UserOrganization {
            "Object": "organizationUserDetails",
        })
    }
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&self.user_uuid, conn);
+    pub async fn save(&self, conn: &DbConn) -> EmptyResult {
+        User::update_uuid_revision(&self.user_uuid, conn).await;

        db_run! { conn:
            sqlite, mysql {
@@ -410,10 +455,10 @@ impl UserOrganization {
        }
    }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&self.user_uuid, conn);
+    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
+        User::update_uuid_revision(&self.user_uuid, conn).await;

-        CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, conn)?;
+        CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, conn).await?;

        db_run! { conn: {
            diesel::delete(users_organizations::table.filter(users_organizations::uuid.eq(self.uuid)))
@@ -422,23 +467,23 @@ impl UserOrganization {
        }}
    }

-    pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for user_org in Self::find_by_org(org_uuid, conn) {
-            user_org.delete(conn)?;
+    pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for user_org in Self::find_by_org(org_uuid, conn).await {
+            user_org.delete(conn).await?;
        }
        Ok(())
    }

-    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for user_org in Self::find_any_state_by_user(user_uuid, conn) {
-            user_org.delete(conn)?;
+    pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for user_org in Self::find_any_state_by_user(user_uuid, conn).await {
+            user_org.delete(conn).await?;
        }
        Ok(())
    }

-    pub fn find_by_email_and_org(email: &str, org_id: &str, conn: &DbConn) -> Option<UserOrganization> {
-        if let Some(user) = super::User::find_by_mail(email, conn) {
-            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn) {
+    pub async fn find_by_email_and_org(email: &str, org_id: &str, conn: &DbConn) -> Option<UserOrganization> {
+        if let Some(user) = super::User::find_by_mail(email, conn).await {
+            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await {
                return Some(user_org);
            }
        }
@@ -458,7 +503,7 @@ impl UserOrganization {
        (self.access_all || self.atype >= UserOrgType::Admin) && self.has_status(UserOrgStatus::Confirmed)
    }

-    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::uuid.eq(uuid))
@@ -467,7 +512,7 @@ impl UserOrganization {
        }}
    }

-    pub fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::uuid.eq(uuid))
@@ -477,7 +522,7 @@ impl UserOrganization {
        }}
    }

-    pub fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::user_uuid.eq(user_uuid))
@@ -487,7 +532,7 @@ impl UserOrganization {
        }}
    }

-    pub fn find_invited_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_invited_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::user_uuid.eq(user_uuid))
@@ -497,7 +542,7 @@ impl UserOrganization {
        }}
    }

-    pub fn find_any_state_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_any_state_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::user_uuid.eq(user_uuid))
@@ -506,7 +551,19 @@ impl UserOrganization {
        }}
    }

-    pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn count_accepted_and_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::user_uuid.eq(user_uuid))
+                .filter(users_organizations::status.eq(UserOrgStatus::Accepted as i32))
+                .or_filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                .count()
+                .first::<i64>(conn)
+                .unwrap_or(0)
+        }}
+    }
+
+    pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::org_uuid.eq(org_uuid))
@@ -515,7 +572,7 @@ impl UserOrganization {
        }}
    }

-    pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
+    pub async fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::org_uuid.eq(org_uuid))
@@ -526,17 +583,29 @@ impl UserOrganization {
        }}
    }

-    pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_org_and_type(org_uuid: &str, atype: UserOrgType, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::org_uuid.eq(org_uuid))
-                .filter(users_organizations::atype.eq(atype))
+                .filter(users_organizations::atype.eq(atype as i32))
                .load::<UserOrganizationDb>(conn)
                .expect("Error loading user organizations").from_db()
        }}
    }

-    pub fn find_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn count_confirmed_by_org_and_type(org_uuid: &str, atype: UserOrgType, conn: &DbConn) -> i64 {
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .filter(users_organizations::atype.eq(atype as i32))
+                .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                .count()
+                .first::<i64>(conn)
+                .unwrap_or(0)
+        }}
+    }
+
+    pub async fn find_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::user_uuid.eq(user_uuid))
@@ -546,7 +615,16 @@ impl UserOrganization {
        }}
    }

-    pub fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::user_uuid.eq(user_uuid))
+                .load::<UserOrganizationDb>(conn)
+                .expect("Error loading user organizations").from_db()
+        }}
+    }
+
+    pub async fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table
                .inner_join(
@@ -565,7 +643,7 @@ impl UserOrganization {
        }}
    }

-    pub fn find_by_cipher_and_org(cipher_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_cipher_and_org(cipher_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::org_uuid.eq(org_uuid))
@@ -587,7 +665,7 @@ impl UserOrganization {
        }}
    }

-    pub fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_organizations::table
                .filter(users_organizations::org_uuid.eq(org_uuid))

@@ -1,14 +1,12 @@
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;

-use super::{Organization, User};
+use super::User;

db_object! {
-    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "sends"]
    #[changeset_options(treat_none_as_null="true")]
-    #[belongs_to(User, foreign_key = "user_uuid")]
-    #[belongs_to(Organization, foreign_key = "organization_uuid")]
    #[primary_key(uuid)]
    pub struct Send {
        pub uuid: String,
@@ -103,7 +101,7 @@ impl Send {
        }
    }

-    pub fn creator_identifier(&self, conn: &DbConn) -> Option<String> {
+    pub async fn creator_identifier(&self, conn: &DbConn) -> Option<String> {
        if let Some(hide_email) = self.hide_email {
            if hide_email {
                return None;
@@ -111,7 +109,7 @@ impl Send {
        }

        if let Some(user_uuid) = &self.user_uuid {
-            if let Some(user) = User::find_by_uuid(user_uuid, conn) {
+            if let Some(user) = User::find_by_uuid(user_uuid, conn).await {
                return Some(user.email);
            }
        }
@@ -150,7 +148,7 @@ impl Send {
        })
    }

-    pub fn to_json_access(&self, conn: &DbConn) -> Value {
+    pub async fn to_json_access(&self, conn: &DbConn) -> Value {
        use crate::util::format_date;

        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
@@ -164,7 +162,7 @@ impl Send {
            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },

            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
-            "CreatorIdentifier": self.creator_identifier(conn),
+            "CreatorIdentifier": self.creator_identifier(conn).await,
            "Object": "send-access",
        })
    }
@@ -176,8 +174,8 @@ use crate::api::EmptyResult;
use crate::error::MapResult;

impl Send {
-    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
-        self.update_users_revision(conn);
+    pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
+        self.update_users_revision(conn).await;
        self.revision_date = Utc::now().naive_utc();

        db_run! { conn:
@@ -211,8 +209,8 @@ impl Send {
        }
    }

-    pub fn delete(&self, conn: &DbConn) -> EmptyResult {
-        self.update_users_revision(conn);
+    pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
+        self.update_users_revision(conn).await;

        if self.atype == SendType::File as i32 {
            std::fs::remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).ok();
@@ -226,17 +224,17 @@ impl Send {
    }

    /// Purge all sends that are past their deletion date.
-    pub fn purge(conn: &DbConn) {
-        for send in Self::find_by_past_deletion_date(conn) {
-            send.delete(conn).ok();
+    pub async fn purge(conn: &DbConn) {
+        for send in Self::find_by_past_deletion_date(conn).await {
+            send.delete(conn).await.ok();
        }
    }

-    pub fn update_users_revision(&self, conn: &DbConn) -> Vec<String> {
+    pub async fn update_users_revision(&self, conn: &DbConn) -> Vec<String> {
        let mut user_uuids = Vec::new();
        match &self.user_uuid {
            Some(user_uuid) => {
-                User::update_uuid_revision(user_uuid, conn);
+                User::update_uuid_revision(user_uuid, conn).await;
                user_uuids.push(user_uuid.clone())
            }
            None => {
@@ -246,14 +244,14 @@ impl Send {
        user_uuids
    }

-    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        for send in Self::find_by_user(user_uuid, conn) {
-            send.delete(conn)?;
+    pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        for send in Self::find_by_user(user_uuid, conn).await {
+            send.delete(conn).await?;
        }
        Ok(())
    }

-    pub fn find_by_access_id(access_id: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_access_id(access_id: &str, conn: &DbConn) -> Option<Self> {
        use data_encoding::BASE64URL_NOPAD;
        use uuid::Uuid;

@@ -267,10 +265,10 @@ impl Send {
            Err(_) => return None,
        };

-        Self::find_by_uuid(&uuid, conn)
+        Self::find_by_uuid(&uuid, conn).await
    }

-    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! {conn: {
            sends::table
                .filter(sends::uuid.eq(uuid))
@@ -280,7 +278,7 @@ impl Send {
        }}
    }

-    pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            sends::table
                .filter(sends::user_uuid.eq(user_uuid))
@@ -288,7 +286,7 @@ impl Send {
        }}
    }

-    pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            sends::table
                .filter(sends::organization_uuid.eq(org_uuid))
@@ -296,7 +294,7 @@ impl Send {
        }}
    }

-    pub fn find_by_past_deletion_date(conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_past_deletion_date(conn: &DbConn) -> Vec<Self> {
        let now = Utc::now().naive_utc();
        db_run! {conn: {
            sends::table

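`find_by_access_id` works because, as the imports above suggest, a Send's access id is its 16-byte uuid encoded as unpadded base64url. A hedged sketch of that round trip using the same crates the file imports (`data_encoding`, `uuid`, here assuming the uuid crate's `v4` feature); the exact encoding is inferred from this diff, not stated elsewhere:

use data_encoding::BASE64URL_NOPAD;
use uuid::Uuid;

fn main() {
    let send_uuid = Uuid::new_v4();

    // Encode the raw 16 bytes; this is what an "AccessId" would look like.
    let access_id = BASE64URL_NOPAD.encode(send_uuid.as_bytes());

    // find_by_access_id reverses it: decode, rebuild the Uuid, then look up the row.
    let bytes = BASE64URL_NOPAD.decode(access_id.as_bytes()).expect("valid base64url");
    let decoded = Uuid::from_slice(&bytes).expect("16 bytes");

    assert_eq!(decoded, send_uuid);
}
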
@@ -2,12 +2,9 @@ use serde_json::Value;

use crate::{api::EmptyResult, db::DbConn, error::MapResult};

-use super::User;
-
db_object! {
-    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "twofactor"]
-    #[belongs_to(User, foreign_key = "user_uuid")]
    #[primary_key(uuid)]
    pub struct TwoFactor {
        pub uuid: String,
@@ -71,7 +68,7 @@ impl TwoFactor {

/// Database methods
impl TwoFactor {
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
+    pub async fn save(&self, conn: &DbConn) -> EmptyResult {
        db_run! { conn:
            sqlite, mysql {
                match diesel::replace_into(twofactor::table)
@@ -110,7 +107,7 @@ impl TwoFactor {
        }
    }

-    pub fn delete(self, conn: &DbConn) -> EmptyResult {
+    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(twofactor::table.filter(twofactor::uuid.eq(self.uuid)))
                .execute(conn)
@@ -118,7 +115,7 @@ impl TwoFactor {
        }}
    }

-    pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+    pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            twofactor::table
                .filter(twofactor::user_uuid.eq(user_uuid))
@@ -129,7 +126,7 @@ impl TwoFactor {
        }}
    }

-    pub fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
+    pub async fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            twofactor::table
                .filter(twofactor::user_uuid.eq(user_uuid))
@@ -140,7 +137,7 @@ impl TwoFactor {
        }}
    }

-    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
+    pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
                .execute(conn)
@@ -148,7 +145,7 @@ impl TwoFactor {
        }}
    }

-    pub fn migrate_u2f_to_webauthn(conn: &DbConn) -> EmptyResult {
+    pub async fn migrate_u2f_to_webauthn(conn: &DbConn) -> EmptyResult {
        let u2f_factors = db_run! { conn: {
            twofactor::table
                .filter(twofactor::atype.eq(TwoFactorType::U2f as i32))
@@ -157,7 +154,7 @@ impl TwoFactor {
                .from_db()
        }};

-        use crate::api::core::two_factor::u2f::U2FRegistration;
+        use crate::api::core::two_factor::webauthn::U2FRegistration;
        use crate::api::core::two_factor::webauthn::{get_webauthn_registrations, WebauthnRegistration};
        use webauthn_rs::proto::*;

@@ -168,7 +165,7 @@ impl TwoFactor {
                continue;
            }

-            let (_, mut webauthn_regs) = get_webauthn_registrations(&u2f.user_uuid, conn)?;
+            let (_, mut webauthn_regs) = get_webauthn_registrations(&u2f.user_uuid, conn).await?;

            // If the user already has webauthn registrations saved, don't overwrite them
            if !webauthn_regs.is_empty() {
@@ -207,10 +204,11 @@ impl TwoFactor {
            }

            u2f.data = serde_json::to_string(&regs)?;
-            u2f.save(conn)?;
+            u2f.save(conn).await?;

            TwoFactor::new(u2f.user_uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&webauthn_regs)?)
-                .save(conn)?;
+                .save(conn)
+                .await?;
        }

        Ok(())

@@ -2,12 +2,9 @@ use chrono::{NaiveDateTime, Utc};

use crate::{api::EmptyResult, auth::ClientIp, db::DbConn, error::MapResult, CONFIG};

use super::User;

db_object! {
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[table_name = "twofactor_incomplete"]
    #[belongs_to(User, foreign_key = "user_uuid")]
    #[primary_key(user_uuid, device_uuid)]
    pub struct TwoFactorIncomplete {
        pub user_uuid: String,

@@ -22,7 +19,7 @@ db_object! {
}

impl TwoFactorIncomplete {
    pub fn mark_incomplete(
    pub async fn mark_incomplete(
        user_uuid: &str,
        device_uuid: &str,
        device_name: &str,

@@ -36,7 +33,7 @@ impl TwoFactorIncomplete {
        // Don't update the data for an existing user/device pair, since that
        // would allow an attacker to arbitrarily delay notifications by
        // sending repeated 2FA attempts to reset the timer.
        let existing = Self::find_by_user_and_device(user_uuid, device_uuid, conn);
        let existing = Self::find_by_user_and_device(user_uuid, device_uuid, conn).await;
        if existing.is_some() {
            return Ok(());
        }

@@ -55,15 +52,15 @@ impl TwoFactorIncomplete {
        }}
    }

    pub fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult {
    pub async fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult {
        if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
            return Ok(());
        }

        Self::delete_by_user_and_device(user_uuid, device_uuid, conn)
        Self::delete_by_user_and_device(user_uuid, device_uuid, conn).await
    }

    pub fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            twofactor_incomplete::table
                .filter(twofactor_incomplete::user_uuid.eq(user_uuid))

@@ -74,7 +71,7 @@ impl TwoFactorIncomplete {
        }}
    }

    pub fn find_logins_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
    pub async fn find_logins_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            twofactor_incomplete::table
                .filter(twofactor_incomplete::login_time.lt(dt))

@@ -84,11 +81,11 @@ impl TwoFactorIncomplete {
        }}
    }

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn)
    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
        Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn).await
    }

    pub fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult {
    pub async fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(twofactor_incomplete::table
                .filter(twofactor_incomplete::user_uuid.eq(user_uuid))

@@ -98,7 +95,7 @@ impl TwoFactorIncomplete {
        }}
    }

    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
    pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid)))
                .execute(conn)
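The early-return guard at the top of `mark_incomplete` is security-relevant: only the first failed attempt per user/device pair is stored, so repeated 2FA attempts cannot push the notification deadline forward. The same idea in a standalone sketch (an in-memory map stands in for the database; names are hypothetical):

use std::collections::HashMap;
use std::time::Instant;

type Pending = HashMap<(String, String), Instant>;

fn mark_incomplete(pending: &mut Pending, user_uuid: &str, device_uuid: &str) {
    // or_insert_with keeps the FIRST timestamp: later attempts for the
    // same (user, device) pair do not reset the notification timer.
    pending.entry((user_uuid.to_string(), device_uuid.to_string())).or_insert_with(Instant::now);
}

fn main() {
    let mut pending = Pending::new();
    mark_incomplete(&mut pending, "user-1", "device-1");
    let first = pending[&("user-1".to_string(), "device-1".to_string())];
    mark_incomplete(&mut pending, "user-1", "device-1");
    // The second call is a no-op; the original timestamp is preserved.
    assert_eq!(first, pending[&("user-1".to_string(), "device-1".to_string())]);
}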
@@ -171,7 +171,7 @@ impl User {
    pub fn set_stamp_exception(&mut self, route_exception: Vec<String>) {
        let stamp_exception = UserStampException {
            routes: route_exception,
            security_stamp: self.security_stamp.to_string(),
            security_stamp: self.security_stamp.clone(),
            expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(),
        };
        self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());

@@ -192,12 +192,20 @@ use crate::db::DbConn;
use crate::api::EmptyResult;
use crate::error::MapResult;

use futures::{stream, stream::StreamExt};

/// Database methods
impl User {
    pub fn to_json(&self, conn: &DbConn) -> Value {
        let orgs = UserOrganization::find_confirmed_by_user(&self.uuid, conn);
        let orgs_json: Vec<Value> = orgs.iter().map(|c| c.to_json(conn)).collect();
        let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).is_empty();
    pub async fn to_json(&self, conn: &DbConn) -> Value {
        let orgs_json = stream::iter(UserOrganization::find_confirmed_by_user(&self.uuid, conn).await)
            .then(|c| async {
                let c = c; // Move out this single variable
                c.to_json(conn).await
            })
            .collect::<Vec<Value>>()
            .await;

        let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).await.is_empty();

        // TODO: Might want to save the status field in the DB
        let status = if self.password_hash.is_empty() {

@@ -227,7 +235,7 @@ impl User {
        })
    }

    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
    pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
        if self.email.trim().is_empty() {
            err!("User email can't be empty")
        }

@@ -265,26 +273,26 @@ impl User {
        }
    }

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        for user_org in UserOrganization::find_confirmed_by_user(&self.uuid, conn) {
            if user_org.atype == UserOrgType::Owner {
                let owner_type = UserOrgType::Owner as i32;
                if UserOrganization::find_by_org_and_type(&user_org.org_uuid, owner_type, conn).len() <= 1 {
                    err!("Can't delete last owner")
                }
    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
        for user_org in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await {
            if user_org.atype == UserOrgType::Owner
                && UserOrganization::count_confirmed_by_org_and_type(&user_org.org_uuid, UserOrgType::Owner, conn).await
                    <= 1
            {
                err!("Can't delete last owner")
            }
        }

        Send::delete_all_by_user(&self.uuid, conn)?;
        EmergencyAccess::delete_all_by_user(&self.uuid, conn)?;
        UserOrganization::delete_all_by_user(&self.uuid, conn)?;
        Cipher::delete_all_by_user(&self.uuid, conn)?;
        Favorite::delete_all_by_user(&self.uuid, conn)?;
        Folder::delete_all_by_user(&self.uuid, conn)?;
        Device::delete_all_by_user(&self.uuid, conn)?;
        TwoFactor::delete_all_by_user(&self.uuid, conn)?;
        TwoFactorIncomplete::delete_all_by_user(&self.uuid, conn)?;
        Invitation::take(&self.email, conn); // Delete invitation if any
        Send::delete_all_by_user(&self.uuid, conn).await?;
        EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
        UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
        Cipher::delete_all_by_user(&self.uuid, conn).await?;
        Favorite::delete_all_by_user(&self.uuid, conn).await?;
        Folder::delete_all_by_user(&self.uuid, conn).await?;
        Device::delete_all_by_user(&self.uuid, conn).await?;
        TwoFactor::delete_all_by_user(&self.uuid, conn).await?;
        TwoFactorIncomplete::delete_all_by_user(&self.uuid, conn).await?;
        Invitation::take(&self.email, conn).await; // Delete invitation if any

        db_run! {conn: {
            diesel::delete(users::table.filter(users::uuid.eq(self.uuid)))

@@ -293,13 +301,13 @@ impl User {
        }}
    }

    pub fn update_uuid_revision(uuid: &str, conn: &DbConn) {
        if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn) {
    pub async fn update_uuid_revision(uuid: &str, conn: &DbConn) {
        if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
            warn!("Failed to update revision for {}: {:#?}", uuid, e);
        }
    }

    pub fn update_all_revisions(conn: &DbConn) -> EmptyResult {
    pub async fn update_all_revisions(conn: &DbConn) -> EmptyResult {
        let updated_at = Utc::now().naive_utc();

        db_run! {conn: {

@@ -312,13 +320,13 @@ impl User {
        }}
    }

    pub fn update_revision(&mut self, conn: &DbConn) -> EmptyResult {
    pub async fn update_revision(&mut self, conn: &DbConn) -> EmptyResult {
        self.updated_at = Utc::now().naive_utc();

        Self::_update_revision(&self.uuid, &self.updated_at, conn)
        Self::_update_revision(&self.uuid, &self.updated_at, conn).await
    }

    fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult {
    async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult {
        db_run! {conn: {
            crate::util::retry(|| {
                diesel::update(users::table.filter(users::uuid.eq(uuid)))

@@ -329,7 +337,7 @@ impl User {
        }}
    }

    pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
        let lower_mail = mail.to_lowercase();
        db_run! {conn: {
            users::table

@@ -340,20 +348,20 @@ impl User {
        }}
    }

    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! {conn: {
            users::table.filter(users::uuid.eq(uuid)).first::<UserDb>(conn).ok().from_db()
        }}
    }

    pub fn get_all(conn: &DbConn) -> Vec<Self> {
    pub async fn get_all(conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            users::table.load::<UserDb>(conn).expect("Error loading users").from_db()
        }}
    }

    pub fn last_active(&self, conn: &DbConn) -> Option<NaiveDateTime> {
        match Device::find_latest_active_by_user(&self.uuid, conn) {
    pub async fn last_active(&self, conn: &DbConn) -> Option<NaiveDateTime> {
        match Device::find_latest_active_by_user(&self.uuid, conn).await {
            Some(device) => Some(device.updated_at),
            None => None,
        }

@@ -368,7 +376,7 @@ impl Invitation {
        }
    }

    pub fn save(&self, conn: &DbConn) -> EmptyResult {
    pub async fn save(&self, conn: &DbConn) -> EmptyResult {
        if self.email.trim().is_empty() {
            err!("Invitation email can't be empty")
        }

@@ -393,7 +401,7 @@ impl Invitation {
        }
    }

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
    pub async fn delete(self, conn: &DbConn) -> EmptyResult {
        db_run! {conn: {
            diesel::delete(invitations::table.filter(invitations::email.eq(self.email)))
                .execute(conn)

@@ -401,7 +409,7 @@ impl Invitation {
        }}
    }

    pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
        let lower_mail = mail.to_lowercase();
        db_run! {conn: {
            invitations::table

@@ -412,9 +420,9 @@ impl Invitation {
        }}
    }

    pub fn take(mail: &str, conn: &DbConn) -> bool {
        match Self::find_by_mail(mail, conn) {
            Some(invitation) => invitation.delete(conn).is_ok(),
    pub async fn take(mail: &str, conn: &DbConn) -> bool {
        match Self::find_by_mail(mail, conn).await {
            Some(invitation) => invitation.delete(conn).await.is_ok(),
            None => false,
        }
    }
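The `to_json` rewrite is the one spot where a plain iterator no longer works: `map` cannot call an async `to_json`, so the code switches to `futures::stream`. A runnable sketch of that pattern in isolation (the item type and JSON body are placeholders):

use futures::{stream, StreamExt};

// Placeholder for an async per-item conversion such as `c.to_json(conn).await`.
async fn to_json(id: u32) -> String {
    format!("{{\"id\":{id}}}")
}

#[tokio::main]
async fn main() {
    // stream::iter + then + collect is the async analogue of
    // .iter().map(|c| c.to_json(conn)).collect::<Vec<_>>();
    // `then` awaits each item's future in order, `collect` gathers the results.
    let json: Vec<String> = stream::iter(vec![1, 2, 3])
        .then(|id| async move { to_json(id).await })
        .collect()
        .await;
    assert_eq!(json.len(), 3);
}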
@@ -42,7 +42,7 @@ table! {
}

table! {
    devices (uuid) {
    devices (uuid, user_uuid) {
        uuid -> Text,
        created_at -> Datetime,
        updated_at -> Datetime,

@@ -42,7 +42,7 @@ table! {
}

table! {
    devices (uuid) {
    devices (uuid, user_uuid) {
        uuid -> Text,
        created_at -> Timestamp,
        updated_at -> Timestamp,

@@ -42,7 +42,7 @@ table! {
}

table! {
    devices (uuid) {
    devices (uuid, user_uuid) {
        uuid -> Text,
        created_at -> Timestamp,
        updated_at -> Timestamp,
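The same hunk appears three times because each database backend carries its own schema file; the substantive change is identical everywhere: `devices` gains a composite primary key of `(uuid, user_uuid)`. With diesel's 1.x-style macros (matching the attribute syntax used throughout this diff), that changes how single-row lookups are written. A hedged sketch with a trimmed stand-in schema, not the real one:

#[macro_use]
extern crate diesel;

use diesel::prelude::*;

table! {
    devices (uuid, user_uuid) {
        uuid -> Text,
        user_uuid -> Text,
    }
}

// With a composite primary key, `find` takes the key tuple instead of a
// single value; filtering on both columns explicitly would be equivalent.
fn delete_device(conn: &SqliteConnection, device: &str, user: &str) -> diesel::QueryResult<usize> {
    diesel::delete(devices::table.find((device, user))).execute(conn)
}

fn main() {}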
34  src/error.rs
@@ -24,7 +24,7 @@ macro_rules! make_error {
    }
}
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.error {$(
            ErrorKind::$name(e) => f.write_str(&$usr_msg_fun(e, &self.message)),
        )+}

@@ -45,10 +45,11 @@ use lettre::transport::smtp::Error as SmtpErr;
use openssl::error::ErrorStack as SSLErr;
use regex::Error as RegexErr;
use reqwest::Error as ReqErr;
use rocket::error::Error as RocketErr;
use serde_json::{Error as SerdeErr, Value};
use std::io::Error as IoErr;
use std::time::SystemTimeError as TimeErr;
use u2f::u2ferror::U2fError as U2fErr;
use tokio_tungstenite::tungstenite::Error as TungstError;
use webauthn_rs::error::WebauthnError as WebauthnErr;
use yubico::yubicoerror::YubicoError as YubiErr;

@@ -69,7 +70,6 @@ make_error! {
    Json(Value): _no_source, _serialize,
    Db(DieselErr): _has_source, _api_error,
    R2d2(R2d2Err): _has_source, _api_error,
    U2f(U2fErr): _has_source, _api_error,
    Serde(SerdeErr): _has_source, _api_error,
    JWt(JwtErr): _has_source, _api_error,
    Handlebars(HbErr): _has_source, _api_error,

@@ -84,14 +84,16 @@ make_error! {
    Address(AddrErr): _has_source, _api_error,
    Smtp(SmtpErr): _has_source, _api_error,
    OpenSSL(SSLErr): _has_source, _api_error,
    Rocket(RocketErr): _has_source, _api_error,

    DieselCon(DieselConErr): _has_source, _api_error,
    DieselMig(DieselMigErr): _has_source, _api_error,
    Webauthn(WebauthnErr): _has_source, _api_error,
    WebSocket(TungstError): _has_source, _api_error,
}

impl std::fmt::Debug for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self.source() {
            Some(e) => write!(f, "{}.\n[CAUSE] {:#?}", self.message, e),
            None => match self.error {

@@ -193,8 +195,8 @@ use rocket::http::{ContentType, Status};
use rocket::request::Request;
use rocket::response::{self, Responder, Response};

impl<'r> Responder<'r> for Error {
    fn respond_to(self, _: &Request) -> response::Result<'r> {
impl<'r> Responder<'r, 'static> for Error {
    fn respond_to(self, _: &Request<'_>) -> response::Result<'static> {
        match self.error {
            ErrorKind::Empty(_) => {} // Don't print the error in this situation
            ErrorKind::Simple(_) => {} // Don't print the error in this situation

@@ -202,8 +204,8 @@ impl<'r> Responder<'r> for Error {
        };

        let code = Status::from_code(self.error_code).unwrap_or(Status::BadRequest);

        Response::build().status(code).header(ContentType::JSON).sized_body(Cursor::new(format!("{}", self))).ok()
        let body = self.to_string();
        Response::build().status(code).header(ContentType::JSON).sized_body(Some(body.len()), Cursor::new(body)).ok()
    }
}

@@ -214,20 +216,20 @@ impl<'r> Responder<'r> for Error {
macro_rules! err {
    ($msg:expr) => {{
        error!("{}", $msg);
        return Err(crate::error::Error::new($msg, $msg));
        return Err($crate::error::Error::new($msg, $msg));
    }};
    ($usr_msg:expr, $log_value:expr) => {{
        error!("{}. {}", $usr_msg, $log_value);
        return Err(crate::error::Error::new($usr_msg, $log_value));
        return Err($crate::error::Error::new($usr_msg, $log_value));
    }};
}

macro_rules! err_silent {
    ($msg:expr) => {{
        return Err(crate::error::Error::new($msg, $msg));
        return Err($crate::error::Error::new($msg, $msg));
    }};
    ($usr_msg:expr, $log_value:expr) => {{
        return Err(crate::error::Error::new($usr_msg, $log_value));
        return Err($crate::error::Error::new($usr_msg, $log_value));
    }};
}

@@ -235,11 +237,11 @@ macro_rules! err_silent {
macro_rules! err_code {
    ($msg:expr, $err_code: expr) => {{
        error!("{}", $msg);
        return Err(crate::error::Error::new($msg, $msg).with_code($err_code));
        return Err($crate::error::Error::new($msg, $msg).with_code($err_code));
    }};
    ($usr_msg:expr, $log_value:expr, $err_code: expr) => {{
        error!("{}. {}", $usr_msg, $log_value);
        return Err(crate::error::Error::new($usr_msg, $log_value).with_code($err_code));
        return Err($crate::error::Error::new($usr_msg, $log_value).with_code($err_code));
    }};
}

@@ -247,11 +249,11 @@ macro_rules! err_code {
macro_rules! err_discard {
    ($msg:expr, $data:expr) => {{
        std::io::copy(&mut $data.open(), &mut std::io::sink()).ok();
        return Err(crate::error::Error::new($msg, $msg));
        return Err($crate::error::Error::new($msg, $msg));
    }};
    ($usr_msg:expr, $log_value:expr, $data:expr) => {{
        std::io::copy(&mut $data.open(), &mut std::io::sink()).ok();
        return Err(crate::error::Error::new($usr_msg, $log_value));
        return Err($crate::error::Error::new($usr_msg, $log_value));
    }};
}
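The recurring change from `crate::error::Error` to `$crate::error::Error` in these macros is a hygiene fix: `$crate` always expands to the path of the crate that defines the macro, so the expansion keeps resolving even if the macro is invoked from another crate or module tree, where a bare `crate::` would point at the caller instead. A minimal standalone illustration (a toy macro, not the real `err!`):

pub mod error {
    #[derive(Debug)]
    pub struct Error(pub String);
}

#[macro_export]
macro_rules! err {
    ($msg:expr) => {{
        // `$crate` resolves to the defining crate no matter where the
        // macro expands; `crate::` would resolve in the caller's crate.
        return Err($crate::error::Error($msg.to_string()));
    }};
}

fn check(flag: bool) -> Result<(), error::Error> {
    if !flag {
        err!("flag must be set");
    }
    Ok(())
}

fn main() {
    assert!(check(false).is_err());
}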
136  src/mail.rs
@@ -4,11 +4,11 @@ use chrono::NaiveDateTime;
use percent_encoding::{percent_encode, NON_ALPHANUMERIC};

use lettre::{
    message::{header, Mailbox, Message, MultiPart, SinglePart},
    message::{Mailbox, Message, MultiPart},
    transport::smtp::authentication::{Credentials, Mechanism as SmtpAuthMechanism},
    transport::smtp::client::{Tls, TlsParameters},
    transport::smtp::extension::ClientId,
    Address, SmtpTransport, Transport,
    Address, AsyncSmtpTransport, AsyncTransport, Tokio1Executor,
};

use crate::{

@@ -21,16 +21,16 @@ use crate::{
    CONFIG,
};

fn mailer() -> SmtpTransport {
fn mailer() -> AsyncSmtpTransport<Tokio1Executor> {
    use std::time::Duration;
    let host = CONFIG.smtp_host().unwrap();

    let smtp_client = SmtpTransport::builder_dangerous(host.as_str())
    let smtp_client = AsyncSmtpTransport::<Tokio1Executor>::builder_dangerous(host.as_str())
        .port(CONFIG.smtp_port())
        .timeout(Some(Duration::from_secs(CONFIG.smtp_timeout())));

    // Determine security
    let smtp_client = if CONFIG.smtp_ssl() || CONFIG.smtp_explicit_tls() {
    let smtp_client = if CONFIG.smtp_security() != *"off" {
        let mut tls_parameters = TlsParameters::builder(host);
        if CONFIG.smtp_accept_invalid_hostnames() {
            tls_parameters = tls_parameters.dangerous_accept_invalid_hostnames(true);

@@ -40,7 +40,7 @@ fn mailer() -> SmtpTransport {
        }
        let tls_parameters = tls_parameters.build().unwrap();

        if CONFIG.smtp_explicit_tls() {
        if CONFIG.smtp_security() == *"force_tls" {
            smtp_client.tls(Tls::Wrapper(tls_parameters))
        } else {
            smtp_client.tls(Tls::Required(tls_parameters))
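The old boolean pair `smtp_ssl`/`smtp_explicit_tls` collapses into a single `smtp_security` string, mapped onto lettre's `Tls` modes. A small sketch of that mapping in isolation (the accepted values are an assumption based on the comparisons shown above):

use lettre::transport::smtp::client::{Tls, TlsParameters};

// Mirrors the branches in `mailer()`:
//   "off"       -> plain, unencrypted connection
//   "force_tls" -> implicit TLS from the first byte (Tls::Wrapper)
//   otherwise   -> mandatory STARTTLS upgrade (Tls::Required)
fn tls_mode(security: &str, params: TlsParameters) -> Tls {
    match security {
        "off" => Tls::None,
        "force_tls" => Tls::Wrapper(params),
        _ => Tls::Required(params),
    }
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let params = TlsParameters::new("smtp.example.com".to_string())?;
    assert!(matches!(tls_mode("force_tls", params), Tls::Wrapper(_)));
    Ok(())
}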
@@ -110,7 +107,7 @@ fn get_template(template_name: &str, data: &serde_json::Value) -> Result<(String
    Ok((subject, body))
}

pub fn send_password_hint(address: &str, hint: Option<String>) -> EmptyResult {
pub async fn send_password_hint(address: &str, hint: Option<String>) -> EmptyResult {
    let template_name = if hint.is_some() {
        "email/pw_hint_some"
    } else {

@@ -119,10 +119,10 @@ pub fn send_password_hint(address: &str, hint: Option<String>) -> EmptyResult {

    let (subject, body_html, body_text) = get_text(template_name, json!({ "hint": hint, "url": CONFIG.domain() }))?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
    let claims = generate_delete_claims(uuid.to_string());
    let delete_token = encode_jwt(&claims);

@@ -136,10 +136,10 @@ pub fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_verify_email(address: &str, uuid: &str) -> EmptyResult {
pub async fn send_verify_email(address: &str, uuid: &str) -> EmptyResult {
    let claims = generate_verify_email_claims(uuid.to_string());
    let verify_email_token = encode_jwt(&claims);

@@ -153,10 +153,10 @@ pub fn send_verify_email(address: &str, uuid: &str) -> EmptyResult {
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_welcome(address: &str) -> EmptyResult {
pub async fn send_welcome(address: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/welcome",
        json!({

@@ -164,10 +164,10 @@ pub fn send_welcome(address: &str) -> EmptyResult {
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult {
pub async fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult {
    let claims = generate_verify_email_claims(uuid.to_string());
    let verify_email_token = encode_jwt(&claims);

@@ -180,10 +180,10 @@ pub fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult {
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_2fa_removed_from_org(address: &str, org_name: &str) -> EmptyResult {
pub async fn send_2fa_removed_from_org(address: &str, org_name: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/send_2fa_removed_from_org",
        json!({

@@ -192,10 +192,10 @@ pub fn send_2fa_removed_from_org(address: &str, org_name: &str) -> EmptyResult {
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_single_org_removed_from_org(address: &str, org_name: &str) -> EmptyResult {
pub async fn send_single_org_removed_from_org(address: &str, org_name: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/send_single_org_removed_from_org",
        json!({

@@ -204,10 +204,10 @@ pub fn send_single_org_removed_from_org(address: &str, org_name: &str) -> EmptyR
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_invite(
pub async fn send_invite(
    address: &str,
    uuid: &str,
    org_id: Option<String>,

@@ -236,10 +236,10 @@ pub fn send_invite(
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_emergency_access_invite(
pub async fn send_emergency_access_invite(
    address: &str,
    uuid: &str,
    emer_id: Option<String>,

@@ -267,10 +267,10 @@ pub fn send_emergency_access_invite(
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_emergency_access_invite_accepted(address: &str, grantee_email: &str) -> EmptyResult {
pub async fn send_emergency_access_invite_accepted(address: &str, grantee_email: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/emergency_access_invite_accepted",
        json!({

@@ -279,10 +279,10 @@ pub fn send_emergency_access_invite_accepted(address: &str, grantee_email: &str)
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_emergency_access_invite_confirmed(address: &str, grantor_name: &str) -> EmptyResult {
pub async fn send_emergency_access_invite_confirmed(address: &str, grantor_name: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/emergency_access_invite_confirmed",
        json!({

@@ -291,10 +291,10 @@ pub fn send_emergency_access_invite_confirmed(address: &str, grantor_name: &str)
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_emergency_access_recovery_approved(address: &str, grantor_name: &str) -> EmptyResult {
pub async fn send_emergency_access_recovery_approved(address: &str, grantor_name: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/emergency_access_recovery_approved",
        json!({

@@ -303,10 +303,10 @@ pub fn send_emergency_access_recovery_approved(address: &str, grantor_name: &str
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_emergency_access_recovery_initiated(
pub async fn send_emergency_access_recovery_initiated(
    address: &str,
    grantee_name: &str,
    atype: &str,

@@ -322,10 +322,10 @@ pub fn send_emergency_access_recovery_initiated(
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_emergency_access_recovery_reminder(
pub async fn send_emergency_access_recovery_reminder(
    address: &str,
    grantee_name: &str,
    atype: &str,

@@ -341,10 +341,10 @@ pub fn send_emergency_access_recovery_reminder(
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_emergency_access_recovery_rejected(address: &str, grantor_name: &str) -> EmptyResult {
pub async fn send_emergency_access_recovery_rejected(address: &str, grantor_name: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/emergency_access_recovery_rejected",
        json!({

@@ -353,10 +353,10 @@ pub fn send_emergency_access_recovery_rejected(address: &str, grantor_name: &str
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_emergency_access_recovery_timed_out(address: &str, grantee_name: &str, atype: &str) -> EmptyResult {
pub async fn send_emergency_access_recovery_timed_out(address: &str, grantee_name: &str, atype: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/emergency_access_recovery_timed_out",
        json!({

@@ -366,10 +366,10 @@ pub fn send_emergency_access_recovery_timed_out(address: &str, grantee_name: &st
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_invite_accepted(new_user_email: &str, address: &str, org_name: &str) -> EmptyResult {
pub async fn send_invite_accepted(new_user_email: &str, address: &str, org_name: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/invite_accepted",
        json!({

@@ -379,10 +379,10 @@ pub fn send_invite_accepted(new_user_email: &str, address: &str, org_name: &str)
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult {
pub async fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/invite_confirmed",
        json!({

@@ -391,10 +391,10 @@ pub fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult {
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
    use crate::util::upcase_first;
    let device = upcase_first(device);

@@ -409,10 +409,10 @@ pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, de
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_incomplete_2fa_login(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
pub async fn send_incomplete_2fa_login(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
    use crate::util::upcase_first;
    let device = upcase_first(device);

@@ -428,10 +428,10 @@ pub fn send_incomplete_2fa_login(address: &str, ip: &str, dt: &NaiveDateTime, de
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_token(address: &str, token: &str) -> EmptyResult {
pub async fn send_token(address: &str, token: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/twofactor_email",
        json!({

@@ -440,10 +440,10 @@ pub fn send_token(address: &str, token: &str) -> EmptyResult {
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_change_email(address: &str, token: &str) -> EmptyResult {
pub async fn send_change_email(address: &str, token: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/change_email",
        json!({

@@ -452,10 +452,10 @@ pub fn send_change_email(address: &str, token: &str) -> EmptyResult {
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

pub fn send_test(address: &str) -> EmptyResult {
pub async fn send_test(address: &str) -> EmptyResult {
    let (subject, body_html, body_text) = get_text(
        "email/smtp_test",
        json!({

@@ -463,43 +463,19 @@ pub fn send_test(address: &str) -> EmptyResult {
        }),
    )?;

    send_email(address, &subject, body_html, body_text)
    send_email(address, &subject, body_html, body_text).await
}

fn send_email(address: &str, subject: &str, body_html: String, body_text: String) -> EmptyResult {
    let address_split: Vec<&str> = address.rsplitn(2, '@').collect();
    if address_split.len() != 2 {
        err!("Invalid email address (no @)");
    }

    let domain_puny = match idna::domain_to_ascii_strict(address_split[0]) {
        Ok(d) => d,
        Err(_) => err!("Can't convert email domain to ASCII representation"),
    };

    let address = format!("{}@{}", address_split[1], domain_puny);

    let html = SinglePart::builder()
        // We force Base64 encoding because in the past we had issues with different encodings.
        .header(header::ContentTransferEncoding::Base64)
        .header(header::ContentType::TEXT_HTML)
        .body(body_html);

    let text = SinglePart::builder()
        // We force Base64 encoding because in the past we had issues with different encodings.
        .header(header::ContentTransferEncoding::Base64)
        .header(header::ContentType::TEXT_PLAIN)
        .body(body_text);

async fn send_email(address: &str, subject: &str, body_html: String, body_text: String) -> EmptyResult {
    let smtp_from = &CONFIG.smtp_from();
    let email = Message::builder()
        .message_id(Some(format!("<{}@{}>", crate::util::get_uuid(), smtp_from.split('@').collect::<Vec<&str>>()[1])))
        .to(Mailbox::new(None, Address::from_str(&address)?))
        .to(Mailbox::new(None, Address::from_str(address)?))
        .from(Mailbox::new(Some(CONFIG.smtp_from_name()), Address::from_str(smtp_from)?))
        .subject(subject)
        .multipart(MultiPart::alternative().singlepart(text).singlepart(html))?;
        .multipart(MultiPart::alternative_plain_html(body_text, body_html))?;

    match mailer().send(&email) {
    match mailer().send(email).await {
        Ok(_) => Ok(()),
        // Match some common errors and make them more user friendly
        Err(e) => {
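The lettre changes above swap the blocking `SmtpTransport`/`Transport` pair for `AsyncSmtpTransport<Tokio1Executor>`/`AsyncTransport`, whose `send` consumes the message by value and returns a future. A hedged sketch of the minimal async send path (host and addresses are placeholders; `builder_dangerous` skips TLS exactly as in the code above, and the send will only succeed against a reachable SMTP server):

use lettre::{
    message::Mailbox, Address, AsyncSmtpTransport, AsyncTransport, Message, Tokio1Executor,
};
use std::str::FromStr;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mailer: AsyncSmtpTransport<Tokio1Executor> =
        AsyncSmtpTransport::<Tokio1Executor>::builder_dangerous("localhost").port(25).build();

    let email = Message::builder()
        .from(Mailbox::new(None, Address::from_str("noreply@example.com")?))
        .to(Mailbox::new(None, Address::from_str("user@example.com")?))
        .subject("test")
        .body(String::from("hello"))?;

    // AsyncTransport::send takes the Message by value and must be awaited.
    mailer.send(email).await?;
    Ok(())
}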
212  src/main.rs
@@ -1,4 +1,30 @@
#![forbid(unsafe_code)]
#![forbid(unsafe_code, non_ascii_idents)]
#![deny(
    rust_2018_idioms,
    rust_2021_compatibility,
    noop_method_call,
    pointer_structural_match,
    trivial_casts,
    trivial_numeric_casts,
    unused_import_braces,
    clippy::cast_lossless,
    clippy::clone_on_ref_ptr,
    clippy::equatable_if_let,
    clippy::float_cmp_const,
    clippy::inefficient_to_string,
    clippy::linkedlist,
    clippy::macro_use_imports,
    clippy::manual_assert,
    clippy::match_wildcard_for_single_variants,
    clippy::mem_forget,
    clippy::string_add_assign,
    clippy::string_to_string,
    clippy::unnecessary_join,
    clippy::unnecessary_self_imports,
    clippy::unused_async,
    clippy::verbose_file_reads,
    clippy::zero_sized_map_values
)]
#![cfg_attr(feature = "unstable", feature(ip))]
// The recursion_limit is mainly triggered by the json!() macro.
// The more key/value pairs there are, the more recursion occurs.

@@ -6,7 +32,13 @@
// If you go above 128 it will cause rust-analyzer to fail.
#![recursion_limit = "87"]

extern crate openssl;
// When enabled use MiMalloc as malloc instead of the default malloc
#[cfg(feature = "enable_mimalloc")]
use mimalloc::MiMalloc;
#[cfg(feature = "enable_mimalloc")]
#[cfg_attr(feature = "enable_mimalloc", global_allocator)]
static GLOBAL: MiMalloc = MiMalloc;

#[macro_use]
extern crate rocket;
#[macro_use]

@@ -20,8 +52,19 @@ extern crate diesel;
#[macro_use]
extern crate diesel_migrations;

use job_scheduler::{Job, JobScheduler};
use std::{fs::create_dir_all, panic, path::Path, process::exit, str::FromStr, thread, time::Duration};
use std::{
    fs::{canonicalize, create_dir_all},
    panic,
    path::Path,
    process::exit,
    str::FromStr,
    thread,
};

use tokio::{
    fs::File,
    io::{AsyncBufReadExt, BufReader},
};

#[macro_use]
mod error;

@@ -37,9 +80,11 @@ mod util;

pub use config::CONFIG;
pub use error::{Error, MapResult};
use rocket::data::{Limits, ToByteUnit};
pub use util::is_running_in_docker;

fn main() {
#[rocket::main]
async fn main() -> Result<(), Error> {
    parse_args();
    launch_info();

@@ -49,20 +94,23 @@ fn main() {

    let extra_debug = matches!(level, LF::Trace | LF::Debug);

    check_data_folder();
    check_data_folder().await;
    check_rsa_keys().unwrap_or_else(|_| {
        error!("Error creating keys, exiting...");
        exit(1);
    });
    check_web_vault();

    create_icon_cache_folder();
    create_dir(&CONFIG.icon_cache_folder(), "icon cache");
    create_dir(&CONFIG.tmp_folder(), "tmp folder");
    create_dir(&CONFIG.sends_folder(), "sends folder");
    create_dir(&CONFIG.attachments_folder(), "attachments folder");

    let pool = create_db_pool();
    schedule_jobs(pool.clone());
    crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&pool.get().unwrap()).unwrap();
    let pool = create_db_pool().await;
    schedule_jobs(pool.clone()).await;
    crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&pool.get().await.unwrap()).await.unwrap();

    launch_rocket(pool, extra_debug); // Blocks until program termination.
    launch_rocket(pool, extra_debug).await // Blocks until program termination.
}

const HELP: &str = "\

@@ -126,13 +174,13 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
        // Hide failed to close stream messages
        .level_for("hyper::server", log::LevelFilter::Warn)
        // Silence rocket logs
        .level_for("_", log::LevelFilter::Off)
        .level_for("launch", log::LevelFilter::Off)
        .level_for("launch_", log::LevelFilter::Off)
        .level_for("rocket::rocket", log::LevelFilter::Off)
        .level_for("rocket::fairing", log::LevelFilter::Off)
        // Never show html5ever and hyper::proto logs, too noisy
        .level_for("html5ever", log::LevelFilter::Off)
        .level_for("_", log::LevelFilter::Warn)
        .level_for("rocket::launch", log::LevelFilter::Error)
        .level_for("rocket::launch_", log::LevelFilter::Error)
        .level_for("rocket::rocket", log::LevelFilter::Warn)
        .level_for("rocket::server", log::LevelFilter::Warn)
        .level_for("rocket::fairing::fairings", log::LevelFilter::Warn)
        .level_for("rocket::shield::shield", log::LevelFilter::Warn)
        .level_for("hyper::proto", log::LevelFilter::Off)
        .level_for("hyper::client", log::LevelFilter::Off)
        // Prevent cookie_store logs

@@ -243,11 +291,7 @@ fn create_dir(path: &str, description: &str) {
    create_dir_all(path).expect(&err_msg);
}

fn create_icon_cache_folder() {
    create_dir(&CONFIG.icon_cache_folder(), "icon cache");
}

fn check_data_folder() {
async fn check_data_folder() {
    let data_folder = &CONFIG.data_folder();
    let path = Path::new(data_folder);
    if !path.exists() {

@@ -259,6 +303,57 @@ fn check_data_folder() {
        }
        exit(1);
    }
    if !path.is_dir() {
        error!("Data folder '{}' is not a directory.", data_folder);
        exit(1);
    }

    if is_running_in_docker()
        && std::env::var("I_REALLY_WANT_VOLATILE_STORAGE").is_err()
        && !docker_data_folder_is_persistent(data_folder).await
    {
        error!(
            "No persistent volume!\n\
            ########################################################################################\n\
            # It looks like you did not configure a persistent volume!                            #\n\
            # This will result in permanent data loss when the container is removed or updated!   #\n\
            # If you really want to use volatile storage set `I_REALLY_WANT_VOLATILE_STORAGE=true` #\n\
            ########################################################################################\n"
        );
        exit(1);
    }
}

/// Detect, when running under Docker or Podman, whether the DATA_FOLDER is a bind-mount or a manually created volume.
/// If it was not created manually, the data will not be persistent.
/// A non-persistent volume in either Docker or Podman is represented by a 64-character alphanumeric string.
/// If we detect this string, we alert about not having a persistent, self-defined volume.
/// This probably means that someone forgot to add `-v /path/to/vaultwarden_data/:/data`
async fn docker_data_folder_is_persistent(data_folder: &str) -> bool {
    if let Ok(mountinfo) = File::open("/proc/self/mountinfo").await {
        // Since there can only be one mountpoint for the DATA_FOLDER,
        // we do a basic check for this mountpoint surrounded by a space.
        let data_folder_match = if data_folder.starts_with('/') {
            format!(" {data_folder} ")
        } else {
            format!(" /{data_folder} ")
        };
        let mut lines = BufReader::new(mountinfo).lines();
        while let Some(line) = lines.next_line().await.unwrap_or_default() {
            // Only execute a regex check if we find the base match
            if line.contains(&data_folder_match) {
                let re = regex::Regex::new(r"/volumes/[a-z0-9]{64}/_data /").unwrap();
                if re.is_match(&line) {
                    return false;
                }
                // If we found a match for the mountpoint but not the regex, still stop searching.
                break;
            }
        }
    }
    // In all other cases, just assume true.
    // This is just an informative check to try and prevent data loss.
    true
}

fn check_rsa_keys() -> Result<(), crate::error::Error> {

@@ -275,7 +370,7 @@ fn check_rsa_keys() -> Result<(), crate::error::Error> {
    }

    if !util::file_exists(&pub_path) {
        let rsa_key = openssl::rsa::Rsa::private_key_from_pem(&util::read_file(&priv_path)?)?;
        let rsa_key = openssl::rsa::Rsa::private_key_from_pem(&std::fs::read(&priv_path)?)?;

        let pub_key = rsa_key.public_key_to_pem()?;
        crate::util::write_file(&pub_path, &pub_key)?;

@@ -304,8 +399,8 @@ fn check_web_vault() {
    }
}

fn create_db_pool() -> db::DbPool {
    match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
async fn create_db_pool() -> db::DbPool {
    match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()).await {
        Ok(p) => p,
        Err(e) => {
            error!("Error creating database pool: {:?}", e);

@@ -314,51 +409,76 @@ fn create_db_pool() -> db::DbPool {
        }
}

fn launch_rocket(pool: db::DbPool, extra_debug: bool) {
async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error> {
    let basepath = &CONFIG.domain_path();

    let mut config = rocket::Config::from(rocket::Config::figment());
    config.temp_dir = canonicalize(CONFIG.tmp_folder()).unwrap().into();
    config.cli_colors = false; // Make sure Rocket does not color any values for logging.
    config.limits = Limits::new()
        .limit("json", 20.megabytes()) // 20MB should be enough for very large imports, something like 5000+ vault entries
        .limit("data-form", 525.megabytes()) // This needs to match the maximum allowed file size for Send
        .limit("file", 525.megabytes()); // This needs to match the maximum allowed file size for attachments

    // If adding more paths here, consider also adding them to
    // crate::utils::LOGGED_ROUTES to make sure they appear in the log
    let result = rocket::ignite()
        .mount(&[basepath, "/"].concat(), api::web_routes())
        .mount(&[basepath, "/api"].concat(), api::core_routes())
        .mount(&[basepath, "/admin"].concat(), api::admin_routes())
        .mount(&[basepath, "/identity"].concat(), api::identity_routes())
        .mount(&[basepath, "/icons"].concat(), api::icons_routes())
        .mount(&[basepath, "/notifications"].concat(), api::notifications_routes())
    let instance = rocket::custom(config)
        .mount([basepath, "/"].concat(), api::web_routes())
        .mount([basepath, "/api"].concat(), api::core_routes())
        .mount([basepath, "/admin"].concat(), api::admin_routes())
        .mount([basepath, "/identity"].concat(), api::identity_routes())
        .mount([basepath, "/icons"].concat(), api::icons_routes())
        .mount([basepath, "/notifications"].concat(), api::notifications_routes())
        .register([basepath, "/"].concat(), api::web_catchers())
        .register([basepath, "/api"].concat(), api::core_catchers())
        .manage(pool)
        .manage(api::start_notification_server())
        .attach(util::AppHeaders())
        .attach(util::Cors())
        .attach(util::BetterLogging(extra_debug))
        .launch();
        .ignite()
        .await?;

    // Launch and print error if there is one
    // The launch will restore the original logging level
    error!("Launch error {:#?}", result);
    CONFIG.set_rocket_shutdown_handle(instance.shutdown());
    ctrlc::set_handler(move || {
        info!("Exiting vaultwarden!");
        CONFIG.shutdown();
    })
    .expect("Error setting Ctrl-C handler");

    let _ = instance.launch().await?;

    info!("Vaultwarden process exited!");
    Ok(())
}

fn schedule_jobs(pool: db::DbPool) {
async fn schedule_jobs(pool: db::DbPool) {
    if CONFIG.job_poll_interval_ms() == 0 {
        info!("Job scheduler disabled.");
        return;
    }

    let runtime = tokio::runtime::Runtime::new().unwrap();

    thread::Builder::new()
        .name("job-scheduler".to_string())
        .spawn(move || {
            use job_scheduler_ng::{Job, JobScheduler};
            let _runtime_guard = runtime.enter();

            let mut sched = JobScheduler::new();

            // Purge sends that are past their deletion date.
            if !CONFIG.send_purge_schedule().is_empty() {
                sched.add(Job::new(CONFIG.send_purge_schedule().parse().unwrap(), || {
                    api::purge_sends(pool.clone());
                    runtime.spawn(api::purge_sends(pool.clone()));
                }));
            }

            // Purge trashed items that are old enough to be auto-deleted.
            if !CONFIG.trash_purge_schedule().is_empty() {
                sched.add(Job::new(CONFIG.trash_purge_schedule().parse().unwrap(), || {
                    api::purge_trashed_ciphers(pool.clone());
                    runtime.spawn(api::purge_trashed_ciphers(pool.clone()));
                }));
            }

@@ -366,7 +486,7 @@ fn schedule_jobs(pool: db::DbPool) {
            // indicates that a user's master password has been compromised.
            if !CONFIG.incomplete_2fa_schedule().is_empty() {
                sched.add(Job::new(CONFIG.incomplete_2fa_schedule().parse().unwrap(), || {
                    api::send_incomplete_2fa_notifications(pool.clone());
                    runtime.spawn(api::send_incomplete_2fa_notifications(pool.clone()));
                }));
            }

@@ -375,7 +495,7 @@ fn schedule_jobs(pool: db::DbPool) {
            // sending reminders for requests that are about to be granted anyway.
            if !CONFIG.emergency_request_timeout_schedule().is_empty() {
                sched.add(Job::new(CONFIG.emergency_request_timeout_schedule().parse().unwrap(), || {
                    api::emergency_request_timeout_job(pool.clone());
                    runtime.spawn(api::emergency_request_timeout_job(pool.clone()));
                }));
            }

@@ -383,7 +503,7 @@ fn schedule_jobs(pool: db::DbPool) {
            // emergency access requests.
            if !CONFIG.emergency_notification_reminder_schedule().is_empty() {
                sched.add(Job::new(CONFIG.emergency_notification_reminder_schedule().parse().unwrap(), || {
                    api::emergency_notification_reminder_job(pool.clone());
                    runtime.spawn(api::emergency_notification_reminder_job(pool.clone()));
                }));
            }

@@ -398,7 +518,9 @@ fn schedule_jobs(pool: db::DbPool) {
            // tick, the one that was added earlier will run first.
            loop {
                sched.tick();
                thread::sleep(Duration::from_millis(CONFIG.job_poll_interval_ms()));
                runtime.block_on(async move {
                    tokio::time::sleep(tokio::time::Duration::from_millis(CONFIG.job_poll_interval_ms())).await
                });
            }
        })
        .expect("Error spawning job scheduler thread");
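The scheduler thread shows the standard trick for driving async jobs from a synchronous, callback-based scheduler: build a Tokio runtime up front, hand each tick's work to `runtime.spawn`, and sleep between ticks via `block_on`. A standalone sketch of that pattern (the job body and tick count are placeholders):

fn main() {
    let runtime = tokio::runtime::Runtime::new().unwrap();

    std::thread::Builder::new()
        .name("job-scheduler".to_string())
        .spawn(move || {
            for _tick in 0..3 {
                // Fire-and-forget: the async job runs on the runtime's
                // worker threads while this loop keeps ticking.
                runtime.spawn(async {
                    // ... job body, e.g. purging expired sends ...
                });
                // block_on drives a single future to completion from this
                // plain OS thread, here just the sleep between ticks.
                runtime.block_on(tokio::time::sleep(std::time::Duration::from_millis(10)));
            }
        })
        .unwrap()
        .join()
        .unwrap();
}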
@@ -328,6 +328,7 @@
    "Type": 33,
    "Domains": [
      "healthcare.gov",
      "cuidadodesalud.gov",
      "cms.gov"
    ],
    "Excluded": false

@@ -902,6 +903,7 @@
  {
    "Type": 85,
    "Domains": [
      "proton.me",
      "protonmail.com",
      "protonvpn.com"
    ],

@@ -922,5 +924,20 @@
      "wise.com"
    ],
    "Excluded": false
  },
  {
    "Type": 88,
    "Domains": [
      "takeaway.com",
      "just-eat.dk",
      "just-eat.no",
      "just-eat.fr",
      "just-eat.ch",
      "lieferando.de",
      "lieferando.at",
      "thuisbezorgd.nl",
      "pyszne.pl"
    ],
    "Excluded": false
  }
]
6050  src/static/scripts/bootstrap-native.js (vendored)
File diff suppressed because it is too large.

4543  src/static/scripts/bootstrap.css (vendored)
File diff suppressed because it is too large.
278  src/static/scripts/datatables.css (vendored)
@@ -4,22 +4,175 @@
 *
 * To rebuild or modify this file with the latest versions of the included
 * software please visit:
 * https://datatables.net/download/#bs5/dt-1.11.3
 * https://datatables.net/download/#bs5/dt-1.12.1
 *
 * Included libraries:
 * DataTables 1.11.3
 * DataTables 1.12.1
 */

@charset "UTF-8";
td.dt-control {
  background: url("https://www.datatables.net/examples/resources/details_open.png") no-repeat center center;
table.dataTable td.dt-control {
  text-align: center;
  cursor: pointer;
}

tr.dt-hasChild td.dt-control {
  background: url("https://www.datatables.net/examples/resources/details_close.png") no-repeat center center;
table.dataTable td.dt-control:before {
  height: 1em;
  width: 1em;
  margin-top: -9px;
  display: inline-block;
  color: white;
  border: 0.15em solid white;
  border-radius: 1em;
  box-shadow: 0 0 0.2em #444;
  box-sizing: content-box;
  text-align: center;
  text-indent: 0 !important;
  font-family: "Courier New", Courier, monospace;
  line-height: 1em;
  content: "+";
  background-color: #31b131;
}
table.dataTable tr.dt-hasChild td.dt-control:before {
  content: "-";
  background-color: #d33333;
}

table.dataTable thead > tr > th.sorting, table.dataTable thead > tr > th.sorting_asc, table.dataTable thead > tr > th.sorting_desc, table.dataTable thead > tr > th.sorting_asc_disabled, table.dataTable thead > tr > th.sorting_desc_disabled,
table.dataTable thead > tr > td.sorting,
table.dataTable thead > tr > td.sorting_asc,
table.dataTable thead > tr > td.sorting_desc,
table.dataTable thead > tr > td.sorting_asc_disabled,
table.dataTable thead > tr > td.sorting_desc_disabled {
  cursor: pointer;
  position: relative;
  padding-right: 26px;
}
table.dataTable thead > tr > th.sorting:before, table.dataTable thead > tr > th.sorting:after, table.dataTable thead > tr > th.sorting_asc:before, table.dataTable thead > tr > th.sorting_asc:after, table.dataTable thead > tr > th.sorting_desc:before, table.dataTable thead > tr > th.sorting_desc:after, table.dataTable thead > tr > th.sorting_asc_disabled:before, table.dataTable thead > tr > th.sorting_asc_disabled:after, table.dataTable thead > tr > th.sorting_desc_disabled:before, table.dataTable thead > tr > th.sorting_desc_disabled:after,
table.dataTable thead > tr > td.sorting:before,
table.dataTable thead > tr > td.sorting:after,
table.dataTable thead > tr > td.sorting_asc:before,
table.dataTable thead > tr > td.sorting_asc:after,
table.dataTable thead > tr > td.sorting_desc:before,
table.dataTable thead > tr > td.sorting_desc:after,
table.dataTable thead > tr > td.sorting_asc_disabled:before,
table.dataTable thead > tr > td.sorting_asc_disabled:after,
table.dataTable thead > tr > td.sorting_desc_disabled:before,
table.dataTable thead > tr > td.sorting_desc_disabled:after {
  position: absolute;
  display: block;
  opacity: 0.125;
  right: 10px;
  line-height: 9px;
  font-size: 0.9em;
}
table.dataTable thead > tr > th.sorting:before, table.dataTable thead > tr > th.sorting_asc:before, table.dataTable thead > tr > th.sorting_desc:before, table.dataTable thead > tr > th.sorting_asc_disabled:before, table.dataTable thead > tr > th.sorting_desc_disabled:before,
table.dataTable thead > tr > td.sorting:before,
table.dataTable thead > tr > td.sorting_asc:before,
table.dataTable thead > tr > td.sorting_desc:before,
table.dataTable thead > tr > td.sorting_asc_disabled:before,
table.dataTable thead > tr > td.sorting_desc_disabled:before {
  bottom: 50%;
  content: "▴";
}
table.dataTable thead > tr > th.sorting:after, table.dataTable thead > tr > th.sorting_asc:after, table.dataTable thead > tr > th.sorting_desc:after, table.dataTable thead > tr > th.sorting_asc_disabled:after, table.dataTable thead > tr > th.sorting_desc_disabled:after,
table.dataTable thead > tr > td.sorting:after,
table.dataTable thead > tr > td.sorting_asc:after,
table.dataTable thead > tr > td.sorting_desc:after,
table.dataTable thead > tr > td.sorting_asc_disabled:after,
table.dataTable thead > tr > td.sorting_desc_disabled:after {
  top: 50%;
  content: "▾";
}
table.dataTable thead > tr > th.sorting_asc:before, table.dataTable thead > tr > th.sorting_desc:after,
table.dataTable thead > tr > td.sorting_asc:before,
table.dataTable thead > tr > td.sorting_desc:after {
  opacity: 0.6;
}
table.dataTable thead > tr > th.sorting_desc_disabled:after, table.dataTable thead > tr > th.sorting_asc_disabled:before,
table.dataTable thead > tr > td.sorting_desc_disabled:after,
table.dataTable thead > tr > td.sorting_asc_disabled:before {
  display: none;
}
table.dataTable thead > tr > th:active,
table.dataTable thead > tr > td:active {
  outline: none;
}

div.dataTables_scrollBody table.dataTable thead > tr > th:before, div.dataTables_scrollBody table.dataTable thead > tr > th:after,
div.dataTables_scrollBody table.dataTable thead > tr > td:before,
div.dataTables_scrollBody table.dataTable thead > tr > td:after {
  display: none;
}

div.dataTables_processing {
  position: absolute;
  top: 50%;
  left: 50%;
  width: 200px;
  margin-left: -100px;
  margin-top: -26px;
  text-align: center;
  padding: 2px;
}
div.dataTables_processing > div:last-child {
  position: relative;
  width: 80px;
  height: 15px;
  margin: 1em auto;
}
div.dataTables_processing > div:last-child > div {
  position: absolute;
  top: 0;
  width: 13px;
  height: 13px;
  border-radius: 50%;
  background: rgba(13, 110, 253, 0.9);
  animation-timing-function: cubic-bezier(0, 1, 1, 0);
}
div.dataTables_processing > div:last-child > div:nth-child(1) {
  left: 8px;
  animation: datatables-loader-1 0.6s infinite;
}
div.dataTables_processing > div:last-child > div:nth-child(2) {
  left: 8px;
  animation: datatables-loader-2 0.6s infinite;
}
div.dataTables_processing > div:last-child > div:nth-child(3) {
  left: 32px;
  animation: datatables-loader-2 0.6s infinite;
}
div.dataTables_processing > div:last-child > div:nth-child(4) {
  left: 56px;
  animation: datatables-loader-3 0.6s infinite;
}

@keyframes datatables-loader-1 {
  0% {
    transform: scale(0);
  }
  100% {
    transform: scale(1);
  }
}
@keyframes datatables-loader-3 {
  0% {
    transform: scale(1);
  }
  100% {
    transform: scale(0);
  }
}
@keyframes datatables-loader-2 {
  0% {
    transform: translate(0, 0);
  }
  100% {
    transform: translate(24px, 0);
  }
}
table.dataTable.nowrap th, table.dataTable.nowrap td {
  white-space: nowrap;
}
table.dataTable th.dt-left,
table.dataTable td.dt-left {
  text-align: left;

@@ -41,6 +194,12 @@ table.dataTable th.dt-nowrap,
table.dataTable td.dt-nowrap {
  white-space: nowrap;
}
table.dataTable thead th,
table.dataTable thead td,
table.dataTable tfoot th,
table.dataTable tfoot td {
  text-align: left;
}
table.dataTable thead th.dt-head-left,
table.dataTable thead td.dt-head-left,
table.dataTable tfoot th.dt-head-left,

@@ -118,6 +277,28 @@ table.dataTable.nowrap th,
table.dataTable.nowrap td {
  white-space: nowrap;
}
table.dataTable.table-striped > tbody > tr:nth-of-type(2n+1) > * {
  box-shadow: none;
}
table.dataTable > tbody > tr {
  background-color: transparent;
}
table.dataTable > tbody > tr.selected > * {
  box-shadow: inset 0 0 0 9999px rgba(13, 110, 253, 0.9);
  color: white;
}
table.dataTable.table-striped > tbody > tr.odd > * {
  box-shadow: inset 0 0 0 9999px rgba(0, 0, 0, 0.05);
}
table.dataTable.table-striped > tbody > tr.odd.selected > * {
  box-shadow: inset 0 0 0 9999px rgba(13, 110, 253, 0.95);
}
table.dataTable.table-hover > tbody > tr:hover > * {
  box-shadow: inset 0 0 0 9999px rgba(0, 0, 0, 0.075);
}
table.dataTable.table-hover > tbody > tr.selected:hover > * {
  box-shadow: inset 0 0 0 9999px rgba(13, 110, 253, 0.975);
}

div.dataTables_wrapper div.dataTables_length label {
  font-weight: normal;

@@ -154,71 +335,6 @@ div.dataTables_wrapper div.dataTables_paginate ul.pagination {
  white-space: nowrap;
  justify-content: flex-end;
}
div.dataTables_wrapper div.dataTables_processing {
  position: absolute;
  top: 50%;
  left: 50%;
  width: 200px;
  margin-left: -100px;
  margin-top: -26px;
  text-align: center;
  padding: 1em 0;
}

table.dataTable > thead > tr > th:active,
table.dataTable > thead > tr > td:active {
  outline: none;
}
table.dataTable > thead > tr > th:not(.sorting_disabled),
table.dataTable > thead > tr > td:not(.sorting_disabled) {
  padding-right: 30px;
}
table.dataTable > thead .sorting,
table.dataTable > thead .sorting_asc,
table.dataTable > thead .sorting_desc,
table.dataTable > thead .sorting_asc_disabled,
table.dataTable > thead .sorting_desc_disabled {
  cursor: pointer;
  position: relative;
}
table.dataTable > thead .sorting:before, table.dataTable > thead .sorting:after,
table.dataTable > thead .sorting_asc:before,
|
||||
table.dataTable > thead .sorting_asc:after,
|
||||
table.dataTable > thead .sorting_desc:before,
|
||||
table.dataTable > thead .sorting_desc:after,
|
||||
table.dataTable > thead .sorting_asc_disabled:before,
|
||||
table.dataTable > thead .sorting_asc_disabled:after,
|
||||
table.dataTable > thead .sorting_desc_disabled:before,
|
||||
table.dataTable > thead .sorting_desc_disabled:after {
|
||||
position: absolute;
|
||||
bottom: 0.5em;
|
||||
display: block;
|
||||
opacity: 0.3;
|
||||
}
|
||||
table.dataTable > thead .sorting:before,
|
||||
table.dataTable > thead .sorting_asc:before,
|
||||
table.dataTable > thead .sorting_desc:before,
|
||||
table.dataTable > thead .sorting_asc_disabled:before,
|
||||
table.dataTable > thead .sorting_desc_disabled:before {
|
||||
right: 1em;
|
||||
content: "↑";
|
||||
}
|
||||
table.dataTable > thead .sorting:after,
|
||||
table.dataTable > thead .sorting_asc:after,
|
||||
table.dataTable > thead .sorting_desc:after,
|
||||
table.dataTable > thead .sorting_asc_disabled:after,
|
||||
table.dataTable > thead .sorting_desc_disabled:after {
|
||||
right: 0.5em;
|
||||
content: "↓";
|
||||
}
|
||||
table.dataTable > thead .sorting_asc:before,
|
||||
table.dataTable > thead .sorting_desc:after {
|
||||
opacity: 1;
|
||||
}
|
||||
table.dataTable > thead .sorting_asc_disabled:before,
|
||||
table.dataTable > thead .sorting_desc_disabled:after {
|
||||
opacity: 0;
|
||||
}
|
||||
|
||||
div.dataTables_scrollHead table.dataTable {
|
||||
margin-bottom: 0 !important;
|
||||
@@ -264,17 +380,6 @@ div.dataTables_wrapper div.dataTables_paginate {
|
||||
table.dataTable.table-sm > thead > tr > th:not(.sorting_disabled) {
|
||||
padding-right: 20px;
|
||||
}
|
||||
table.dataTable.table-sm .sorting:before,
|
||||
table.dataTable.table-sm .sorting_asc:before,
|
||||
table.dataTable.table-sm .sorting_desc:before {
|
||||
top: 5px;
|
||||
right: 0.85em;
|
||||
}
|
||||
table.dataTable.table-sm .sorting:after,
|
||||
table.dataTable.table-sm .sorting_asc:after,
|
||||
table.dataTable.table-sm .sorting_desc:after {
|
||||
top: 5px;
|
||||
}
|
||||
|
||||
table.table-bordered.dataTable {
|
||||
border-right-width: 0;
|
||||
@@ -316,11 +421,4 @@ div.table-responsive > div.dataTables_wrapper > div.row > div[class^=col-]:last-
|
||||
padding-right: 0;
|
||||
}
|
||||
|
||||
table.dataTable.table-striped > tbody > tr:nth-of-type(2n+1) {
|
||||
--bs-table-accent-bg: transparent;
|
||||
}
|
||||
table.dataTable.table-striped > tbody > tr.odd {
|
||||
--bs-table-accent-bg: var(--bs-table-striped-bg);
|
||||
}
|
||||
|
||||
|
||||
|
||||
460 src/static/scripts/datatables.js (vendored)
@@ -4,24 +4,23 @@
*
* To rebuild or modify this file with the latest versions of the included
* software please visit:
* https://datatables.net/download/#bs5/dt-1.11.3
* https://datatables.net/download/#bs5/dt-1.12.1
*
* Included libraries:
* DataTables 1.11.3
* DataTables 1.12.1
*/

/*! DataTables 1.11.3
* ©2008-2021 SpryMedia Ltd - datatables.net/license
/*! DataTables 1.12.1
* ©2008-2022 SpryMedia Ltd - datatables.net/license
*/

/**
* @summary DataTables
* @description Paginate, search and order HTML tables
* @version 1.11.3
* @file jquery.dataTables.js
* @version 1.12.1
* @author SpryMedia Ltd
* @contact www.datatables.net
* @copyright Copyright 2008-2021 SpryMedia Ltd.
* @copyright SpryMedia Ltd.
*
* This source file is free software, available under the following license:
* MIT license - http://datatables.net/license
@@ -71,38 +70,7 @@
(function( $, window, document, undefined ) {
"use strict";

/**
* DataTables is a plug-in for the jQuery Javascript library. It is a highly
* flexible tool, based upon the foundations of progressive enhancement,
* which will add advanced interaction controls to any HTML table. For a
* full list of features please refer to
* [DataTables.net](href="http://datatables.net).
*
* Note that the `DataTable` object is not a global variable but is aliased
* to `jQuery.fn.DataTable` and `jQuery.fn.dataTable` through which it may
* be accessed.
*
* @class
* @param {object} [init={}] Configuration object for DataTables. Options
* are defined by {@link DataTable.defaults}
* @requires jQuery 1.7+
*
* @example
* // Basic initialisation
* $(document).ready( function {
* $('#example').dataTable();
* } );
*
* @example
* // Initialisation with configuration options - in this case, disable
* // pagination and sorting.
* $(document).ready( function {
* $('#example').dataTable( {
* "paginate": false,
* "sort": false
* } );
* } );
*/

var DataTable = function ( selector, options )
{
// When creating with `new`, create a new DataTable, returning the API instance
@@ -113,7 +81,7 @@
// Argument switching
options = selector;
}

/**
* Perform a jQuery selector action on the table's TR elements (from the tbody) and
* return the resulting jQuery object.
@@ -869,24 +837,24 @@
*/
this.fnVersionCheck = _ext.fnVersionCheck;

var _that = this;
var emptyInit = options === undefined;
var len = this.length;

if ( emptyInit ) {
options = {};
}

this.oApi = this.internal = _ext.internal;

// Extend with old style plug-in API methods
for ( var fn in DataTable.ext.internal ) {
if ( fn ) {
this[fn] = _fnExternApiFunc(fn);
}
}

this.each(function() {
// For each initialisation we want to give it a clean initialisation
// object that can be bashed around
@@ -894,7 +862,7 @@
var oInit = len > 1 ? // optimisation for single table case
_fnExtend( o, options, true ) :
options;

/*global oInit,_that,emptyInit*/
var i=0, iLen, j, jLen, k, kLen;
var sId = this.getAttribute( 'id' );
@@ -1108,7 +1076,7 @@
success: function ( json ) {
_fnCamelToHungarian( defaults.oLanguage, json );
_fnLanguageCompat( json );
$.extend( true, oLanguage, json );
$.extend( true, oLanguage, json, oSettings.oInit.oLanguage );

_fnCallbackFire( oSettings, null, 'i18n', [oSettings]);
_fnInitialise( oSettings );
@@ -1337,7 +1305,7 @@
_that = null;
return this;
};

/*
* It is useful to have variables which are scoped locally so only the
@@ -2341,9 +2309,17 @@
th.addClass( oOptions.sClass );
}

var origClass = oCol.sClass;

$.extend( oCol, oOptions );
_fnMap( oCol, oOptions, "sWidth", "sWidthOrig" );

// Merge class from previously defined classes with this one, rather than just
// overwriting it in the extend above
if (origClass !== oCol.sClass) {
oCol.sClass = origClass + ' ' + oCol.sClass;
}

/* iDataSort to be applied (backwards compatibility), but aDataSort will take
* priority if defined
*/
@@ -2616,9 +2592,11 @@
def = aoColDefs[i];

/* Each definition can target multiple columns, as it is an array */
var aTargets = def.targets !== undefined ?
def.targets :
def.aTargets;
var aTargets = def.target !== undefined
? def.target
: def.targets !== undefined
? def.targets
: def.aTargets;

if ( ! Array.isArray( aTargets ) )
{
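A side note on the hunk above: DataTables 1.12 resolves a singular target property before targets and the legacy aTargets. A minimal hedged sketch of how that could be used (the #example table id is a placeholder, not from this diff):

// Hide the first column using the singular `target` form that the
// resolution order above now checks first.
$('#example').DataTable({
    columnDefs: [
        { target: 0, visible: false }
    ]
});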
@@ -3462,6 +3440,9 @@
*/
function _fnDraw( oSettings, ajaxComplete )
{
// Allow for state saving and a custom start position
_fnStart( oSettings );

/* Provide a pre-callback function which can be used to cancel the draw if false is returned */
var aPreDraw = _fnCallbackFire( oSettings, 'aoPreDrawCallback', 'preDraw', [oSettings] );
if ( $.inArray( false, aPreDraw ) !== -1 )
@@ -3470,34 +3451,18 @@
return;
}

var i, iLen, n;
var anRows = [];
var iRowCount = 0;
var asStripeClasses = oSettings.asStripeClasses;
var iStripes = asStripeClasses.length;
var iOpenRows = oSettings.aoOpenRows.length;
var oLang = oSettings.oLanguage;
var iInitDisplayStart = oSettings.iInitDisplayStart;
var bServerSide = _fnDataSource( oSettings ) == 'ssp';
var aiDisplay = oSettings.aiDisplay;

oSettings.bDrawing = true;

/* Check and see if we have an initial draw position from state saving */
if ( iInitDisplayStart !== undefined && iInitDisplayStart !== -1 )
{
oSettings._iDisplayStart = bServerSide ?
iInitDisplayStart :
iInitDisplayStart >= oSettings.fnRecordsDisplay() ?
0 :
iInitDisplayStart;

oSettings.iInitDisplayStart = -1;
}

var iDisplayStart = oSettings._iDisplayStart;
var iDisplayEnd = oSettings.fnDisplayEnd();

oSettings.bDrawing = true;

/* Server-side processing draw intercept */
if ( oSettings.bDeferLoading )
{
@@ -3899,6 +3864,28 @@
return aReturn;
}

/**
* Set the start position for draw
* @param {object} oSettings dataTables settings object
*/
function _fnStart( oSettings )
{
var bServerSide = _fnDataSource( oSettings ) == 'ssp';
var iInitDisplayStart = oSettings.iInitDisplayStart;

// Check and see if we have an initial draw position from state saving
if ( iInitDisplayStart !== undefined && iInitDisplayStart !== -1 )
{
oSettings._iDisplayStart = bServerSide ?
iInitDisplayStart :
iInitDisplayStart >= oSettings.fnRecordsDisplay() ?
0 :
iInitDisplayStart;

oSettings.iInitDisplayStart = -1;
}
}

/**
* Create an Ajax call based on the table's settings, taking into account that
* parameters can have multiple forms, and backwards compatibility.
@@ -3942,8 +3929,8 @@
var ajax = oSettings.ajax;
var instance = oSettings.oInstance;
var callback = function ( json ) {
var status = oSettings.jqXhr
? oSettings.jqXhr.status
var status = oSettings.jqXHR
? oSettings.jqXHR.status
: null;

if ( json === null || (typeof status === 'number' && status == 204 ) ) {
@@ -5111,6 +5098,7 @@
'class': settings.oClasses.sProcessing
} )
.html( settings.oLanguage.sProcessing )
.append('<div><div></div><div></div><div></div><div></div></div>')
.insertBefore( settings.nTable )[0];
}

@@ -5360,6 +5348,7 @@
footerCopy = footer.clone().prependTo( table );
footerTrgEls = footer.find('tr'); // the original tfoot is in its own table and must be sized
footerSrcEls = footerCopy.find('tr');
footerCopy.find('[id]').removeAttr('id');
}

// Clone the current header and footer elements and then place it into the inner table
@@ -5367,6 +5356,7 @@
headerTrgEls = header.find('tr'); // original header is in its own table
headerSrcEls = headerCopy.find('tr');
headerCopy.find('th, td').removeAttr('tabindex');
headerCopy.find('[id]').removeAttr('id');

/*
@@ -5440,7 +5430,7 @@
nToSize.style.width = headerWidths[i];
}, headerTrgEls );

$(headerSrcEls).height(0);
$(headerSrcEls).css('height', 0);

/* Same again with the footer if we have one */
if ( footer )
@@ -5487,7 +5477,7 @@

// Sanity check that the table is of a sensible width. If not then we are going to get
// misalignment - try to prevent this by not allowing the table to shrink below its min width
if ( table.outerWidth() < sanityWidth )
if ( Math.round(table.outerWidth()) < Math.round(sanityWidth) )
{
// The min width depends upon if we have a vertical scrollbar visible or not */
correction = ((divBodyEl.scrollHeight > divBodyEl.offsetHeight ||
@@ -6493,16 +6483,27 @@
// Store the saved state so it might be accessed at any time
settings.oLoadedState = $.extend( true, {}, s );

// Page Length
if ( s.length !== undefined ) {
// If already initialised just set the value directly so that the select element is also updated
if (api) {
api.page.len(s.length)
}
else {
settings._iDisplayLength = s.length;
}
}

// Restore key features - todo - for 1.11 this needs to be done by
// subscribed events
if ( s.start !== undefined ) {
settings._iDisplayStart = s.start;
if(api === null) {
settings._iDisplayStart = s.start;
settings.iInitDisplayStart = s.start;
}
}
if ( s.length !== undefined ) {
settings._iDisplayLength = s.length;
else {
_fnPageChange(settings, s.start/settings._iDisplayLength);
}
}

// Order
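The reworked state restore above distinguishes a not-yet-initialised table from a live API instance. For context, a minimal sketch of the feature it serves (the standard stateSave option; the table selector is a placeholder):

// Persist paging, ordering and search between visits; the code above
// re-applies the saved start position and page length when the state loads.
$('#example').DataTable({ stateSave: true });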
@@ -6844,7 +6845,7 @@
return 'dom';
}

/**
@@ -7236,8 +7237,10 @@

pluck: function ( prop )
{
let fn = DataTable.util.get(prop);

return this.map( function ( el ) {
return el[ prop ];
return fn(el);
} );
},
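pluck now routes through DataTable.util.get, so (assuming the documented nested-path behaviour of that utility) dotted property names resolve into nested objects rather than only top-level keys. A hedged sketch, where table is a placeholder API instance:

// Previously only flat access such as el['name'] worked; via util.get a
// nested path like 'office.city' can be plucked as well.
var cities = table.rows().data().pluck('office.city');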
@@ -8331,22 +8334,35 @@

$(document).on('plugin-init.dt', function (e, context) {
var api = new _Api( context );
api.on( 'stateSaveParams', function ( e, settings, data ) {
var indexes = api.rows().iterator( 'row', function ( settings, idx ) {
return settings.aoData[idx]._detailsShow ? idx : undefined;
});

data.childRows = api.rows( indexes ).ids( true ).toArray();
api.on( 'stateSaveParams', function ( e, settings, d ) {
// This could be more compact with the API, but it is a lot faster as a simple
// internal loop
var idFn = settings.rowIdFn;
var data = settings.aoData;
var ids = [];

for (var i=0 ; i<data.length ; i++) {
if (data[i]._detailsShow) {
ids.push( '#' + idFn(data[i]._aData) );
}
}

d.childRows = ids;
})

var loaded = api.state.loaded();

if ( loaded && loaded.childRows ) {
api.rows( loaded.childRows ).every( function () {
_fnCallbackFire( context, null, 'requestChild', [ this ] )
})
api
.rows( $.map(loaded.childRows, function (id){
return id.replace(/:/g, '\\:')
}) )
.every( function () {
_fnCallbackFire( context, null, 'requestChild', [ this ] )
});
}
})
});

var __details_add = function ( ctx, row, data, klass )
{
@@ -8393,6 +8409,15 @@
};

// Make state saving of child row details async to allow them to be batch processed
var __details_state = DataTable.util.throttle(
function (ctx) {
_fnSaveState( ctx[0] )
},
500
);
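DataTable.util.throttle above coalesces bursts of state saves into one call per 500 ms window. A small illustrative sketch of the same utility (the logging callback is made up for illustration):

var save = DataTable.util.throttle(function () {
    console.log('state saved'); // fires once per 500 ms burst
}, 500);
save(); save(); save(); // logs a single time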
var __details_remove = function ( api, idx )
{
var ctx = api.context;
@@ -8406,7 +8431,7 @@
row._detailsShow = undefined;
row._details = undefined;
$( row.nTr ).removeClass( 'dt-hasChild' );
_fnSaveState( ctx[0] );
__details_state( ctx );
}
}
};
@@ -8433,7 +8458,7 @@
_fnCallbackFire( ctx[0], null, 'childRow', [ show, api.row( api[0] ) ] )

__details_events( ctx[0] );
_fnSaveState( ctx[0] );
__details_state( ctx );
}
}
};
@@ -8444,7 +8469,7 @@
var api = new _Api( settings );
var namespace = '.dt.DT_details';
var drawEvent = 'draw'+namespace;
var colvisEvent = 'column-visibility'+namespace;
var colvisEvent = 'column-sizing'+namespace;
var destroyEvent = 'destroy'+namespace;
var data = settings.aoData;

@@ -9496,7 +9521,6 @@
remove = remove || false;

return this.iterator( 'table', function ( settings ) {
var orig = settings.nTableWrapper.parentNode;
var classes = settings.oClasses;
var table = settings.nTable;
var tbody = settings.nTBody;
@@ -9551,6 +9575,8 @@
jqTbody.children().detach();
jqTbody.append( rows );

var orig = settings.nTableWrapper.parentNode;

// Remove the DataTables generated nodes, events and classes
var removedMethod = remove ? 'remove' : 'detach';
jqTable[ removedMethod ]();
@@ -9635,7 +9661,7 @@
}

return resolved.replace( '%d', plural ); // nb: plural might be undefined,
} );
} );
/**
* Version string for plug-ins to check compatibility. Allowed format is
* `a.b.c-d` where: a:int, b:int, c:int, d:string(dev|beta|alpha). `d` is used
@@ -9644,8 +9670,8 @@
* @type string
* @default Version number
*/
DataTable.version = "1.11.3";

DataTable.version = "1.12.1";

/**
* Private data store, containing all of the settings objects that are
* created for the tables on a given page.
@@ -9659,7 +9685,7 @@
* @private
*/
DataTable.settings = [];

/**
* Object models container, for the various models that DataTables has
* available to it. These models define the objects that are used to hold
@@ -11849,7 +11875,6 @@
* Text which is displayed when the table is processing a user action
* (usually a sort command or similar).
* @type string
* @default Processing...
*
* @dtopt Language
* @name DataTable.defaults.language.processing
@@ -11863,7 +11888,7 @@
* } );
* } );
*/
"sProcessing": "Processing...",
"sProcessing": "",

/**
@@ -14017,7 +14042,7 @@
*/
"rowId": null
};

/**
* Extension object for DataTables that is used to provide all extension
* options.
@@ -14069,7 +14094,7 @@
*
* @type string
*/
build:"bs5/dt-1.11.3",
build:"bs5/dt-1.12.1",

/**
@@ -15115,6 +15140,213 @@
d;
};

// Common logic for moment, luxon or a date action
function __mld( dt, momentFn, luxonFn, dateFn, arg1 ) {
if (window.moment) {
return dt[momentFn]( arg1 );
}
else if (window.luxon) {
return dt[luxonFn]( arg1 );
}

return dateFn ? dt[dateFn]( arg1 ) : dt;
}

var __mlWarning = false;
function __mldObj (d, format, locale) {
var dt;

if (window.moment) {
dt = window.moment.utc( d, format, locale, true );

if (! dt.isValid()) {
return null;
}
}
else if (window.luxon) {
dt = format
? window.luxon.DateTime.fromFormat( d, format )
: window.luxon.DateTime.fromISO( d );

if (! dt.isValid) {
return null;
}

dt.setLocale(locale);
}
else if (! format) {
// No format given, must be ISO
dt = new Date(d);
}
else {
if (! __mlWarning) {
alert('DataTables warning: Formatted date without Moment.js or Luxon - https://datatables.net/tn/17');
}

__mlWarning = true;
}

return dt;
}

// Wrapper for date, datetime and time which all operate the same way with the exception of
// the output string for auto locale support
function __mlHelper (localeString) {
return function ( from, to, locale, def ) {
// Luxon and Moment support
// Argument shifting
if ( arguments.length === 0 ) {
locale = 'en';
to = null; // means toLocaleString
from = null; // means iso8601
}
else if ( arguments.length === 1 ) {
locale = 'en';
to = from;
from = null;
}
else if ( arguments.length === 2 ) {
locale = to;
to = from;
from = null;
}

var typeName = 'datetime-' + to;

// Add type detection and sorting specific to this date format - we need to be able to identify
// date type columns as such, rather than as numbers in extensions. Hence the need for this.
if (! DataTable.ext.type.order[typeName]) {
// The renderer will give the value to type detect as the type!
DataTable.ext.type.detect.unshift(function (d) {
return d === typeName ? typeName : false;
});

// The renderer gives us Moment, Luxon or Date objects for the sorting, all of which have a
// `valueOf` which gives milliseconds epoch
DataTable.ext.type.order[typeName + '-asc'] = function (a, b) {
var x = a.valueOf();
var y = b.valueOf();

return x === y
? 0
: x < y
? -1
: 1;
}

DataTable.ext.type.order[typeName + '-desc'] = function (a, b) {
var x = a.valueOf();
var y = b.valueOf();

return x === y
? 0
: x > y
? -1
: 1;
}
}

return function ( d, type ) {
// Allow for a default value
if (d === null || d === undefined) {
if (def === '--now') {
// We treat everything as UTC further down, so no changes are
// made, as such need to get the local date / time as if it were
// UTC
var local = new Date();
d = new Date( Date.UTC(
local.getFullYear(), local.getMonth(), local.getDate(),
local.getHours(), local.getMinutes(), local.getSeconds()
) );
}
else {
d = '';
}
}

if (type === 'type') {
// Typing uses the type name for fast matching
return typeName;
}

if (d === '') {
return type !== 'sort'
? ''
: __mldObj('0000-01-01 00:00:00', null, locale);
}

// Shortcut. If `from` and `to` are the same, we are using the renderer to
// format for ordering, not display - it's already in the display format.
if ( to !== null && from === to && type !== 'sort' && type !== 'type' && ! (d instanceof Date) ) {
return d;
}

var dt = __mldObj(d, from, locale);

if (dt === null) {
return d;
}

if (type === 'sort') {
return dt;
}

var formatted = to === null
? __mld(dt, 'toDate', 'toJSDate', '')[localeString]()
: __mld(dt, 'format', 'toFormat', 'toISOString', to);

// XSS protection
return type === 'display' ?
__htmlEscapeEntities( formatted ) :
formatted;
};
}
}

// Based on locale, determine standard number formatting
// Fallback for legacy browsers is US English
var __thousands = ',';
var __decimal = '.';

if (Intl) {
try {
var num = new Intl.NumberFormat().formatToParts(100000.1);

for (var i=0 ; i<num.length ; i++) {
if (num[i].type === 'group') {
__thousands = num[i].value;
}
else if (num[i].type === 'decimal') {
__decimal = num[i].value;
}
}
}
catch (e) {
// noop
}
}
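The Intl.NumberFormat probe above reads the locale's group and decimal characters from formatToParts. For instance (illustrative, not part of the diff), forcing a German locale shows why both symbols must be detected:

// For de-DE the parts contain type 'group' with value '.' and
// type 'decimal' with value ',' - the reverse of en-US.
var parts = new Intl.NumberFormat('de-DE').formatToParts(100000.1);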
// Formatted date time detection - use by declaring the formats you are going to use
DataTable.datetime = function ( format, locale ) {
var typeName = 'datetime-detect-' + format;

if (! locale) {
locale = 'en';
}

if (! DataTable.ext.type.order[typeName]) {
DataTable.ext.type.detect.unshift(function (d) {
var dt = __mldObj(d, format, locale);
return d === '' || dt ? typeName : false;
});

DataTable.ext.type.order[typeName + '-pre'] = function (d) {
return __mldObj(d, format, locale) || 0;
}
}
}
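Going by the comment above, the new DataTable.datetime registers type detection and ordering for a declared format. A hedged usage sketch (assumes Moment.js is loaded so the format string can be parsed, and an #example table exists):

// Declare the column date format before initialising the table, so that
// e.g. '03 Jul 2022' values are detected and sorted chronologically.
DataTable.datetime('D MMM YYYY');
$('#example').DataTable();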
/**
* Helpers for `columns.render`.
*
@@ -15142,13 +15374,29 @@
* @namespace
*/
DataTable.render = {
date: __mlHelper('toLocaleDateString'),
datetime: __mlHelper('toLocaleString'),
time: __mlHelper('toLocaleTimeString'),
number: function ( thousands, decimal, precision, prefix, postfix ) {
// Auto locale detection
if (thousands === null || thousands === undefined) {
thousands = __thousands;
}

if (decimal === null || decimal === undefined) {
decimal = __decimal;
}

return {
display: function ( d ) {
if ( typeof d !== 'number' && typeof d !== 'string' ) {
return d;
}

if (d === '' || d === null) {
return d;
}

var negative = d < 0 ? '-' : '';
var flo = parseFloat( d );
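With the auto-detection above, passing null separators to the number renderer falls back to the locale-derived __thousands and __decimal values. A hedged sketch of the documented renderer signature (the column index and '$' prefix are illustrative):

// Render column 3 as a currency-style number, letting the locale
// supply the thousands and decimal characters.
$('#example').DataTable({
    columnDefs: [
        { targets: 3, render: DataTable.render.number(null, null, 2, '$') }
    ]
});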
@@ -15317,29 +15565,29 @@
// added to prevent errors
} );

// jQuery access
$.fn.dataTable = DataTable;

// Provide access to the host jQuery object (circular reference)
DataTable.$ = $;

// Legacy aliases
$.fn.dataTableSettings = DataTable.settings;
$.fn.dataTableExt = DataTable.ext;

// With a capital `D` we return a DataTables API instance rather than a
// jQuery object
$.fn.DataTable = function ( opts ) {
return $(this).dataTable( opts ).api();
};

// All properties that are available to $.fn.dataTable should also be
// available on $.fn.DataTable
$.each( DataTable, function ( prop, val ) {
$.fn.DataTable[ prop ] = val;
} );

return DataTable;
}));

@@ -1,5 +1,5 @@
/*!
* jQuery JavaScript Library v3.6.0 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/Tween,-effects/animatedSelector
* jQuery JavaScript Library v3.6.1 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/Tween,-effects/animatedSelector
* https://jquery.com/
*
* Includes Sizzle.js
@@ -9,7 +9,7 @@
* Released under the MIT license
* https://jquery.org/license
*
* Date: 2021-03-02T17:08Z
* Date: 2022-08-26T17:52Z
*/
( function( global, factory ) {

@@ -23,7 +23,7 @@
// (such as Node.js), expose a factory as module.exports.
// This accentuates the need for the creation of a real `window`.
// e.g. var jQuery = require("jquery")(window);
// See ticket #14549 for more info.
// See ticket trac-14549 for more info.
module.exports = global.document ?
factory( global, true ) :
function( w ) {
@@ -151,7 +151,7 @@ function toType( obj ) {

var
version = "3.6.0 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/Tween,-effects/animatedSelector",
version = "3.6.1 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/Tween,-effects/animatedSelector",

// Define a local copy of jQuery
jQuery = function( selector, context ) {
@@ -3129,8 +3129,8 @@ jQuery.fn.extend( {
var rootjQuery,

// A simple way to check for HTML strings
// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
// Strict HTML recognition (#11290: must start with <)
// Prioritize #id over <tag> to avoid XSS via location.hash (trac-9521)
// Strict HTML recognition (trac-11290: must start with <)
// Shortcut simple #id case for speed
rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,

@@ -4087,7 +4087,7 @@ jQuery.extend( {
isReady: false,

// A counter to track how many items to wait for before
// the ready event fires. See #6781
// the ready event fires. See trac-6781
readyWait: 1,

// Handle when the DOM is ready
@@ -4215,7 +4215,7 @@ function fcamelCase( _all, letter ) {

// Convert dashed to camelCase; used by the css and data modules
// Support: IE <=9 - 11, Edge 12 - 15
// Microsoft forgot to hump their vendor prefix (#9572)
// Microsoft forgot to hump their vendor prefix (trac-9572)
function camelCase( string ) {
return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
}
@@ -4251,7 +4251,7 @@ Data.prototype = {
value = {};

// We can accept data for non-element nodes in modern browsers,
// but we should not, see #8335.
// but we should not, see trac-8335.
// Always return an empty object.
if ( acceptData( owner ) ) {

@@ -4490,7 +4490,7 @@ jQuery.fn.extend( {
while ( i-- ) {

// Support: IE 11 only
// The attrs elements can be null (#14894)
// The attrs elements can be null (trac-14894)
if ( attrs[ i ] ) {
name = attrs[ i ].name;
if ( name.indexOf( "data-" ) === 0 ) {
@@ -4913,9 +4913,9 @@ var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
input = document.createElement( "input" );

// Support: Android 4.0 - 4.3 only
// Check state lost if the name is set (#11217)
// Check state lost if the name is set (trac-11217)
// Support: Windows Web Apps (WWA)
// `name` and `type` must use .setAttribute for WWA (#14901)
// `name` and `type` must use .setAttribute for WWA (trac-14901)
input.setAttribute( "type", "radio" );
input.setAttribute( "checked", "checked" );
input.setAttribute( "name", "t" );
@@ -4939,7 +4939,7 @@ var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
} )();

// We have to close these tags to support XHTML (#13200)
// We have to close these tags to support XHTML (trac-13200)
var wrapMap = {

// XHTML parsers do not magically insert elements in the
@@ -4965,7 +4965,7 @@ if ( !support.option ) {
function getAll( context, tag ) {

// Support: IE <=9 - 11 only
// Use typeof to avoid zero-argument method invocation on host objects (#15151)
// Use typeof to avoid zero-argument method invocation on host objects (trac-15151)
var ret;

if ( typeof context.getElementsByTagName !== "undefined" ) {
@@ -5048,7 +5048,7 @@ function buildFragment( elems, context, scripts, selection, ignored ) {
// Remember the top-level container
tmp = fragment.firstChild;

// Ensure the created nodes are orphaned (#12392)
// Ensure the created nodes are orphaned (trac-12392)
tmp.textContent = "";
}
}
@@ -5469,15 +5469,15 @@ jQuery.event = {

for ( ; cur !== this; cur = cur.parentNode || this ) {

// Don't check non-elements (#13208)
// Don't process clicks on disabled elements (#6911, #8165, #11382, #11764)
// Don't check non-elements (trac-13208)
// Don't process clicks on disabled elements (trac-6911, trac-8165, trac-11382, trac-11764)
if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) {
matchedHandlers = [];
matchedSelectors = {};
for ( i = 0; i < delegateCount; i++ ) {
handleObj = handlers[ i ];

// Don't conflict with Object.prototype properties (#13203)
// Don't conflict with Object.prototype properties (trac-13203)
sel = handleObj.selector + " ";

if ( matchedSelectors[ sel ] === undefined ) {
@@ -5731,7 +5731,7 @@ jQuery.Event = function( src, props ) {

// Create target properties
// Support: Safari <=6 - 7 only
// Target should not be a text node (#504, #13143)
// Target should not be a text node (trac-504, trac-13143)
this.target = ( src.target && src.target.nodeType === 3 ) ?
src.target.parentNode :
src.target;
@@ -5854,10 +5854,10 @@ jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateTyp
return true;
},

// Suppress native focus or blur as it's already being fired
// in leverageNative.
_default: function() {
return true;
// Suppress native focus or blur if we're currently inside
// a leveraged native-event stack
_default: function( event ) {
return dataPriv.get( event.target, type );
},

delegateType: delegateType
@@ -5956,7 +5956,8 @@ var

// checked="checked" or checked
rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;

rcleanScript = /^\s*<!\[CDATA\[|\]\]>\s*$/g;

// Prefer a tbody over its parent table for containing new rows
function manipulationTarget( elem, content ) {
@@ -6070,7 +6071,7 @@ function domManip( collection, args, callback, ignored ) {

// Use the original fragment for the last item
// instead of the first because it can end up
// being emptied incorrectly in certain situations (#8070).
// being emptied incorrectly in certain situations (trac-8070).
for ( ; i < l; i++ ) {
node = fragment;

@@ -6111,6 +6112,12 @@ function domManip( collection, args, callback, ignored ) {
}, doc );
}
} else {

// Unwrap a CDATA section containing script contents. This shouldn't be
// needed as in XML documents they're already not visible when
// inspecting element contents and in HTML documents they have no
// meaning but we're preserving that logic for backwards compatibility.
// This will be removed completely in 4.0. See gh-4904.
DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc );
}
}
@@ -6393,9 +6400,12 @@ jQuery.each( {
} );
var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" );

var rcustomProp = /^--/;

var getStyles = function( elem ) {

// Support: IE <=11 only, Firefox <=30 (#15098, #14150)
// Support: IE <=11 only, Firefox <=30 (trac-15098, trac-14150)
// IE throws on elements created in popups
// FF meanwhile throws on frame elements through "defaultView.getComputedStyle"
var view = elem.ownerDocument.defaultView;
@@ -6430,6 +6440,15 @@ var swap = function( elem, options, callback ) {

var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" );

var whitespace = "[\\x20\\t\\r\\n\\f]";

var rtrimCSS = new RegExp(
"^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$",
"g"
);

( function() {
@@ -6495,7 +6514,7 @@ var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" );
}

// Support: IE <=9 - 11 only
// Style of cloned element affects source element cloned (#8908)
// Style of cloned element affects source element cloned (trac-8908)
div.style.backgroundClip = "content-box";
div.cloneNode( true ).style.backgroundClip = "";
support.clearCloneStyle = div.style.backgroundClip === "content-box";
@@ -6575,6 +6594,7 @@ var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" );

function curCSS( elem, name, computed ) {
var width, minWidth, maxWidth, ret,
isCustomProp = rcustomProp.test( name ),

// Support: Firefox 51+
// Retrieving style before computed somehow
@@ -6585,11 +6605,22 @@ function curCSS( elem, name, computed ) {
computed = computed || getStyles( elem );

// getPropertyValue is needed for:
// .css('filter') (IE 9 only, #12537)
// .css('--customProperty) (#3144)
// .css('filter') (IE 9 only, trac-12537)
// .css('--customProperty) (gh-3144)
if ( computed ) {
ret = computed.getPropertyValue( name ) || computed[ name ];

// trim whitespace for custom property (issue gh-4926)
if ( isCustomProp ) {

// rtrim treats U+000D CARRIAGE RETURN and U+000C FORM FEED
// as whitespace while CSS does not, but this is not a problem
// because CSS preprocessing replaces them with U+000A LINE FEED
// (which *is* CSS whitespace)
// https://www.w3.org/TR/css-syntax-3/#input-preprocessing
ret = ret.replace( rtrimCSS, "$1" );
}

if ( ret === "" && !isAttached( elem ) ) {
ret = jQuery.style( elem, name );
}
@@ -6685,7 +6716,6 @@ var
// except "table", "table-cell", or "table-caption"
// See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
rdisplayswap = /^(none|table(?!-c[ea]).+)/,
rcustomProp = /^--/,
cssShow = { position: "absolute", visibility: "hidden", display: "block" },
cssNormalTransform = {
letterSpacing: "0",
@@ -6921,15 +6951,15 @@ jQuery.extend( {
if ( value !== undefined ) {
type = typeof value;

// Convert "+=" or "-=" to relative numbers (#7345)
// Convert "+=" or "-=" to relative numbers (trac-7345)
if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) {
value = adjustCSS( elem, name, ret );

// Fixes bug #9237
// Fixes bug trac-9237
type = "number";
}

// Make sure that null and NaN values aren't set (#7116)
// Make sure that null and NaN values aren't set (trac-7116)
if ( value == null || value !== value ) {
return;
}
@@ -7149,7 +7179,6 @@ jQuery.fn.extend( {

// Based off of the plugin by Clint Helfers, with permission.
// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/
jQuery.fn.delay = function( time, type ) {
time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time;
type = type || "fx";
@@ -7374,8 +7403,7 @@ jQuery.extend( {
// Support: IE <=9 - 11 only
// elem.tabIndex doesn't always return the
// correct value when it hasn't been explicitly set
// https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
// Use proper attribute retrieval(#12072)
// Use proper attribute retrieval (trac-12072)
var tabindex = jQuery.find.attr( elem, "tabindex" );

if ( tabindex ) {
@@ -7479,8 +7507,7 @@ function classesToArray( value ) {

jQuery.fn.extend( {
addClass: function( value ) {
var classes, elem, cur, curValue, clazz, j, finalValue,
i = 0;
var classNames, cur, curValue, className, i, finalValue;

if ( isFunction( value ) ) {
return this.each( function( j ) {
@@ -7488,36 +7515,35 @@ jQuery.fn.extend( {
} );
}

classes = classesToArray( value );
classNames = classesToArray( value );

if ( classes.length ) {
while ( ( elem = this[ i++ ] ) ) {
curValue = getClass( elem );
cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
if ( classNames.length ) {
return this.each( function() {
curValue = getClass( this );
cur = this.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );

if ( cur ) {
j = 0;
while ( ( clazz = classes[ j++ ] ) ) {
if ( cur.indexOf( " " + clazz + " " ) < 0 ) {
cur += clazz + " ";
for ( i = 0; i < classNames.length; i++ ) {
className = classNames[ i ];
if ( cur.indexOf( " " + className + " " ) < 0 ) {
cur += className + " ";
}
}

// Only assign if different to avoid unneeded rendering.
finalValue = stripAndCollapse( cur );
if ( curValue !== finalValue ) {
elem.setAttribute( "class", finalValue );
this.setAttribute( "class", finalValue );
}
}
}
} );
}

return this;
},

removeClass: function( value ) {
var classes, elem, cur, curValue, clazz, j, finalValue,
i = 0;
var classNames, cur, curValue, className, i, finalValue;

if ( isFunction( value ) ) {
return this.each( function( j ) {
@@ -7529,45 +7555,42 @@ jQuery.fn.extend( {
return this.attr( "class", "" );
}

classes = classesToArray( value );
classNames = classesToArray( value );

if ( classes.length ) {
while ( ( elem = this[ i++ ] ) ) {
curValue = getClass( elem );
if ( classNames.length ) {
return this.each( function() {
curValue = getClass( this );

// This expression is here for better compressibility (see addClass)
cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
cur = this.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );

if ( cur ) {
j = 0;
while ( ( clazz = classes[ j++ ] ) ) {
for ( i = 0; i < classNames.length; i++ ) {
className = classNames[ i ];

// Remove *all* instances
while ( cur.indexOf( " " + clazz + " " ) > -1 ) {
cur = cur.replace( " " + clazz + " ", " " );
while ( cur.indexOf( " " + className + " " ) > -1 ) {
cur = cur.replace( " " + className + " ", " " );
}
}

// Only assign if different to avoid unneeded rendering.
finalValue = stripAndCollapse( cur );
if ( curValue !== finalValue ) {
elem.setAttribute( "class", finalValue );
this.setAttribute( "class", finalValue );
}
}
}
} );
}

return this;
},

toggleClass: function( value, stateVal ) {
var type = typeof value,
var classNames, className, i, self,
type = typeof value,
isValidValue = type === "string" || Array.isArray( value );

if ( typeof stateVal === "boolean" && isValidValue ) {
return stateVal ? this.addClass( value ) : this.removeClass( value );
}

if ( isFunction( value ) ) {
return this.each( function( i ) {
jQuery( this ).toggleClass(
@@ -7577,17 +7600,20 @@ jQuery.fn.extend( {
} );
}

return this.each( function() {
var className, i, self, classNames;
if ( typeof stateVal === "boolean" && isValidValue ) {
return stateVal ? this.addClass( value ) : this.removeClass( value );
}

classNames = classesToArray( value );

return this.each( function() {
if ( isValidValue ) {

// Toggle individual class names
i = 0;
self = jQuery( this );
classNames = classesToArray( value );

while ( ( className = classNames[ i++ ] ) ) {
for ( i = 0; i < classNames.length; i++ ) {
className = classNames[ i ];

// Check each className given, space separated list
if ( self.hasClass( className ) ) {
@@ -7721,7 +7747,7 @@ jQuery.extend( {
val :

// Support: IE <=10 - 11 only
// option.text throws exceptions (#14686, #14858)
// option.text throws exceptions (trac-14686, trac-14858)
// Strip and collapse whitespace
// https://html.spec.whatwg.org/#strip-and-collapse-whitespace
stripAndCollapse( jQuery.text( elem ) );
@@ -7748,7 +7774,7 @@ jQuery.extend( {
option = options[ i ];

// Support: IE <=9 only
// IE8-9 doesn't update selected after form reset (#2551)
// IE8-9 doesn't update selected after form reset (trac-2551)
if ( ( option.selected || i === index ) &&

// Don't return options that are disabled or in a disabled optgroup
@@ -7891,8 +7917,8 @@ jQuery.extend( jQuery.event, {
return;
}

// Determine event propagation path in advance, per W3C events spec (#9951)
// Bubble up to document, then to window; watch for a global ownerDocument var (#9724)
// Determine event propagation path in advance, per W3C events spec (trac-9951)
// Bubble up to document, then to window; watch for a global ownerDocument var (trac-9724)
if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) {

bubbleType = special.delegateType || type;
@@ -7944,7 +7970,7 @@ jQuery.extend( jQuery.event, {
acceptData( elem ) ) {

// Call a native DOM method on the target with the same name as the event.
// Don't do default actions on window, that's where global variables be (#6170)
// Don't do default actions on window, that's where global variables be (trac-6170)
if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) {

// Don't re-trigger an onFOO event when we call its FOO() method
@@ -8654,7 +8680,9 @@ jQuery.each(

// Support: Android <=4.0 only
// Make sure we trim BOM and NBSP
var rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;
// Require that the "whitespace run" starts from a non-whitespace
// to avoid O(N^2) behavior when the engine would try matching "\s+$" at each space position.
var rtrim = /^[\s\uFEFF\xA0]+|([^\s\uFEFF\xA0])[\s\uFEFF\xA0]+$/g;

// Bind a function to a context, optionally partially applying any
// arguments.
@@ -8721,7 +8749,7 @@ jQuery.isNumeric = function( obj ) {
jQuery.trim = function( text ) {
return text == null ?
"" :
( text + "" ).replace( rtrim, "" );
( text + "" ).replace( rtrim, "$1" );
};
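The reworked rtrim above captures the last non-whitespace character so the replacement can restore it with $1; the observable result is unchanged while the pathological backtracking case disappears. A quick illustrative check:

// Same result before and after the regex change, including NBSP trimming.
jQuery.trim( "  \u00A0hello world\u00A0  " ) === "hello world";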
@@ -8769,8 +8797,8 @@ jQuery.noConflict = function( deep ) {
|
||||
};
|
||||
|
||||
// Expose jQuery and $ identifiers, even in AMD
|
||||
// (#7102#comment:10, https://github.com/jquery/jquery/pull/557)
|
||||
// and CommonJS for browser emulators (#13566)
|
||||
// (trac-7102#comment:10, https://github.com/jquery/jquery/pull/557)
|
||||
// and CommonJS for browser emulators (trac-13566)
|
||||
if ( typeof noGlobal === "undefined" ) {
|
||||
window.jQuery = window.$ = jQuery;
|
||||
}
|
||||
@@ -20,6 +20,13 @@
|
||||
width: auto;
|
||||
margin: -5px 0 0 0;
|
||||
}
|
||||
/* Special alert-row class to use Bootstrap v5.2+ variable colors */
|
||||
.alert-row {
|
||||
--bs-alert-border: 1px solid var(--bs-alert-border-color);
|
||||
color: var(--bs-alert-color);
|
||||
background-color: var(--bs-alert-bg);
|
||||
border: var(--bs-alert-border);
|
||||
}
|
||||
</style>
|
||||
<script src="{{urlpath}}/vw_static/identicon.js"></script>
|
||||
<script>
|
||||
|
||||
@@ -140,8 +140,8 @@
|
||||
<span><b>Server:</b> {{page_data.server_time_local}}</span>
|
||||
</dd>
|
||||
<dt class="col-sm-5">Date & Time (UTC)
|
||||
<span class="badge bg-success d-none" id="time-success" title="Time offsets seem to be correct.">Ok</span>
|
||||
<span class="badge bg-danger d-none" id="time-warning" title="Time offsets are too mouch at drift.">Error</span>
|
||||
<span class="badge bg-success d-none" id="time-success" title="Server and browser times are within 30 seconds of each other.">Ok</span>
|
||||
<span class="badge bg-danger d-none" id="time-warning" title="Server and browser times are more than 30 seconds apart.">Error</span>
|
||||
</dt>
|
||||
<dd class="col-sm-7">
|
||||
<span id="time-server" class="d-block"><b>Server:</b> <span id="time-server-string">{{page_data.server_time}}</span></span>
|
||||
@@ -151,7 +151,7 @@
|
||||
<dt class="col-sm-5">Domain configuration
|
||||
<span class="badge bg-success d-none" id="domain-success" title="The domain variable matches the browser location and seems to be configured correctly.">Match</span>
|
||||
<span class="badge bg-danger d-none" id="domain-warning" title="The domain variable does not match the browser location.
The domain variable does not seem to be configured correctly.
Some features may not work as expected!">No Match</span>
|
||||
<span class="badge bg-success d-none" id="https-success" title="Configurued to use HTTPS">HTTPS</span>
|
||||
<span class="badge bg-success d-none" id="https-success" title="Configured to use HTTPS">HTTPS</span>
|
||||
<span class="badge bg-danger d-none" id="https-warning" title="Not configured to use HTTPS.
Some features may not work as expected!">No HTTPS</span>
|
||||
</dt>
|
||||
<dd class="col-sm-7">
|
||||
@@ -168,8 +168,8 @@
|
||||
<dl class="row">
|
||||
<dd class="col-sm-12">
|
||||
If you need support please check the following links first before you create a new issue:
|
||||
<a href="https://vaultwarden.discourse.group/" target="_blank" rel="noreferrer">Vaultwarden Forum</a>
|
||||
| <a href="https://github.com/dani-garcia/vaultwarden/discussions" target="_blank" rel="noreferrer">Github Discussions</a>
|
||||
<a href="https://vaultwarden.discourse.group/" target="_blank" rel="noreferrer noopener">Vaultwarden Forum</a>
|
||||
| <a href="https://github.com/dani-garcia/vaultwarden/discussions" target="_blank" rel="noreferrer noopener">Github Discussions</a>
|
||||
</dd>
|
||||
</dl>
|
||||
<dl class="row">
|
||||
|
||||
@@ -13,9 +13,9 @@
|
||||
<small>Please provide it below:</small>
|
||||
|
||||
<form class="form-inline" method="post">
|
||||
<input type="password" class="form-control w-50 mr-2" name="token" placeholder="Enter admin token">
|
||||
<input type="password" class="form-control w-50 mr-2" name="token" placeholder="Enter admin token" autofocus="autofocus">
|
||||
<button type="submit" class="btn btn-primary">Enter</button>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</main>
|
||||
</main>
|
||||
|
||||
@@ -49,7 +49,7 @@
|
||||
</main>
|
||||
|
||||
<link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
|
||||
<script src="{{urlpath}}/vw_static/jquery-3.6.0.slim.js"></script>
|
||||
<script src="{{urlpath}}/vw_static/jquery-3.6.1.slim.js"></script>
|
||||
<script src="{{urlpath}}/vw_static/datatables.js"></script>
|
||||
<script>
|
||||
'use strict';
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
<div class="small text-white mb-3">
|
||||
<span class="font-weight-bolder">NOTE:</span> The settings here override the environment variables. Once saved, it's recommended to stop setting them to avoid confusion.<br>
|
||||
This does not apply to the read-only section, which can only be set via environment variables.<br>
|
||||
Settings which are overridden are shown with <span class="is-overridden-true">double underscores</span>.
|
||||
Settings which are overridden are shown with <span class="is-overridden-true alert-row px-1">a yellow colored background</span>.
|
||||
</div>
|
||||
|
||||
<form class="form needs-validation" id="config-form" onsubmit="saveConfig(); return false;" novalidate>
|
||||
@@ -16,7 +16,7 @@
|
||||
<div id="g_{{group}}" class="card-body collapse">
|
||||
{{#each elements}}
|
||||
{{#if editable}}
|
||||
<div class="row my-2 align-items-center is-overridden-{{overridden}}" title="[{{name}}] {{doc.description}}">
|
||||
<div class="row my-2 align-items-center is-overridden-{{overridden}} alert-row" title="[{{name}}] {{doc.description}}">
|
||||
{{#case type "text" "number" "password"}}
|
||||
<label for="input_{{name}}" class="col-sm-3 col-form-label">{{doc.name}}</label>
|
||||
<div class="col-sm-8">
|
||||
@@ -71,16 +71,25 @@
 {{#each config}}
 {{#each elements}}
 {{#unless editable}}
-<div class="row my-2 align-items-center" title="[{{name}}] {{doc.description}}">
+<div class="row my-2 align-items-center alert-row" title="[{{name}}] {{doc.description}}">
 {{#case type "text" "number" "password"}}
 <label for="input_{{name}}" class="col-sm-3 col-form-label">{{doc.name}}</label>
 <div class="col-sm-8">
 <div class="input-group">
-<input readonly class="form-control" id="input_{{name}}" type="{{type}}"
-value="{{value}}" {{#if default}} placeholder="Default: {{default}}" {{/if}}>
-{{#case type "password"}}
-<button class="btn btn-outline-secondary" type="button" onclick="toggleVis('input_{{name}}');">Show/hide</button>
-{{/case}}
+{{!--
+Also set the database_url input as password here.
+If we would set it to password in config.rs it will not be character masked for the support string.
+And sometimes this is more useful for providing support than just 3 asterisks.
+--}}
+{{#if (eq name "database_url")}}
+<input readonly class="form-control" id="input_{{name}}" type="password" value="{{value}}" {{#if default}} placeholder="Default: {{default}}" {{/if}}>
+<button class="btn btn-outline-secondary" type="button" onclick="toggleVis('input_{{name}}');">Show/hide</button>
+{{else}}
+<input readonly class="form-control" id="input_{{name}}" type="{{type}}" value="{{value}}" {{#if default}} placeholder="Default: {{default}}" {{/if}}>
+{{#case type "password"}}
+<button class="btn btn-outline-secondary" type="button" onclick="toggleVis('input_{{name}}');">Show/hide</button>
+{{/case}}
+{{/if}}
 </div>
 </div>
 {{/case}}
@@ -113,7 +122,7 @@
 This does not include any configuration or file attachment data that may
 also be needed to fully restore a vaultwarden instance. For details on
 how to perform complete backups, refer to the wiki page on
-<a href="https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault">backups</a>.
+<a href="https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault" target="_blank" rel="noopener noreferrer">backups</a>.
 </div>
 <button type="button" class="btn btn-primary" onclick="backupDatabase();">Backup Database</button>
 </div>
@@ -134,7 +143,9 @@
 }

 .is-overridden-true {
-    text-decoration: underline double;
+    --bs-alert-color: #664d03;
+    --bs-alert-bg: #fff3cd;
+    --bs-alert-border-color: #ffecb5;
 }
 </style>
@@ -238,19 +249,45 @@
 return Array.from(form).some(el => 'origValue' in el.dataset && ( el.dataset.origValue !== el.value));
 }

-// Trigger Form Change Detection
+// This function will prevent submitting a form when someone presses enter.
+function preventFormSubmitOnEnter(form) {
+    form.onkeypress = function(e) {
+        let key = e.charCode || e.keyCode || 0;
+        if (key == 13) {
+            e.preventDefault();
+        }
+    }
+}
+
+// Initialize Form Change Detection
 const config_form = document.getElementById('config-form');
 initChangeDetection(config_form);
+// Prevent enter from submitting the form and saving the config.
+// Users need to really click on save; this also prevents accidental submits.
+preventFormSubmitOnEnter(config_form);
+
+// This function will hook into the smtp-test-email input field and will call the smtpTest() function when enter is pressed.
+function submitTestEmailOnEnter() {
+    const smtp_test_email_input = document.getElementById('smtp-test-email');
+    smtp_test_email_input.onkeypress = function(e) {
+        let key = e.charCode || e.keyCode || 0;
+        if (key == 13) {
+            e.preventDefault();
+            smtpTest();
+        }
+    }
+}
+submitTestEmailOnEnter();

 // Colorize some settings which are high risk
-const risk_items = document.getElementsByClassName('col-form-label');
-function colorRiskSettings(risk_el) {
-    Array.from(risk_el).forEach((el) => {
+function colorRiskSettings() {
+    const risk_items = document.getElementsByClassName('col-form-label');
+    Array.from(risk_items).forEach((el) => {
         if (el.innerText.toLowerCase().includes('risks') ) {
             el.parentElement.className += ' alert-danger'
         }
     });
 }
-colorRiskSettings(risk_items);
+colorRiskSettings();

 </script>
@@ -136,7 +136,7 @@
 </main>

 <link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
-<script src="{{urlpath}}/vw_static/jquery-3.6.0.slim.js"></script>
+<script src="{{urlpath}}/vw_static/jquery-3.6.1.slim.js"></script>
 <script src="{{urlpath}}/vw_static/datatables.js"></script>
 <script>
 'use strict';

src/util.rs (253 changes)
@@ -1,23 +1,26 @@
 //
 // Web Headers and caching
 //
-use std::io::Cursor;
+use std::io::{Cursor, ErrorKind};

 use rocket::{
     fairing::{Fairing, Info, Kind},
-    http::{ContentType, Header, HeaderMap, Method, RawStr, Status},
+    http::{ContentType, Header, HeaderMap, Method, Status},
     request::FromParam,
     response::{self, Responder},
-    Data, Request, Response, Rocket,
+    Data, Orbit, Request, Response, Rocket,
 };

-use std::thread::sleep;
-use std::time::Duration;
+use tokio::{
+    runtime::Handle,
+    time::{sleep, Duration},
+};

 use crate::CONFIG;

 pub struct AppHeaders();

+#[rocket::async_trait]
 impl Fairing for AppHeaders {
     fn info(&self) -> Info {
         Info {
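The new tokio imports replace the blocking std sleep with the runtime's timer. A minimal sketch of the difference, assuming the tokio crate with its macros and time features enabled:

use tokio::time::{sleep, Duration};

#[tokio::main]
async fn main() {
    // tokio::time::sleep yields to the runtime; std::thread::sleep would
    // block the whole worker thread instead.
    sleep(Duration::from_millis(500)).await;
}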
@@ -26,20 +29,72 @@ impl Fairing for AppHeaders {
     }
 }

-    fn on_response(&self, _req: &Request, res: &mut Response) {
-        res.set_raw_header("Permissions-Policy", "accelerometer=(), ambient-light-sensor=(), autoplay=(), camera=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), sync-xhr=(self \"https://haveibeenpwned.com\" \"https://2fa.directory\"), usb=(), vr=()");
+    async fn on_response<'r>(&self, req: &'r Request<'_>, res: &mut Response<'r>) {
+        res.set_raw_header("Permissions-Policy", "accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), execution-while-not-rendered=(), execution-while-out-of-viewport=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()");
         res.set_raw_header("Referrer-Policy", "same-origin");
-        res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
         res.set_raw_header("X-Content-Type-Options", "nosniff");
-        res.set_raw_header("X-XSS-Protection", "1; mode=block");
-        let csp = format!(
+        // Obsolete in modern browsers, unsafe (XS-Leak), and largely replaced by CSP
+        res.set_raw_header("X-XSS-Protection", "0");
+
+        let req_uri_path = req.uri().path();
+
+        // Do not send the Content-Security-Policy (CSP) Header and X-Frame-Options for the *-connector.html files.
+        // This can cause issues when some MFA requests need to open a popup or page within the clients like WebAuthn, or Duo.
+        // This is the same behaviour as upstream Bitwarden.
+        if !req_uri_path.ends_with("connector.html") {
+            // Check if we are requesting an admin page, if so, allow unsafe-inline for scripts.
+            // TODO: In the future maybe we need to see if we can generate a sha256 hash or have no scripts inline at all.
+            let admin_path = format!("{}/admin", CONFIG.domain_path());
+            let mut script_src = "";
+            if req_uri_path.starts_with(admin_path.as_str()) {
+                script_src = " 'unsafe-inline'";
+            }
+
+            // # Frame Ancestors:
             // Chrome Web Store: https://chrome.google.com/webstore/detail/bitwarden-free-password-m/nngceckbapebfimnlniiiahkandclblb
             // Edge Add-ons: https://microsoftedge.microsoft.com/addons/detail/bitwarden-free-password/jbkfoedolllekgbhcbcoahefnbanhhlh?hl=en-US
             // Firefox Browser Add-ons: https://addons.mozilla.org/en-US/firefox/addon/bitwarden-password-manager/
-            "frame-ancestors 'self' chrome-extension://nngceckbapebfimnlniiiahkandclblb chrome-extension://jbkfoedolllekgbhcbcoahefnbanhhlh moz-extension://* {};",
-            CONFIG.allowed_iframe_ancestors()
-        );
-        res.set_raw_header("Content-Security-Policy", csp);
+            // # img/child/frame src:
+            // Have I Been Pwned and Gravatar to allow those calls to work.
+            // # Connect src:
+            // Leaked Passwords check: api.pwnedpasswords.com
+            // 2FA/MFA Site check: 2fa.directory
+            // # Mail Relay: https://bitwarden.com/blog/add-privacy-and-security-using-email-aliases-with-bitwarden/
+            // app.simplelogin.io, app.anonaddy.com, api.fastmail.com, quack.duckduckgo.com
+            let csp = format!(
+                "default-src 'self'; \
+                object-src 'self' blob:; \
+                script-src 'self'{script_src}; \
+                style-src 'self' 'unsafe-inline'; \
+                child-src 'self' https://*.duosecurity.com https://*.duofederal.com; \
+                frame-src 'self' https://*.duosecurity.com https://*.duofederal.com; \
+                frame-ancestors 'self' \
+                chrome-extension://nngceckbapebfimnlniiiahkandclblb \
+                chrome-extension://jbkfoedolllekgbhcbcoahefnbanhhlh \
+                moz-extension://* \
+                {allowed_iframe_ancestors}; \
+                img-src 'self' data: \
+                https://haveibeenpwned.com/ \
+                https://www.gravatar.com \
+                {icon_service_csp}; \
+                connect-src 'self' \
+                https://api.pwnedpasswords.com/range/ \
+                https://2fa.directory/api/ \
+                https://app.simplelogin.io/api/ \
+                https://app.anonaddy.com/api/ \
+                https://api.fastmail.com/ \
+                https://quack.duckduckgo.com/api/email/ \
+                ;\
+                ",
+                icon_service_csp = CONFIG._icon_service_csp(),
+                allowed_iframe_ancestors = CONFIG.allowed_iframe_ancestors()
+            );
+            res.set_raw_header("Content-Security-Policy", csp);
+            res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
+        } else {
+            // It looks like this header gets set somewhere else also, make sure this is not sent for these files, it will cause MFA issues.
+            res.remove_header("X-Frame-Options");
+        }

         // Disable cache unless otherwise specified
         if !res.headers().contains("cache-control") {
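This hunk also migrates the fairing itself from Rocket 0.4's synchronous hooks to Rocket 0.5's async trait. A minimal sketch of the new shape, assuming Rocket 0.5; ExampleHeaders is a hypothetical fairing used only for illustration, not code from the diff:

use rocket::{
    fairing::{Fairing, Info, Kind},
    Request, Response,
};

// Hypothetical fairing for illustration.
pub struct ExampleHeaders;

#[rocket::async_trait]
impl Fairing for ExampleHeaders {
    fn info(&self) -> Info {
        Info {
            name: "Example response headers",
            kind: Kind::Response,
        }
    }

    // In Rocket 0.5 the hook is async and the request/response borrows are
    // tied together by the 'r lifetime, as in the hunk above.
    async fn on_response<'r>(&self, _req: &'r Request<'_>, res: &mut Response<'r>) {
        res.set_raw_header("X-Content-Type-Options", "nosniff");
    }
}

Such a fairing would be attached with rocket::build().attach(ExampleHeaders) before launch.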
@@ -51,7 +106,7 @@ impl Fairing for AppHeaders {
 pub struct Cors();

 impl Cors {
-    fn get_header(headers: &HeaderMap, name: &str) -> String {
+    fn get_header(headers: &HeaderMap<'_>, name: &str) -> String {
         match headers.get_one(name) {
             Some(h) => h.to_string(),
             _ => "".to_string(),
@@ -60,7 +115,7 @@ impl Cors {

     // Check a request's `Origin` header against the list of allowed origins.
     // If a match exists, return it. Otherwise, return None.
-    fn get_allowed_origin(headers: &HeaderMap) -> Option<String> {
+    fn get_allowed_origin(headers: &HeaderMap<'_>) -> Option<String> {
         let origin = Cors::get_header(headers, "Origin");
         let domain_origin = CONFIG.domain_origin();
         let safari_extension_origin = "file://";
@@ -72,6 +127,7 @@ impl Cors {
     }
 }

+#[rocket::async_trait]
 impl Fairing for Cors {
     fn info(&self) -> Info {
         Info {
@@ -80,7 +136,7 @@ impl Fairing for Cors {
     }
 }

-    fn on_response(&self, request: &Request, response: &mut Response) {
+    async fn on_response<'r>(&self, request: &'r Request<'_>, response: &mut Response<'r>) {
         let req_headers = request.headers();

         if let Some(origin) = Cors::get_allowed_origin(req_headers) {
@@ -97,7 +153,7 @@ impl Fairing for Cors {
             response.set_header(Header::new("Access-Control-Allow-Credentials", "true"));
             response.set_status(Status::Ok);
             response.set_header(ContentType::Plain);
-            response.set_sized_body(Cursor::new(""));
+            response.set_sized_body(Some(0), Cursor::new(""));
         }
     }
 }
@@ -134,32 +190,28 @@ impl<R> Cached<R> {
     }
 }

-impl<'r, R: Responder<'r>> Responder<'r> for Cached<R> {
-    fn respond_to(self, req: &Request) -> response::Result<'r> {
+impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cached<R> {
+    fn respond_to(self, request: &'r Request<'_>) -> response::Result<'static> {
+        let mut res = self.response.respond_to(request)?;
+
         let cache_control_header = if self.is_immutable {
             format!("public, immutable, max-age={}", self.ttl)
         } else {
             format!("public, max-age={}", self.ttl)
         };
+        res.set_raw_header("Cache-Control", cache_control_header);

         let time_now = chrono::Local::now();
-
-        match self.response.respond_to(req) {
-            Ok(mut res) => {
-                res.set_raw_header("Cache-Control", cache_control_header);
-                let expiry_time = time_now + chrono::Duration::seconds(self.ttl.try_into().unwrap());
-                res.set_raw_header("Expires", format_datetime_http(&expiry_time));
-                Ok(res)
-            }
-            e @ Err(_) => e,
-        }
+        let expiry_time = time_now + chrono::Duration::seconds(self.ttl.try_into().unwrap());
+        res.set_raw_header("Expires", format_datetime_http(&expiry_time));
+        Ok(res)
     }
 }

 pub struct SafeString(String);

 impl std::fmt::Display for SafeString {
-    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.0.fmt(f)
     }
 }
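The rewritten Cached responder can use `?` because the error arm of the old match simply returned the error unchanged. A small stand-alone sketch of that equivalence, using plain Result values rather than Rocket types:

// Both functions transform the Ok value and pass any Err through untouched.
fn with_match(r: Result<u32, ()>) -> Result<u32, ()> {
    match r {
        Ok(mut v) => {
            v += 1;
            Ok(v)
        }
        e @ Err(_) => e,
    }
}

fn with_question_mark(r: Result<u32, ()>) -> Result<u32, ()> {
    let mut v = r?;
    v += 1;
    Ok(v)
}

fn main() {
    assert_eq!(with_match(Ok(1)), with_question_mark(Ok(1)));
    assert_eq!(with_match(Err(())), with_question_mark(Err(())));
}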
@@ -175,11 +227,9 @@ impl<'r> FromParam<'r> for SafeString {
     type Error = ();

     #[inline(always)]
-    fn from_param(param: &'r RawStr) -> Result<Self, Self::Error> {
-        let s = param.percent_decode().map(|cow| cow.into_owned()).map_err(|_| ())?;
-
-        if s.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) {
-            Ok(SafeString(s))
+    fn from_param(param: &'r str) -> Result<Self, Self::Error> {
+        if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) {
+            Ok(SafeString(param.to_string()))
         } else {
             Err(())
         }
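The SafeString guard is unchanged in spirit: it only accepts ASCII alphanumerics and '-', which keeps path parameters free of separators and traversal sequences (Rocket 0.5 hands over an already-decoded &str, so the manual percent-decoding drops out). A stand-alone sketch of the same whitelist check:

// Same whitelist as SafeString::from_param: ASCII letters, digits, and '-'.
fn is_safe(param: &str) -> bool {
    param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '-'))
}

fn main() {
    assert!(is_safe("a1B2-c3"));
    assert!(!is_safe("../etc/passwd")); // '.' and '/' are rejected
    assert!(!is_safe("id?x=1"));
}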
@@ -193,15 +243,16 @@ const LOGGED_ROUTES: [&str; 6] =

 // Boolean is extra debug, when true, we ignore the whitelist above and also print the mounts
 pub struct BetterLogging(pub bool);
+#[rocket::async_trait]
 impl Fairing for BetterLogging {
     fn info(&self) -> Info {
         Info {
             name: "Better Logging",
-            kind: Kind::Launch | Kind::Request | Kind::Response,
+            kind: Kind::Liftoff | Kind::Request | Kind::Response,
         }
     }

-    fn on_launch(&self, rocket: &Rocket) {
+    async fn on_liftoff(&self, rocket: &Rocket<Orbit>) {
         if self.0 {
             info!(target: "routes", "Routes loaded:");
             let mut routes: Vec<_> = rocket.routes().collect();
@@ -225,34 +276,36 @@ impl Fairing for BetterLogging {
         info!(target: "start", "Rocket has launched from {}", addr);
     }

-    fn on_request(&self, request: &mut Request<'_>, _data: &Data) {
+    async fn on_request(&self, request: &mut Request<'_>, _data: &mut Data<'_>) {
         let method = request.method();
         if !self.0 && method == Method::Options {
             return;
         }
         let uri = request.uri();
         let uri_path = uri.path();
-        let uri_subpath = uri_path.strip_prefix(&CONFIG.domain_path()).unwrap_or(uri_path);
+        let uri_path_str = uri_path.url_decode_lossy();
+        let uri_subpath = uri_path_str.strip_prefix(&CONFIG.domain_path()).unwrap_or(&uri_path_str);
         if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) {
             match uri.query() {
-                Some(q) => info!(target: "request", "{} {}?{}", method, uri_path, &q[..q.len().min(30)]),
-                None => info!(target: "request", "{} {}", method, uri_path),
+                Some(q) => info!(target: "request", "{} {}?{}", method, uri_path_str, &q[..q.len().min(30)]),
+                None => info!(target: "request", "{} {}", method, uri_path_str),
             };
         }
     }

-    fn on_response(&self, request: &Request, response: &mut Response) {
+    async fn on_response<'r>(&self, request: &'r Request<'_>, response: &mut Response<'r>) {
         if !self.0 && request.method() == Method::Options {
             return;
         }
         let uri_path = request.uri().path();
-        let uri_subpath = uri_path.strip_prefix(&CONFIG.domain_path()).unwrap_or(uri_path);
+        let uri_path_str = uri_path.url_decode_lossy();
+        let uri_subpath = uri_path_str.strip_prefix(&CONFIG.domain_path()).unwrap_or(&uri_path_str);
         if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) {
             let status = response.status();
-            if let Some(route) = request.route() {
-                info!(target: "response", "{} => {} {}", route, status.code, status.reason)
+            if let Some(ref route) = request.route() {
+                info!(target: "response", "{} => {}", route, status)
             } else {
-                info!(target: "response", "{} {}", status.code, status.reason)
+                info!(target: "response", "{}", status)
             }
         }
     }
@@ -263,7 +316,7 @@ impl Fairing for BetterLogging {
 //
 use std::{
     fs::{self, File},
-    io::{Read, Result as IOResult},
+    io::Result as IOResult,
     path::Path,
 };

@@ -271,32 +324,23 @@ pub fn file_exists(path: &str) -> bool {
     Path::new(path).exists()
 }

-pub fn read_file(path: &str) -> IOResult<Vec<u8>> {
-    let mut contents: Vec<u8> = Vec::new();
-
-    let mut file = File::open(Path::new(path))?;
-    file.read_to_end(&mut contents)?;
-
-    Ok(contents)
-}
-
 pub fn write_file(path: &str, content: &[u8]) -> Result<(), crate::error::Error> {
     use std::io::Write;
-    let mut f = File::create(path)?;
+    let mut f = match File::create(path) {
+        Ok(file) => file,
+        Err(e) => {
+            if e.kind() == ErrorKind::PermissionDenied {
+                error!("Can't create '{}': Permission denied", path);
+            }
+            return Err(From::from(e));
+        }
+    };
+
     f.write_all(content)?;
     f.flush()?;
     Ok(())
 }

-pub fn read_file_string(path: &str) -> IOResult<String> {
-    let mut contents = String::new();
-
-    let mut file = File::open(Path::new(path))?;
-    file.read_to_string(&mut contents)?;
-
-    Ok(contents)
-}
-
 pub fn delete_file(path: &str) -> IOResult<()> {
     let res = fs::remove_file(path);

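The new write_file inspects std::io::ErrorKind so that a permission problem is logged with a clearer message before the error is propagated. A sketch of that pattern outside this codebase, with eprintln! standing in for the error! macro and a hypothetical helper name:

use std::fs::File;
use std::io::ErrorKind;

// Hypothetical helper mirroring the pattern in write_file.
fn create_logged(path: &str) -> std::io::Result<File> {
    match File::create(path) {
        Ok(f) => Ok(f),
        Err(e) => {
            if e.kind() == ErrorKind::PermissionDenied {
                eprintln!("Can't create '{}': Permission denied", path);
            }
            Err(e)
        }
    }
}

fn main() {
    // On most systems the filesystem root is not writable for a regular
    // user, so the error is logged and then returned to the caller.
    let _ = create_logged("/no-permission-here.tmp");
}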
@@ -337,6 +381,7 @@ pub fn get_uuid() -> String {

 use std::str::FromStr;

+#[inline]
 pub fn upcase_first(s: &str) -> String {
     let mut c = s.chars();
     match c.next() {
@@ -345,6 +390,15 @@ pub fn upcase_first(s: &str) -> String {
     }
 }

+#[inline]
+pub fn lcase_first(s: &str) -> String {
+    let mut c = s.chars();
+    match c.next() {
+        None => String::new(),
+        Some(f) => f.to_lowercase().collect::<String>() + c.as_str(),
+    }
+}
+
 pub fn try_parse_string<S, T>(string: Option<S>) -> Option<T>
 where
     S: AsRef<str>,
@@ -501,7 +555,7 @@ struct UpCaseVisitor;
 impl<'de> Visitor<'de> for UpCaseVisitor {
     type Value = Value;

-    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
         formatter.write_str("an object or an array")
     }

@@ -582,14 +636,13 @@ where
             if tries >= max_tries {
                 return err;
             }

-            sleep(Duration::from_millis(500));
+            Handle::current().block_on(async move { sleep(Duration::from_millis(500)).await });
         }
     }
 }
 }

-pub fn retry_db<F, T, E>(func: F, max_tries: u32) -> Result<T, E>
+pub async fn retry_db<F, T, E>(func: F, max_tries: u32) -> Result<T, E>
 where
     F: Fn() -> Result<T, E>,
     E: std::error::Error,
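retry keeps its synchronous signature here, so it pauses by blocking on the tokio timer via Handle::current().block_on. The loop itself is the classic bounded-retry shape; a stand-alone sketch with a plain thread sleep (the diff's version awaits tokio::time::sleep instead), using a hypothetical helper name:

use std::{thread, time::Duration};

// Call `func` up to `max_tries` times, pausing between attempts.
// Hypothetical helper mirroring the structure of `retry` in util.rs.
fn retry_sketch<F, T, E>(func: F, max_tries: u32) -> Result<T, E>
where
    F: Fn() -> Result<T, E>,
{
    let mut tries = 0;
    loop {
        match func() {
            Ok(v) => return Ok(v),
            Err(e) => {
                tries += 1;
                if tries >= max_tries {
                    return Err(e);
                }
                thread::sleep(Duration::from_millis(500));
            }
        }
    }
}

fn main() {
    let result: Result<u32, &str> = retry_sketch(|| Err("always fails"), 3);
    assert!(result.is_err());
}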
@@ -608,19 +661,22 @@ where

             warn!("Can't connect to database, retrying: {:?}", e);

-            sleep(Duration::from_millis(1_000));
+            sleep(Duration::from_millis(1_000)).await;
         }
     }
 }
 }

-use reqwest::{
-    blocking::{Client, ClientBuilder},
-    header,
-};
+use reqwest::{header, Client, ClientBuilder};

 pub fn get_reqwest_client() -> Client {
-    get_reqwest_client_builder().build().expect("Failed to build client")
+    match get_reqwest_client_builder().build() {
+        Ok(client) => client,
+        Err(e) => {
+            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
+            get_reqwest_client_builder().trust_dns(false).build().expect("Failed to build client")
+        }
+    }
 }

 pub fn get_reqwest_client_builder() -> ClientBuilder {
@@ -628,3 +684,46 @@ pub fn get_reqwest_client_builder() -> ClientBuilder {
     headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Vaultwarden"));
     Client::builder().default_headers(headers).timeout(Duration::from_secs(10))
 }
+
+pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
+    match src_json {
+        Value::Array(elm) => {
+            let mut new_array: Vec<Value> = Vec::with_capacity(elm.len());
+
+            for obj in elm {
+                new_array.push(convert_json_key_lcase_first(obj));
+            }
+            Value::Array(new_array)
+        }
+
+        Value::Object(obj) => {
+            let mut json_map = JsonMap::new();
+            for (key, value) in obj.iter() {
+                match (key, value) {
+                    (key, Value::Object(elm)) => {
+                        let inner_value = convert_json_key_lcase_first(Value::Object(elm.clone()));
+                        json_map.insert(lcase_first(key), inner_value);
+                    }
+
+                    (key, Value::Array(elm)) => {
+                        let mut inner_array: Vec<Value> = Vec::with_capacity(elm.len());
+
+                        for inner_obj in elm {
+                            inner_array.push(convert_json_key_lcase_first(inner_obj.clone()));
+                        }
+
+                        json_map.insert(lcase_first(key), Value::Array(inner_array));
+                    }
+
+                    (key, value) => {
+                        json_map.insert(lcase_first(key), value.clone());
+                    }
+                }
+            }
+
+            Value::Object(json_map)
+        }
+
+        value => value,
+    }
+}
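convert_json_key_lcase_first walks a serde_json::Value (in this file JsonMap is serde_json's Map<String, Value>) and rewrites every object key with lcase_first, recursing into nested objects and arrays. A stand-alone sketch of the key transform and the mapping it produces:

// Lower-case only the first character, as lcase_first above does.
fn lcase_first(s: &str) -> String {
    let mut c = s.chars();
    match c.next() {
        None => String::new(),
        Some(f) => f.to_lowercase().collect::<String>() + c.as_str(),
    }
}

fn main() {
    assert_eq!(lcase_first("OrganizationId"), "organizationId");
    assert_eq!(lcase_first(""), "");
    // Applied recursively over a JSON document:
    //   {"Name": {"Id": 1}, "Tags": [{"Key": "v"}]}
    //   becomes
    //   {"name": {"id": 1}, "tags": [{"key": "v"}]}
}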