Compare commits

467 commits (first listed: 10dadfca06, last listed: 53cc8a65af); the per-file changes are listed below.
.gitignore

@@ -3,13 +3,18 @@ target

# Data folder
data

# Misc
.env
.env.template
.gitattributes
.gitignore
rustfmt.toml

# IDE files
.vscode
.idea
.editorconfig
*.iml

# Documentation
@@ -19,9 +24,17 @@ data
*.yml
*.yaml

# Docker folders
# Docker
hooks
tools
Dockerfile
.dockerignore
docker/**
!docker/healthcheck.sh
!docker/start.sh

# Web vault
web-vault
web-vault

# Vaultwarden Resources
resources
.env.template (124 changes)

@@ -1,8 +1,14 @@
# shellcheck disable=SC2034,SC2148
## Vaultwarden Configuration File
## Uncomment any of the following lines to change the defaults
##
## Be aware that most of these settings will be overridden if they were changed
## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
##
## By default, Vaultwarden expects for this file to be named ".env" and located
## in the current working directory. If this is not the case, the environment
## variable ENV_FILE can be set to the location of this file prior to starting
## Vaultwarden.

## Main data folder
# DATA_FOLDER=data

@@ -24,11 +30,21 @@
## Define the size of the connection pool used for connecting to the database.
# DATABASE_MAX_CONNS=10

## Database connection initialization
## Allows SQL statements to be run whenever a new database connection is created.
## This is mainly useful for connection-scoped pragmas.
## If empty, a database-specific default is used:
## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
## - MySQL: ""
## - PostgreSQL: ""
# DATABASE_CONN_INIT=""

## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
# SENDS_FOLDER=data/sends
# TMP_FOLDER=data/tmp

## Templates data folder, by default uses embedded templates
## Check source code to see the format

@@ -65,11 +81,34 @@
## This setting applies globally to all users.
# EMERGENCY_ACCESS_ALLOWED=true

## Controls whether event logging is enabled for organizations
## This setting applies to organizations.
## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
# ORG_EVENTS_ENABLED=false

## Number of days to retain events stored in the database.
## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
# EVENTS_DAYS_RETAIN=

## BETA FEATURE: Groups
## Controls whether group support is enabled for organizations
## This setting applies to organizations.
## Disabled by default because this is a beta feature, it contains known issues!
## KNOW WHAT YOU ARE DOING!
# ORG_GROUPS_ENABLED=false

## Job scheduler settings
##
## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
## and are always in terms of UTC time (regardless of your local time zone settings).
##
## The schedule format is a bit different from crontab as crontab does not contain seconds.
## You can test the format here: https://crontab.guru, but remove the first digit!
## SEC MIN HOUR DAY OF MONTH MONTH DAY OF WEEK
## "0 30 9,12,15 1,15 May-Aug Mon,Wed,Fri"
## "0 30 * * * * "
## "0 30 1 * * * "
##
## How often (in ms) the job scheduler thread checks for jobs that need running.
## Set to 0 to globally disable scheduled jobs.
# JOB_POLL_INTERVAL_MS=30000

@@ -82,13 +121,21 @@
## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
# TRASH_PURGE_SCHEDULE="0 5 0 * * *"
##
## Cron schedule of the job that checks for incomplete 2FA logins.
## Defaults to once every minute. Set blank to disable this job.
# INCOMPLETE_2FA_SCHEDULE="30 * * * * *"
##
## Cron schedule of the job that sends expiration reminders to emergency access grantors.
## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
# EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE="0 5 * * * *"
## Defaults to hourly (3 minutes after the hour). Set blank to disable this job.
# EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE="0 3 * * * *"
##
## Cron schedule of the job that grants emergency access requests that have met the required wait time.
## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
# EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 5 * * * *"
## Defaults to hourly (7 minutes after the hour). Set blank to disable this job.
# EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 7 * * * *"
##
## Cron schedule of the job that cleans old events from the event table.
## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"

## Enable extended logging, which shows timestamps and targets in the logs
# EXTENDED_LOGGING=true

@@ -98,12 +145,10 @@
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"

## Logging to file
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# LOG_FILE=/path/to/log

## Logging to Syslog
## This requires extended logging
## It's recommended to also set 'ROCKET_CLI_COLORS=off'
# USE_SYSLOG=false

## Log level

@@ -116,7 +161,7 @@
## Enable WAL for the DB
## Set to false to avoid enabling WAL during startup.
## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
## this setting only prevents vaultwarden from automatically enabling it on start.
## this setting only prevents Vaultwarden from automatically enabling it on start.
## Please read project wiki page about this setting first before changing the value as it can
## cause performance degradation or might render the service unable to start.
# ENABLE_DB_WAL=true

@@ -125,10 +170,32 @@
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
# DB_CONNECTION_RETRIES=15

## Icon service
## The predefined icon services are: internal, bitwarden, duckduckgo, google.
## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
##
## `internal` refers to Vaultwarden's built-in icon fetching implementation.
## If an external service is set, an icon request to Vaultwarden will return an HTTP
## redirect to the corresponding icon at the external service. An external service may
## be useful if your Vaultwarden instance has no external network connectivity, or if
## you are concerned that someone may probe your instance to try to detect whether icons
## for certain sites have been cached.
# ICON_SERVICE=internal

## Icon redirect code
## The HTTP status code to use for redirects to an external icon service.
## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
## Temporary redirects are useful while testing different icon services, but once a service
## has been decided on, consider using permanent redirects for cacheability. The legacy codes
## are currently better supported by the Bitwarden clients.
# ICON_REDIRECT_CODE=302

## Disable icon downloading
## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
## otherwise it will delete them and they won't be downloaded again.
## Set to true to disable icon downloading in the internal icon service.
## This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
## network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
## will be deleted eventually, but won't be downloaded again.
# DISABLE_ICON_DOWNLOAD=false

## Icon download timeout

@@ -159,7 +226,7 @@
# EMAIL_EXPIRATION_TIME=600

## Email token size
## Number of digits in an email token (min: 6, max: 19).
## Number of digits in an email 2FA token (min: 6, max: 255).
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
# EMAIL_TOKEN_SIZE=6

@@ -206,6 +273,10 @@
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Vaultwarden

## The number of hours after which an organization invite token, emergency access invite token,
## email verification token and deletion request token will expire (must be at least 1)
# INVITATION_EXPIRATION_HOURS=120

## Per-organization attachment storage limit (KB)
## Max kilobytes of attachment storage allowed per organization.
## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.

@@ -220,10 +291,20 @@
## This setting applies globally, so make sure to inform all users of any changes to this setting.
# TRASH_AUTO_DELETE_DAYS=

## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
## resulting in an email notification. An incomplete 2FA login is one where the correct
## master password was provided but the required 2FA step was not completed, which
## potentially indicates a master password compromise. Set to 0 to disable this check.
## This setting applies globally to all users.
# INCOMPLETE_2FA_TIME_LIMIT=3

## Controls the PBKDF2 password iterations to apply on the server
## The change only applies when the password is changed
# PASSWORD_ITERATIONS=100000

## Controls whether users can set password hints. This setting applies globally to all users.
# PASSWORD_HINTS_ALLOWED=true

## Controls whether a password hint should be shown directly in the web page if
## SMTP service is not configured. Not recommended for publicly-accessible instances
## as this provides unauthenticated access to potentially sensitive data.

@@ -234,7 +315,7 @@
## It's recommended to configure this value, otherwise certain functionality might not work,
## like attachment downloads, email links and U2F.
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
# DOMAIN=https://bw.domain.tld:8443
# DOMAIN=https://vw.domain.tld:8443

## Allowed iframe ancestors (Know the risks!)
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors

@@ -243,6 +324,17 @@
## Multiple values must be separated with a whitespace.
# ALLOWED_IFRAME_ANCESTORS=

## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in.
# LOGIN_RATELIMIT_SECONDS=60
## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`.
## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
# LOGIN_RATELIMIT_MAX_BURST=10

## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
# ADMIN_RATELIMIT_SECONDS=300
## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
# ADMIN_RATELIMIT_MAX_BURST=3

## Yubico (Yubikey) Settings
## Set your Client ID and Secret Key for Yubikey OTP
## You can generate it here: https://upgrade.yubico.com/getapikey/

@@ -287,9 +379,8 @@
# SMTP_HOST=smtp.domain.tld
# SMTP_FROM=vaultwarden@domain.tld
# SMTP_FROM_NAME=Vaultwarden
# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL needs to be set to true for this option to work. Usually port 465 is used here.
# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
# SMTP_USERNAME=username
# SMTP_PASSWORD=password
# SMTP_TIMEOUT=15

@@ -304,6 +395,9 @@
## but might need to be changed in case it trips some anti-spam filters
# HELO_NAME=

## Embed images as email attachments
# SMTP_EMBED_IMAGES=false

## SMTP debugging
## When set to true this will output very detailed SMTP messages.
## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
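As a side note on the `ENV_FILE` behavior documented at the top of this file: the lookup is simple to picture in code. The sketch below is an illustrative assumption, not code from this diff, using the `dotenvy` crate that the Cargo.toml changes further down switch to; it prefers the path in `ENV_FILE` and falls back to `./.env`.

```rust
use std::env;

// Hypothetical sketch of the documented lookup: prefer the file named by
// ENV_FILE, otherwise try ./.env in the current working directory. A missing
// file is tolerated so the process can still be configured purely through
// environment variables.
fn load_env_file() {
    match env::var("ENV_FILE") {
        Ok(path) => {
            let _ = dotenvy::from_filename(path);
        }
        Err(_) => {
            let _ = dotenvy::dotenv();
        }
    }
}
```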
.github/FUNDING.yml (vendored, 1 change)

@@ -1,2 +1,3 @@
github: dani-garcia
liberapay: dani-garcia
custom: ["https://paypal.me/DaniGG"]
.github/workflows/build.yml (vendored, 211 changes)

@@ -8,7 +8,6 @@ on:
- "migrations/**"
- "Cargo.*"
- "build.rs"
- "diesel.toml"
- "rust-toolchain"
pull_request:
paths:
@@ -17,11 +16,12 @@ on:
- "migrations/**"
- "Cargo.*"
- "build.rs"
- "diesel.toml"
- "rust-toolchain"

jobs:
build:
runs-on: ubuntu-20.04
timeout-minutes: 120
# Make warnings errors, this is to prevent warnings slipping through.
# This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
env:
@@ -30,118 +30,163 @@ jobs:
fail-fast: false
matrix:
channel:
- nightly
target-triple:
- x86_64-unknown-linux-gnu
- "rust-toolchain" # The version defined in rust-toolchain
- "msrv" # The supported MSRV
include:
- target-triple: x86_64-unknown-linux-gnu
host-triple: x86_64-unknown-linux-gnu
features: [sqlite,mysql,postgresql] # Remember to update the `cargo test` to match the amount of features
channel: nightly
os: ubuntu-20.04
ext: ""
- channel: "msrv"
version: "1.60.0"

name: Build and Test ${{ matrix.channel }}

name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
runs-on: ${{ matrix.os }}
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
- name: "Checkout"
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
# End Checkout the repo

# Install musl-tools when needed
- name: Install musl tools
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
if: matrix.target-triple == 'x86_64-unknown-linux-musl'
# End Install musl-tools when needed

# Install dependencies
- name: Install dependencies Ubuntu
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
if: startsWith( matrix.os, 'ubuntu' )
- name: "Install dependencies Ubuntu"
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
# End Install dependencies

# Enable Rust Caching
- uses: Swatinem/rust-cache@842ef286fff290e445b90b4002cc9807c3669641 # v1.3.0
# End Enable Rust Caching

# Determine rust-toolchain version
- name: Init Variables
id: toolchain
shell: bash
if: ${{ matrix.channel == 'rust-toolchain' }}
run: |
RUST_TOOLCHAIN="$(cat rust-toolchain)"
echo "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" | tee -a "${GITHUB_OUTPUT}"
# End Determine rust-toolchain version

# Uses the rust-toolchain file to determine version
- name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f # v1.0.6
- name: "Install rust-toolchain version"
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb # master @ 2022-10-25 - 21:40 GMT+2
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
profile: minimal
target: ${{ matrix.target-triple }}
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
components: clippy, rustfmt
# End Uses the rust-toolchain file to determine version

# Install the MSRV channel to be used
- name: "Install MSRV version"
uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb # master @ 2022-10-25 - 21:40 GMT+2
if: ${{ matrix.channel != 'rust-toolchain' }}
with:
toolchain: ${{ matrix.version }}
# End Install the MSRV channel to be used

# Enable Rust Caching
- uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0
# End Enable Rust Caching

# Show environment
- name: "Show environment"
run: |
rustc -vV
cargo -vV
# End Show environment

# Run cargo tests (In release mode to speed up future builds)
# First test all features together, afterwards test them separately.
- name: "`cargo test --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: test
args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
# Test single features
# 0: sqlite
- name: "`cargo test --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: test
args: --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}
if: ${{ matrix.features[0] != '' }}
# 1: mysql
- name: "`cargo test --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: test
args: --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}
if: ${{ matrix.features[1] != '' }}
# 2: postgresql
- name: "`cargo test --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: test
args: --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}
if: ${{ matrix.features[2] != '' }}
- name: "test features: sqlite,mysql,postgresql,enable_mimalloc"
id: test_sqlite_mysql_postgresql_mimalloc
if: $${{ always() }}
run: |
cargo test --release --features sqlite,mysql,postgresql,enable_mimalloc

- name: "test features: sqlite,mysql,postgresql"
id: test_sqlite_mysql_postgresql
if: $${{ always() }}
run: |
cargo test --release --features sqlite,mysql,postgresql

- name: "test features: sqlite"
id: test_sqlite
if: $${{ always() }}
run: |
cargo test --release --features sqlite

- name: "test features: mysql"
id: test_mysql
if: $${{ always() }}
run: |
cargo test --release --features mysql

- name: "test features: postgresql"
id: test_postgresql
if: $${{ always() }}
run: |
cargo test --release --features postgresql
# End Run cargo tests

# Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
- name: "`cargo clippy --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: clippy
args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} -- -D warnings
- name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc"
id: clippy
if: ${{ always() && matrix.channel == 'rust-toolchain' }}
run: |
cargo clippy --release --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings
# End Run cargo clippy

# Run cargo fmt
- name: '`cargo fmt`'
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: fmt
args: --all -- --check
# Run cargo fmt (Only run on rust-toolchain defined version)
- name: "check formatting"
id: formatting
if: ${{ always() && matrix.channel == 'rust-toolchain' }}
run: |
cargo fmt --all -- --check
# End Run cargo fmt

# Build the binary
- name: "`cargo build --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: build
args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
# Check for any previous failures, if there are stop, else continue.
# This is useful so all test/clippy/fmt actions are done, and they can all be addressed
- name: "Some checks failed"
if: ${{ failure() }}
run: |
echo "### :x: Checks Failed!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "|Job|Status|" >> $GITHUB_STEP_SUMMARY
echo "|---|------|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite,mysql,postgresql)|${{ steps.test_sqlite_mysql_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (sqlite)|${{ steps.test_sqlite.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (mysql)|${{ steps.test_mysql.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|test (postgresql)|${{ steps.test_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.clippy.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "|fmt|${{ steps.formatting.outcome }}|" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Please check the failed jobs and fix where needed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
exit 1

# Check for any previous failures, if there are stop, else continue.
# This is useful so all test/clippy/fmt actions are done, and they can all be addressed
- name: "All checks passed"
if: ${{ success() }}
run: |
echo "### :tada: Checks Passed!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY

# Build the binary to upload to the artifacts
- name: "build features: sqlite,mysql,postgresql"
if: ${{ matrix.channel == 'rust-toolchain' }}
run: |
cargo build --release --features sqlite,mysql,postgresql
# End Build the binary

# Upload artifact to Github Actions
- name: Upload artifact
uses: actions/upload-artifact@27121b0bdffd731efa15d66772be8dc71245d074 # v2.2.4
- name: "Upload artifact"
uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # v3.1.1
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
name: vaultwarden
path: target/release/vaultwarden
# End Upload artifact to Github Actions
.github/workflows/hadolint.yml (vendored, 19 changes)

@@ -1,33 +1,30 @@
name: Hadolint

on:
push:
paths:
- "docker/**"

pull_request:
paths:
- "docker/**"
on: [
push,
pull_request
]

jobs:
hadolint:
name: Validate Dockerfile syntax
runs-on: ubuntu-20.04
timeout-minutes: 30
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
# End Checkout the repo

# Download hadolint
# Download hadolint - https://github.com/hadolint/hadolint/releases
- name: Download hadolint
shell: bash
run: |
sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
sudo chmod +x /usr/local/bin/hadolint
env:
HADOLINT_VERSION: 2.7.0
HADOLINT_VERSION: 2.12.0
# End Download hadolint

# Test Dockerfiles
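For a local pre-push check, the same pinned binary can be invoked directly on a Dockerfile, e.g. `hadolint docker/amd64/Dockerfile` (the exact path is an assumption based on the `docker/**` layout referenced above).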
.github/workflows/release.yml (vendored, 17 changes)

@@ -24,21 +24,22 @@ jobs:
# Some checks to determine if we need to continue with building a new docker.
# We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
skip_check:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- name: Skip Duplicates Actions
id: skip_check
uses: fkirc/skip-duplicate-actions@f75dd6564bb646f95277dc8c3b80612e46a4a1ea # v3.4.1
uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
with:
cancel_others: 'true'
# Only run this when not creating a tag
if: ${{ startsWith(github.ref, 'refs/heads/') }}

docker-build:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
timeout-minutes: 120
needs: skip_check
# Start a local docker registry to be used to generate multi-arch images.
services:
@@ -60,13 +61,13 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
with:
fetch-depth: 0

# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 # v1.10.0
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -78,11 +79,9 @@ jobs:
run: |
# Check which main tag we are going to build determined by github.ref
if [[ "${{ github.ref }}" == refs/tags/* ]]; then
echo "set-output name=DOCKER_TAG::${GITHUB_REF#refs/*/}"
echo "::set-output name=DOCKER_TAG::${GITHUB_REF#refs/*/}"
echo "DOCKER_TAG=${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
echo "set-output name=DOCKER_TAG::testing"
echo "::set-output name=DOCKER_TAG::testing"
echo "DOCKER_TAG=testing" | tee -a "${GITHUB_OUTPUT}"
fi
# End Determine Docker Tag
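The replaced `echo` lines above reflect the migration off GitHub's deprecated `::set-output` workflow command: instead of printing `::set-output name=DOCKER_TAG::<value>` to stdout, the step now appends `DOCKER_TAG=<value>` to the file whose path the runner exposes as `$GITHUB_OUTPUT` (done here with `tee -a`, which also echoes the value into the job log). Downstream references of the form `steps.<id>.outputs.DOCKER_TAG` keep working unchanged.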
.pre-commit-config.yaml

@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.0.1
rev: v4.3.0
hooks:
- id: check-yaml
- id: check-json
@@ -25,14 +25,16 @@ repos:
description: Test the package for errors.
entry: cargo test
language: system
args: ["--features", "sqlite,mysql,postgresql", "--"]
types: [rust]
args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--"]
types_or: [rust, file]
files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
pass_filenames: false
- id: cargo-clippy
name: cargo clippy
description: Lint Rust sources
entry: cargo clippy
language: system
args: ["--features", "sqlite,mysql,postgresql", "--", "-D", "warnings"]
types: [rust]
args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
types_or: [rust, file]
files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
pass_filenames: false
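Once this configuration is in place, the hooks can be exercised locally with `pre-commit install` (to run them on every commit) or `pre-commit run --all-files` for a one-off pass over the repository.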
Cargo.lock (generated, 3273 changes)

Cargo.toml (179 changes)
@@ -2,8 +2,8 @@
name = "vaultwarden"
version = "1.0.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
edition = "2018"
rust-version = "1.57"
edition = "2021"
rust-version = "1.60.0"
resolver = "2"

repository = "https://github.com/dani-garcia/vaultwarden"

@@ -13,6 +13,7 @@ publish = false
build = "build.rs"

[features]
# default = ["sqlite"]
# Empty to keep compatibility, prefer to set USE_SYSLOG=true
enable_syslog = []
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
@@ -20,137 +21,145 @@ postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
# Enable to use a vendored and statically linked openssl
vendored_openssl = ["openssl/vendored"]
# Enable MiMalloc memory allocator to replace the default malloc
# This can improve performance for Alpine builds
enable_mimalloc = ["mimalloc"]
# This is a development dependency, and should only be used during development!
# It enables the usage of the diesel_logger crate, which is able to output the generated queries.
# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
# if you want to turn off the logging for a specific run.
query_logger = ["diesel_logger"]

# Enable unstable features, requires nightly
# Currently only used to enable Rust's official ip support
unstable = []

[target."cfg(not(windows))".dependencies]
syslog = "4.0.1"
# Logging
syslog = "6.0.1" # Needs to be v4 until fern is updated

[dependencies]
# Web framework for nightly with a focus on ease-of-use, expressibility, and speed.
rocket = { version = "=0.5.0-dev", features = ["tls"], default-features = false }
rocket_contrib = "=0.5.0-dev"
# Logging
log = "0.4.17"
fern = { version = "0.6.1", features = ["syslog-6"] }
tracing = { version = "0.1.37", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

# HTTP client
reqwest = { version = "0.11.5", features = ["blocking", "json", "gzip", "brotli", "socks", "cookies"] }
backtrace = "0.3.67" # Logging panics to logfile instead stderr only

# Used for custom short lived cookie jar
cookie = "0.15.1"
cookie_store = "0.15.0"
bytes = "1.1.0"
url = "2.2.2"
# A `dotenv` implementation for Rust
dotenvy = { version = "0.15.6", default-features = false }

# multipart/form-data support
multipart = { version = "0.18.0", features = ["server"], default-features = false }
# Lazy initialization
once_cell = "1.16.0"

# WebSockets library
ws = { version = "0.11.0", package = "parity-ws" }
# Numerical libraries
num-traits = "0.2.15"
num-derive = "0.3.3"

# MessagePack library
rmpv = "1.0.0"
# Web framework
rocket = { version = "0.5.0-rc.2", features = ["tls", "json"], default-features = false }

# Concurrent hashmap implementation
chashmap = "2.2.2"
# WebSockets libraries
tokio-tungstenite = "0.18.0"
rmpv = "1.0.0" # MessagePack library
dashmap = "5.4.0"

# Async futures
futures = "0.3.25"
tokio = { version = "1.23.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }

# A generic serialization/deserialization framework
serde = { version = "1.0.130", features = ["derive"] }
serde_json = "1.0.68"

# Logging
log = "0.4.14"
fern = { version = "0.6.0", features = ["syslog-4"] }
serde = { version = "1.0.150", features = ["derive"] }
serde_json = "1.0.89"

# A safe, extensible ORM and Query builder
diesel = { version = "1.4.8", features = [ "chrono", "r2d2"] }
diesel_migrations = "1.4.0"
diesel = { version = "2.0.2", features = ["chrono", "r2d2"] }
diesel_migrations = "2.0.0"
diesel_logger = { version = "0.2.0", optional = true }

# Bundled SQLite
libsqlite3-sys = { version = "0.22.2", features = ["bundled"], optional = true }
libsqlite3-sys = { version = "0.25.2", features = ["bundled"], optional = true }

# Crypto-related libraries
rand = "0.8.4"
rand = { version = "0.8.5", features = ["small_rng"] }
ring = "0.16.20"

# UUID generation
uuid = { version = "0.8.2", features = ["v4"] }
uuid = { version = "1.2.2", features = ["v4"] }

# Date and time libraries
chrono = { version = "0.4.19", features = ["serde"] }
chrono-tz = "0.6.0"
time = "0.2.27"
chrono = { version = "0.4.23", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.8.1"
time = "0.3.17"

# Job scheduler
job_scheduler = "1.2.1"
job_scheduler_ng = "2.0.3"

# TOTP library
totp-lite = "1.0.3"

# Data encoding library
data-encoding = "2.3.2"
# Data encoding library Hex/Base32/Base64
data-encoding = "2.3.3"

# JWT library
jsonwebtoken = "7.2.0"
jsonwebtoken = "8.2.0"

# U2F library
u2f = "0.2.0"
webauthn-rs = "0.3.0-alpha.12"
# TOTP library
totp-lite = "2.0.0"

# Yubico Library
yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }

# A `dotenv` implementation for Rust
dotenv = { version = "0.15.0", default-features = false }
# WebAuthn libraries
webauthn-rs = "0.3.2"

# Lazy initialization
once_cell = "1.8.0"
# Handling of URL's for WebAuthn
url = "2.3.1"

# Numerical libraries
num-traits = "0.2.14"
num-derive = "0.3.3"

# Email libraries
tracing = { version = "0.1.29", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled.
lettre = { version = "0.10.0-rc.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
# Email libraries
lettre = { version = "0.10.1", features = ["smtp-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
percent-encoding = "2.2.0" # URL encoding library used for URL's in the emails
email_address = "0.2.4"

# Template library
handlebars = { version = "4.1.3", features = ["dir_source"] }
handlebars = { version = "4.3.5", features = ["dir_source"] }

# HTTP client
reqwest = { version = "0.11.13", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }

# For favicon extraction from main website
html5ever = "0.25.1"
markup5ever_rcdom = "0.1.0"
regex = { version = "1.5.4", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.1.0"
html5gum = "0.5.2"
regex = { version = "1.7.0", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.2.0"
bytes = "1.3.0"
cached = "0.40.0"

# Used for custom short lived cookie jar during favicon extraction
cookie = "0.16.1"
cookie_store = "0.19.0"

# Used by U2F, JWT and Postgres
openssl = "0.10.36"

# URL encoding library
percent-encoding = "2.1.0"
# Punycode conversion
idna = "0.2.3"
openssl = "0.10.44"

# CLI argument parsing
pico-args = "0.4.2"

# Logging panics to logfile instead stderr only
backtrace = "0.3.61"
pico-args = "0.5.0"

# Macro ident concatenation
paste = "1.0.5"
paste = "1.0.10"
governor = "0.5.1"

# Check client versions for specific features.
semver = "1.0.14"

# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.32", features = ["secure"], default-features = false, optional = true }

[patch.crates-io]
# Use newest ring
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
# Using a patched version of multer-rs (Used by Rocket) to fix attachment/send file uploads
# Issue: https://github.com/dani-garcia/vaultwarden/issues/2644
# Patch: https://github.com/BlackDex/multer-rs/commit/477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a
multer = { git = "https://github.com/BlackDex/multer-rs", rev = "477d16b7fa0f361b5c2a5ba18a5b28bec6d26a8a" }

# For favicon extraction from main website
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = 'eb7330b5296c0d43816d1346211b74182bb4ae37' }

# The maintainer of the `job_scheduler` crate doesn't seem to have responded
# to any issues or PRs for almost a year (as of April 2021). This hopefully
# temporary fork updates Cargo.toml to use more up-to-date dependencies.
# In particular, `cron` has since implemented parsing of some common syntax
# that wasn't previously supported (https://github.com/zslayton/cron/pull/64).
job_scheduler = { git = 'https://github.com/jjlin/job_scheduler', rev = 'ee023418dbba2bfe1e30a5fd7d937f9e33739806' }
# Strip debuginfo from the release builds
# Also enable thin LTO for some optimizations
[profile.release]
strip = "debuginfo"
lto = "thin"
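Because no default feature set is enabled, a database backend has to be chosen explicitly at build time; the compile-time check in the build.rs diff further down suggests `cargo build --features sqlite` for the previous default, and CI exercises the full `sqlite,mysql,postgresql,enable_mimalloc` combination.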
README.md (20 changes)

@@ -7,12 +7,12 @@
[](https://hub.docker.com/r/vaultwarden/server)
[](https://deps.rs/repo/github/dani-garcia/vaultwarden)
[](https://github.com/dani-garcia/vaultwarden/releases/latest)
[](https://github.com/dani-garcia/vaultwarden/blob/master/LICENSE.txt)
[](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt)
[](https://matrix.to/#/#vaultwarden:matrix.org)

Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden).

**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor 8bit Solutions LLC.**
**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor Bitwarden, Inc.**

#### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels.

@@ -58,32 +58,34 @@ If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org]
### Sponsors
Thanks for your contribution to the project!

<!--
<table>
<tr>
<td align="center">
<a href="https://github.com/netdadaltd">
<img src="https://avatars.githubusercontent.com/u/77323954?s=75&v=4" width="75px;" alt="netdadaltd"/>
<a href="https://github.com/username">
<img src="https://avatars.githubusercontent.com/u/725423?s=75&v=4" width="75px;" alt="username"/>
<br />
<sub><b>netDada Ltd.</b></sub>
<sub><b>username</b></sub>
</a>
</td>
</tr>
</table>

<br/>
-->

<table>
<tr>
<td align="center">
<a href="https://github.com/Gyarbij" style="width: 75px">
<sub><b>Chono N</b></sub>
<a href="https://github.com/themightychris" style="width: 75px">
<sub><b>Chris Alfano</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/themightychris">
<sub><b>Chris Alfano</b></sub>
<a href="https://github.com/numberly" style="width: 75px">
<sub><b>Numberly</b></sub>
</a>
</td>
</tr>
@@ -1,2 +0,0 @@
[global.limits]
json = 10485760 # 10 MiB

build.rs (56 changes)
@@ -9,17 +9,25 @@ fn main() {
|
||||
println!("cargo:rustc-cfg=mysql");
|
||||
#[cfg(feature = "postgresql")]
|
||||
println!("cargo:rustc-cfg=postgresql");
|
||||
#[cfg(feature = "query_logger")]
|
||||
println!("cargo:rustc-cfg=query_logger");
|
||||
|
||||
#[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
|
||||
compile_error!(
|
||||
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
|
||||
);
|
||||
|
||||
if let Ok(version) = env::var("BWRS_VERSION") {
|
||||
println!("cargo:rustc-env=BWRS_VERSION={}", version);
|
||||
println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
|
||||
} else {
|
||||
read_git_info().ok();
|
||||
#[cfg(all(not(debug_assertions), feature = "query_logger"))]
|
||||
compile_error!("Query Logging is only allowed during development, it is not intented for production usage!");
|
||||
|
||||
// Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
|
||||
// If neither exist, read from git.
|
||||
let maybe_vaultwarden_version =
|
||||
env::var("VW_VERSION").or_else(|_| env::var("BWRS_VERSION")).or_else(|_| version_from_git_info());
|
||||
|
||||
if let Ok(version) = maybe_vaultwarden_version {
|
||||
println!("cargo:rustc-env=VW_VERSION={version}");
|
||||
println!("cargo:rustc-env=CARGO_PKG_VERSION={version}");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,46 +41,40 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
|
||||
}
|
||||
|
||||
-/// This method reads info from Git, namely tags, branch, and revision
-fn read_git_info() -> Result<(), std::io::Error> {
+/// To access these values, use:
+/// - env!("GIT_EXACT_TAG")
+/// - env!("GIT_LAST_TAG")
+/// - env!("GIT_BRANCH")
+/// - env!("GIT_REV")
+/// - env!("VW_VERSION")
+fn version_from_git_info() -> Result<String, std::io::Error> {
 // The exact tag for the current commit, can be empty when
 // the current commit doesn't have an associated tag
 let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok();
 if let Some(ref exact) = exact_tag {
-println!("cargo:rustc-env=GIT_EXACT_TAG={}", exact);
+println!("cargo:rustc-env=GIT_EXACT_TAG={exact}");
 }

 // The last available tag, equal to exact_tag when
 // the current commit is tagged
 let last_tag = run(&["git", "describe", "--abbrev=0", "--tags"])?;
-println!("cargo:rustc-env=GIT_LAST_TAG={}", last_tag);
+println!("cargo:rustc-env=GIT_LAST_TAG={last_tag}");

 // The current branch name
 let branch = run(&["git", "rev-parse", "--abbrev-ref", "HEAD"])?;
-println!("cargo:rustc-env=GIT_BRANCH={}", branch);
+println!("cargo:rustc-env=GIT_BRANCH={branch}");

 // The current git commit hash
 let rev = run(&["git", "rev-parse", "HEAD"])?;
 let rev_short = rev.get(..8).unwrap_or_default();
-println!("cargo:rustc-env=GIT_REV={}", rev_short);
+println!("cargo:rustc-env=GIT_REV={rev_short}");

 // Combined version
-let version = if let Some(exact) = exact_tag {
-exact
+if let Some(exact) = exact_tag {
+Ok(exact)
 } else if &branch != "main" && &branch != "master" {
-format!("{}-{} ({})", last_tag, rev_short, branch)
+Ok(format!("{last_tag}-{rev_short} ({branch})"))
 } else {
-format!("{}-{}", last_tag, rev_short)
-};
-
-println!("cargo:rustc-env=BWRS_VERSION={}", version);
-println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
-
-// To access these values, use:
-// env!("GIT_EXACT_TAG")
-// env!("GIT_LAST_TAG")
-// env!("GIT_BRANCH")
-// env!("GIT_REV")
-// env!("BWRS_VERSION")
-
-Ok(())
+Ok(format!("{last_tag}-{rev_short}"))
+}
 }
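Note: per the new doc comment, version_from_git_info() yields, e.g., "1.27.0" when the checked-out commit carries an exact tag, "1.26.0-abcd1234 (mybranch)" on a branch other than main/master, and "1.26.0-abcd1234" otherwise (tag and hash here are made-up examples). Elsewhere in the crate the emitted variables can then be read at compile time; a minimal sketch (illustrative, not a quote of the project's source):

// Sketch: consuming the metadata emitted by build.rs at compile time.
pub fn version() -> &'static str {
    // VW_VERSION is set via `cargo:rustc-env=VW_VERSION=...` above;
    // option_env! keeps this compiling even if build.rs did not emit it.
    // CARGO_PKG_VERSION is always provided by cargo itself.
    option_env!("VW_VERSION").unwrap_or(env!("CARGO_PKG_VERSION"))
}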

@@ -1,3 +1,4 @@
+# syntax=docker/dockerfile:1
 # The cross-built images have the build arch (`amd64`) embedded in the image
 # manifest, rather than the target arch. For example:
 #

@@ -3,31 +3,39 @@
 # This file was generated using a Jinja2 template.
 # Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

-{% set build_stage_base_image = "rust:1.55-buster" %}
+{% set build_stage_base_image = "rust:1.66-bullseye" %}
 {% if "alpine" in target_file %}
 {% if "amd64" in target_file %}
-{% set build_stage_base_image = "clux/muslrust:nightly-2021-10-06" %}
-{% set runtime_stage_base_image = "alpine:3.14" %}
+{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-stable-1.66.0" %}
+{% set runtime_stage_base_image = "alpine:3.17" %}
 {% set package_arch_target = "x86_64-unknown-linux-musl" %}
 {% elif "armv7" in target_file %}
-{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.14" %}
+{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-stable-1.66.0" %}
+{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.17" %}
 {% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
+{% elif "armv6" in target_file %}
+{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-stable-1.66.0" %}
+{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.17" %}
+{% set package_arch_target = "arm-unknown-linux-musleabi" %}
+{% elif "arm64" in target_file %}
+{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-stable-1.66.0" %}
+{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.17" %}
+{% set package_arch_target = "aarch64-unknown-linux-musl" %}
 {% endif %}
 {% elif "amd64" in target_file %}
-{% set runtime_stage_base_image = "debian:buster-slim" %}
+{% set runtime_stage_base_image = "debian:bullseye-slim" %}
 {% elif "arm64" in target_file %}
-{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
+{% set runtime_stage_base_image = "balenalib/aarch64-debian:bullseye" %}
 {% set package_arch_name = "arm64" %}
 {% set package_arch_target = "aarch64-unknown-linux-gnu" %}
 {% set package_cross_compiler = "aarch64-linux-gnu" %}
 {% elif "armv6" in target_file %}
-{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
+{% set runtime_stage_base_image = "balenalib/rpi-debian:bullseye" %}
 {% set package_arch_name = "armel" %}
 {% set package_arch_target = "arm-unknown-linux-gnueabi" %}
 {% set package_cross_compiler = "arm-linux-gnueabi" %}
 {% elif "armv7" in target_file %}
-{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
+{% set runtime_stage_base_image = "balenalib/armv7hf-debian:bullseye" %}
 {% set package_arch_name = "armhf" %}
 {% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
 {% set package_cross_compiler = "arm-linux-gnueabihf" %}
@@ -51,8 +59,8 @@
 # https://docs.docker.com/develop/develop-images/multistage-build/
 # https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-{% set vault_version = "2.23.0c" %}
-{% set vault_image_digest = "sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459" %}
+{% set vault_version = "v2022.12.0" %}
+{% set vault_image_digest = "sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e" %}
 # The web-vault digest specifies a particular web-vault build on Docker Hub.
 # Using the digest instead of the tag name provides better security,
 # as the digest of an image is immutable, whereas a tag name can later
@@ -62,35 +70,20 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v{{ vault_version }}
-# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:v{{ vault_version }}
+# $ docker pull vaultwarden/web-vault:{{ vault_version }}
+# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" vaultwarden/web-vault:{{ vault_version }}
 # [vaultwarden/web-vault@{{ vault_image_digest }}]
 #
 # - Conversely, to get the tag name from the digest:
 # $ docker image inspect --format "{{ '{{' }}.RepoTags}}" vaultwarden/web-vault@{{ vault_image_digest }}
-# [vaultwarden/web-vault:v{{ vault_version }}]
+# [vaultwarden/web-vault:{{ vault_version }}]
 #
 FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault

 ########################## BUILD IMAGE ##########################
 FROM {{ build_stage_base_image }} as build

 {% if "alpine" in target_file %}
 {% if "amd64" in target_file %}
-# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
-ARG DB=sqlite,postgresql
 {% set features = "sqlite,postgresql" %}
 {% else %}
-# Alpine-based ARM (musl) only supports sqlite during compile time.
-# We now also need to add vendored_openssl, because the current base image we use to build has OpenSSL removed.
-ARG DB=sqlite,vendored_openssl
 {% set features = "sqlite" %}
 {% endif %}
 {% else %}
-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql
 {% set features = "sqlite,mysql,postgresql" %}
 {% endif %}


 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -100,69 +93,43 @@ ENV DEBIAN_FRONTEND=noninteractive \
 CARGO_HOME="/root/.cargo" \
 USER="root"

+{# {% if "alpine" not in target_file and "buildx" in target_file %}
+# Debian based Buildx builds can use some special apt caching to speedup building.
+# By default Debian based images have some rules to keep docker builds clean, we need to remove this.
+# See: https://hub.docker.com/r/docker/dockerfile
+RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
+{% endif %} #}

 # Create CARGO_HOME folder and don't download rust docs
 RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
 && rustup set profile minimal

 {% if "alpine" in target_file %}
-ENV RUSTFLAGS='-C link-arg=-s'
-{% if "armv7" in target_file %}
-{#- https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html -#}
-ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
+{% if "armv6" in target_file %}
+# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
+ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/{{ package_arch_target }}/lib/libatomic.a'
 {% endif %}
 {% elif "arm" in target_file %}
-# NOTE: Any apt-get/dpkg after this stage will fail because of broken dependencies.
-# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
-# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
-# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
-# What we can do is a force install, because nothing important is overlapping each other.
-#
+# Install required build libs for {{ package_arch_name }} architecture.
+# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > /etc/apt/sources.list.d/deb-src.list \
-&& dpkg --add-architecture {{ package_arch_name }} \
+# hadolint ignore=DL3059
+RUN dpkg --add-architecture {{ package_arch_name }} \
 && apt-get update \
 && apt-get install -y \
 --no-install-recommends \
 libssl-dev{{ package_arch_prefix }} \
 libc6-dev{{ package_arch_prefix }} \
 libpq5{{ package_arch_prefix }} \
-libpq-dev \
-libmariadb3:amd64 \
+libpq-dev{{ package_arch_prefix }} \
+libmariadb3{{ package_arch_prefix }} \
+libmariadb-dev{{ package_arch_prefix }} \
+libmariadb-dev-compat{{ package_arch_prefix }} \
 gcc-{{ package_cross_compiler }} \
 #
-# Manual install libmariadb-dev-compat:amd64 ( After this broken dependencies will break apt )
-&& apt-get download libmariadb-dev-compat:amd64 \
-&& dpkg --force-all -i ./libmariadb-dev-compat*.deb \
-&& rm -rvf ./libmariadb-dev-compat*.deb \
-&& apt-get clean \
-&& rm -rf /var/lib/apt/lists/* \
-#
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-&& ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so \
-#
 # Make sure cargo has the right target config
 && echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
 && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
 && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"

 # Set arm specific environment values
-ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
-ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
+ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
+CROSS_COMPILE="1" \
+OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
+OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"

 {% elif "amd64" in target_file %}
 # Install DB packages
@@ -188,6 +155,14 @@ COPY ./build.rs ./build.rs
 RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
 {% endif %}

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+{% if "alpine" in target_file %}
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+{% else %}
+ARG DB=sqlite,mysql,postgresql
+{% endif %}

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -203,26 +178,22 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
-RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}
-{% if "alpine" in target_file %}
-{% if "armv7" in target_file %}
-# hadolint ignore=DL3059
-RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden
-{% endif %}
-{% endif %}
+RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
 FROM {{ runtime_stage_base_image }}

-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-{% if "alpine" in runtime_stage_base_image %}
-ENV SSL_CERT_DIR=/etc/ssl/certs
+ENV ROCKET_PROFILE="release" \
+ROCKET_ADDRESS=0.0.0.0 \
+ROCKET_PORT=80
+{%- if "alpine" in runtime_stage_base_image %} \
+SSL_CERT_DIR=/etc/ssl/certs
 {% endif %}


 {% if "amd64" not in target_file %}
 # hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
@@ -235,13 +206,6 @@ RUN mkdir /data \
 openssl \
 tzdata \
 curl \
-dumb-init \
-{% if "mysql" in features %}
-mariadb-connector-c \
-{% endif %}
-{% if "postgresql" in features %}
-postgresql-libs \
-{% endif %}
 ca-certificates
 {% else %}
 && apt-get update && apt-get install -y \
@@ -249,13 +213,20 @@ RUN mkdir /data \
 openssl \
 ca-certificates \
 curl \
-dumb-init \
 libmariadb-dev-compat \
 libpq5 \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*
 {% endif %}

+{% if "armv6" in target_file and "alpine" not in target_file %}
+# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
+# This symlink was there in the buster images, and for some reason this is needed.
+# hadolint ignore=DL3059
+RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3
+
+{% endif -%}

 {% if "amd64" not in target_file %}
 # hadolint ignore=DL3059
 RUN [ "cross-build-end" ]
@@ -268,7 +239,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 {% if package_arch_target is defined %}
 COPY --from=build /app/target/{{ package_arch_target }}/release/vaultwarden .
@@ -281,6 +251,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]

@@ -16,21 +16,20 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2.23.0c
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
-# [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
+# $ docker pull vaultwarden/web-vault:v2022.12.0
+# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
+# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
-# [vaultwarden/web-vault:v2.23.0c]
+# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
+# [vaultwarden/web-vault:v2022.12.0]
 #
-FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
+FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.55-buster as build
+FROM rust:1.66-bullseye as build


-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -64,6 +63,9 @@ COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs


+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -79,16 +81,17 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM debian:buster-slim
+FROM debian:bullseye-slim

-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
+ENV ROCKET_PROFILE="release" \
+ROCKET_ADDRESS=0.0.0.0 \
+ROCKET_PORT=80


 # Create data folder and Install needed libraries
@@ -98,7 +101,6 @@ RUN mkdir /data \
 openssl \
 ca-certificates \
 curl \
-dumb-init \
 libmariadb-dev-compat \
 libpq5 \
 && apt-get clean \
@@ -112,7 +114,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/release/vaultwarden .

@@ -121,6 +122,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]

@@ -16,21 +16,20 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2.23.0c
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
-# [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
+# $ docker pull vaultwarden/web-vault:v2022.12.0
+# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
+# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
-# [vaultwarden/web-vault:v2.23.0c]
+# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
+# [vaultwarden/web-vault:v2022.12.0]
 #
-FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
+FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

 ########################## BUILD IMAGE ##########################
-FROM clux/muslrust:nightly-2021-10-06 as build
+FROM blackdex/rust-musl:x86_64-musl-stable-1.66.0 as build


-# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
-ARG DB=sqlite,postgresql

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -45,7 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
 RUN mkdir -pv "${CARGO_HOME}" \
 && rustup set profile minimal

-ENV RUSTFLAGS='-C link-arg=-s'

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -58,6 +56,10 @@ COPY ./build.rs ./build.rs

 RUN rustup target add x86_64-unknown-linux-musl

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -73,17 +75,19 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM alpine:3.14
+FROM alpine:3.17

+ENV ROCKET_PROFILE="release" \
+ROCKET_ADDRESS=0.0.0.0 \
+ROCKET_PORT=80 \
+SSL_CERT_DIR=/etc/ssl/certs

-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-ENV SSL_CERT_DIR=/etc/ssl/certs


 # Create data folder and Install needed libraries
@@ -92,8 +96,6 @@ RUN mkdir /data \
 openssl \
 tzdata \
 curl \
-dumb-init \
-postgresql-libs \
 ca-certificates


@@ -104,7 +106,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

@@ -113,6 +114,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]

@@ -16,21 +16,20 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2.23.0c
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
-# [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
+# $ docker pull vaultwarden/web-vault:v2022.12.0
+# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
+# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
-# [vaultwarden/web-vault:v2.23.0c]
+# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
+# [vaultwarden/web-vault:v2022.12.0]
 #
-FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
+FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.55-buster as build
+FROM rust:1.66-bullseye as build


-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -64,6 +63,9 @@ COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs


+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -79,16 +81,17 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM debian:buster-slim
+FROM debian:bullseye-slim

-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
+ENV ROCKET_PROFILE="release" \
+ROCKET_ADDRESS=0.0.0.0 \
+ROCKET_PORT=80


 # Create data folder and Install needed libraries
@@ -98,7 +101,6 @@ RUN mkdir /data \
 openssl \
 ca-certificates \
 curl \
-dumb-init \
 libmariadb-dev-compat \
 libpq5 \
 && apt-get clean \
@@ -112,7 +114,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/release/vaultwarden .

@@ -121,6 +122,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]

@@ -16,21 +16,20 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2.23.0c
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
-# [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
+# $ docker pull vaultwarden/web-vault:v2022.12.0
+# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
+# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
-# [vaultwarden/web-vault:v2.23.0c]
+# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
+# [vaultwarden/web-vault:v2022.12.0]
 #
-FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
+FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

 ########################## BUILD IMAGE ##########################
-FROM clux/muslrust:nightly-2021-10-06 as build
+FROM blackdex/rust-musl:x86_64-musl-stable-1.66.0 as build


-# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
-ARG DB=sqlite,postgresql

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -45,7 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
 && rustup set profile minimal

-ENV RUSTFLAGS='-C link-arg=-s'

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
@@ -58,6 +56,10 @@ COPY ./build.rs ./build.rs

 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -73,17 +75,19 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM alpine:3.14
+FROM alpine:3.17

+ENV ROCKET_PROFILE="release" \
+ROCKET_ADDRESS=0.0.0.0 \
+ROCKET_PORT=80 \
+SSL_CERT_DIR=/etc/ssl/certs

-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-ENV SSL_CERT_DIR=/etc/ssl/certs


 # Create data folder and Install needed libraries
@@ -92,8 +96,6 @@ RUN mkdir /data \
 openssl \
 tzdata \
 curl \
-dumb-init \
-postgresql-libs \
 ca-certificates


@@ -104,7 +106,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

@@ -113,6 +114,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]

@@ -16,21 +16,20 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2.23.0c
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
-# [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
+# $ docker pull vaultwarden/web-vault:v2022.12.0
+# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
+# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
-# [vaultwarden/web-vault:v2.23.0c]
+# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
+# [vaultwarden/web-vault:v2022.12.0]
 #
-FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
+FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.55-buster as build
+FROM rust:1.66-bullseye as build


-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -45,51 +44,32 @@ ENV DEBIAN_FRONTEND=noninteractive \
 RUN mkdir -pv "${CARGO_HOME}" \
 && rustup set profile minimal

-# NOTE: Any apt-get/dpkg after this stage will fail because of broken dependencies.
-# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
-# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
-# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
-# What we can do is a force install, because nothing important is overlapping each other.
-#
+# Install required build libs for arm64 architecture.
+# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > /etc/apt/sources.list.d/deb-src.list \
-&& dpkg --add-architecture arm64 \
+# hadolint ignore=DL3059
+RUN dpkg --add-architecture arm64 \
 && apt-get update \
 && apt-get install -y \
 --no-install-recommends \
 libssl-dev:arm64 \
 libc6-dev:arm64 \
 libpq5:arm64 \
-libpq-dev \
-libmariadb3:amd64 \
+libpq-dev:arm64 \
+libmariadb3:arm64 \
+libmariadb-dev:arm64 \
+libmariadb-dev-compat:arm64 \
 gcc-aarch64-linux-gnu \
 #
-# Manual install libmariadb-dev-compat:amd64 ( After this broken dependencies will break apt )
-&& apt-get download libmariadb-dev-compat:amd64 \
-&& dpkg --force-all -i ./libmariadb-dev-compat*.deb \
-&& rm -rvf ./libmariadb-dev-compat*.deb \
-&& apt-get clean \
-&& rm -rf /var/lib/apt/lists/* \
-#
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-&& ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so \
-#
 # Make sure cargo has the right target config
 && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
 && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
 && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

 # Set arm specific environment values
-ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
-ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
+ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
+CROSS_COMPILE="1" \
+OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
+OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"


 # Creates a dummy project used to grab dependencies
@@ -103,6 +83,9 @@ COPY ./build.rs ./build.rs

 RUN rustup target add aarch64-unknown-linux-gnu

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -118,16 +101,17 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/aarch64-debian:buster
+FROM balenalib/aarch64-debian:bullseye

-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
+ENV ROCKET_PROFILE="release" \
+ROCKET_ADDRESS=0.0.0.0 \
+ROCKET_PORT=80

 # hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
@@ -139,7 +123,6 @@ RUN mkdir /data \
 openssl \
 ca-certificates \
 curl \
-dumb-init \
 libmariadb-dev-compat \
 libpq5 \
 && apt-get clean \
@@ -155,7 +138,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

@@ -164,6 +146,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]

docker/arm64/Dockerfile.alpine (new file, 121 lines)
@@ -0,0 +1,121 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-stable-1.66.0 as build


# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"


# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
&& find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
SSL_CERT_DIR=/etc/ssl/certs


# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
curl \
ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]

@@ -16,21 +16,20 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull vaultwarden/web-vault:v2.23.0c
-# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
-# [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
+# $ docker pull vaultwarden/web-vault:v2022.12.0
+# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
+# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
-# [vaultwarden/web-vault:v2.23.0c]
+# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
+# [vaultwarden/web-vault:v2022.12.0]
 #
-FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
+FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

 ########################## BUILD IMAGE ##########################
-FROM rust:1.55-buster as build
+FROM rust:1.66-bullseye as build


-# Debian-based builds support multidb
-ARG DB=sqlite,mysql,postgresql

 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive \
@@ -45,51 +44,32 @@ ENV DEBIAN_FRONTEND=noninteractive \
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
 && rustup set profile minimal

-# NOTE: Any apt-get/dpkg after this stage will fail because of broken dependencies.
-# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
-# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
-# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
-# What we can do is a force install, because nothing important is overlapping each other.
-#
+# Install required build libs for arm64 architecture.
+# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > /etc/apt/sources.list.d/deb-src.list \
-&& dpkg --add-architecture arm64 \
+# hadolint ignore=DL3059
+RUN dpkg --add-architecture arm64 \
 && apt-get update \
 && apt-get install -y \
 --no-install-recommends \
 libssl-dev:arm64 \
 libc6-dev:arm64 \
 libpq5:arm64 \
-libpq-dev \
-libmariadb3:amd64 \
+libpq-dev:arm64 \
+libmariadb3:arm64 \
+libmariadb-dev:arm64 \
+libmariadb-dev-compat:arm64 \
 gcc-aarch64-linux-gnu \
 #
-# Manual install libmariadb-dev-compat:amd64 ( After this broken dependencies will break apt )
-&& apt-get download libmariadb-dev-compat:amd64 \
-&& dpkg --force-all -i ./libmariadb-dev-compat*.deb \
-&& rm -rvf ./libmariadb-dev-compat*.deb \
-&& apt-get clean \
-&& rm -rf /var/lib/apt/lists/* \
-#
-# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
-# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
-# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
-# Without this specific file the ld command will fail and compilation fails with it.
-&& ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so \
-#
 # Make sure cargo has the right target config
 && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
 && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
 && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

 # Set arm specific environment values
-ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
-ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
+ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
+CROSS_COMPILE="1" \
+OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
+OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"


 # Creates a dummy project used to grab dependencies
@@ -103,6 +83,9 @@ COPY ./build.rs ./build.rs

 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql

 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -118,16 +101,17 @@ RUN touch src/main.rs

 # Builds again, this time it'll just be
 # your actual source files being built
+# hadolint ignore=DL3059
 RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM balenalib/aarch64-debian:buster
+FROM balenalib/aarch64-debian:bullseye

-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
+ENV ROCKET_PROFILE="release" \
+ROCKET_ADDRESS=0.0.0.0 \
+ROCKET_PORT=80

 # hadolint ignore=DL3059
 RUN [ "cross-build-start" ]
@@ -139,7 +123,6 @@ RUN mkdir /data \
 openssl \
 ca-certificates \
 curl \
-dumb-init \
 libmariadb-dev-compat \
 libpq5 \
 && apt-get clean \
@@ -155,7 +138,6 @@ EXPOSE 3012
 # Copies the files from the context (Rocket.toml file and web-vault)
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

@@ -164,6 +146,4 @@ COPY docker/start.sh /start.sh

 HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

-# Configures the startup!
-ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]

docker/arm64/Dockerfile.buildx.alpine (new file, 121 lines)
@@ -0,0 +1,121 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2022.12.0
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
# [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
# [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-stable-1.66.0 as build


# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"


# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal


# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
&& find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.17

ENV ROCKET_PROFILE="release" \
ROCKET_ADDRESS=0.0.0.0 \
ROCKET_PORT=80 \
SSL_CERT_DIR=/etc/ssl/certs


# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
curl \
ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
|
||||
@@ -16,21 +16,20 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.23.0c
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
#     [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
#     [vaultwarden/web-vault:v2.23.0c]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.55-buster as build
FROM rust:1.66-bullseye as build


# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -45,51 +44,32 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# NOTE: Any apt-get/dpkg after this stage will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need libmariadb-dev-compat:amd64, but it cannot be installed together with the :armel version.
# What we can do is force the install, because nothing important overlaps.
#
# Install required build libs for the armel architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
# hadolint ignore=DL3059
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libpq5:armel \
        libpq-dev \
        libmariadb3:amd64 \
        libpq-dev:armel \
        libmariadb3:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        gcc-arm-linux-gnueabi \
    #
    # Manually install libmariadb-dev-compat:amd64 (after this, the broken dependencies will break apt)
    && apt-get download libmariadb-dev-compat:amd64 \
    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
    && rm -rvf ./libmariadb-dev-compat*.deb \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    #
    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
    # The libpq5:armel package seems not to provide a symlink to libpq.so.5 with the name libpq.so.
    # This is only provided by the libpq-dev package, which can't be installed for both archs at the same time.
    # Without this specific file the ld command will fail and compilation fails with it.
    && ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"
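# Without the libpq.so symlink created above, the final link step would
# typically abort with an error along the lines of
#     /usr/bin/ld: cannot find -lpq
# since rustc passes -lpq to the linker and the armel toolchain only ships
# libpq.so.5. (Illustrative failure mode, not part of the original diff.)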

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"


# Creates a dummy project used to grab dependencies
@@ -103,6 +83,9 @@ COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -118,16 +101,17 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster
FROM balenalib/rpi-debian:bullseye

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -139,12 +123,16 @@ RUN mkdir /data \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

@@ -155,7 +143,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

@@ -164,6 +151,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

docker/armv6/Dockerfile.alpine (new file, 123 lines)
@@ -0,0 +1,123 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.66.0 as build



# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"


# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
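Because the stage above targets arm-unknown-linux-musleabi, the release binary should come out statically linked against musl; a quick sanity check inside the build stage might look like this (illustrative, not part of the diff):

    # Expect "statically linked" in the output
    file /app/target/arm-unknown-linux-musleabi/release/vaultwarden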
@@ -16,21 +16,20 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.23.0c
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
#     [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
#     [vaultwarden/web-vault:v2.23.0c]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.55-buster as build
FROM rust:1.66-bullseye as build


# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -45,51 +44,32 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# NOTE: Any apt-get/dpkg after this stage will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need libmariadb-dev-compat:amd64, but it cannot be installed together with the :armel version.
# What we can do is force the install, because nothing important overlaps.
#
# Install required build libs for the armel architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armel \
# hadolint ignore=DL3059
RUN dpkg --add-architecture armel \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armel \
        libc6-dev:armel \
        libpq5:armel \
        libpq-dev \
        libmariadb3:amd64 \
        libpq-dev:armel \
        libmariadb3:armel \
        libmariadb-dev:armel \
        libmariadb-dev-compat:armel \
        gcc-arm-linux-gnueabi \
    #
    # Manually install libmariadb-dev-compat:amd64 (after this, the broken dependencies will break apt)
    && apt-get download libmariadb-dev-compat:amd64 \
    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
    && rm -rvf ./libmariadb-dev-compat*.deb \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    #
    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
    # The libpq5:armel package seems not to provide a symlink to libpq.so.5 with the name libpq.so.
    # This is only provided by the libpq-dev package, which can't be installed for both archs at the same time.
    # Without this specific file the ld command will fail and compilation fails with it.
    && ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so \
    #
    # Make sure cargo has the right target config
    && echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"


# Creates a dummy project used to grab dependencies
@@ -103,6 +83,9 @@ COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -118,16 +101,17 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster
FROM balenalib/rpi-debian:bullseye

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -139,12 +123,16 @@ RUN mkdir /data \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# In the Balena Bullseye images for armv6/rpi-debian there is a missing symlink.
# This symlink was there in the buster images, and for some reason this is needed.
# hadolint ignore=DL3059
RUN ln -v -s /lib/ld-linux-armhf.so.3 /lib/ld-linux.so.3

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

@@ -155,7 +143,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .

@@ -164,6 +151,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

docker/armv6/Dockerfile.buildx.alpine (new file, 123 lines)
@@ -0,0 +1,123 @@
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-stable-1.66.0 as build



# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"


# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# To be able to build the armv6 image with mimalloc we need to specifically specify the libatomic.a file location
ENV RUSTFLAGS='-Clink-arg=/usr/local/musl/arm-unknown-linux-musleabi/lib/libatomic.a'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs


# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

CMD ["/start.sh"]
@@ -16,21 +16,20 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.23.0c
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
#     [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
#     [vaultwarden/web-vault:v2.23.0c]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.55-buster as build
FROM rust:1.66-bullseye as build


# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -45,51 +44,32 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# NOTE: Any apt-get/dpkg after this stage will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need libmariadb-dev-compat:amd64, but it cannot be installed together with the :armhf version.
# What we can do is force the install, because nothing important overlaps.
#
# Install required build libs for the armhf architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
# hadolint ignore=DL3059
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libpq5:armhf \
        libpq-dev \
        libmariadb3:amd64 \
        libpq-dev:armhf \
        libmariadb3:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        gcc-arm-linux-gnueabihf \
    #
    # Manually install libmariadb-dev-compat:amd64 (after this, the broken dependencies will break apt)
    && apt-get download libmariadb-dev-compat:amd64 \
    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
    && rm -rvf ./libmariadb-dev-compat*.deb \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    #
    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
    # The libpq5:armhf package seems not to provide a symlink to libpq.so.5 with the name libpq.so.
    # This is only provided by the libpq-dev package, which can't be installed for both archs at the same time.
    # Without this specific file the ld command will fail and compilation fails with it.
    && ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"


# Creates a dummy project used to grab dependencies
@@ -103,6 +83,9 @@ COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -118,16 +101,17 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster
FROM balenalib/armv7hf-debian:bullseye

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -139,7 +123,6 @@ RUN mkdir /data \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
@@ -155,7 +138,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

@@ -164,6 +146,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -16,22 +16,20 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.23.0c
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
#     [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
#     [vaultwarden/web-vault:v2.23.0c]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM messense/rust-musl-cross:armv7-musleabihf as build
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.66.0 as build

# Alpine-based ARM (musl) only supports sqlite during compile time.
# We now also need to add vendored_openssl, because the current base image we use to build has OpenSSL removed.
ARG DB=sqlite,vendored_openssl

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -46,8 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -60,6 +56,10 @@ COPY ./build.rs ./build.rs

RUN rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -75,19 +75,19 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
# hadolint ignore=DL3059
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.14
FROM balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -98,7 +98,6 @@ RUN mkdir /data \
        openssl \
        tzdata \
        curl \
        dumb-init \
        ca-certificates

# hadolint ignore=DL3059
@@ -111,7 +110,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

@@ -120,6 +118,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

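The vendored_openssl feature referenced above makes the openssl crate compile its own copy of OpenSSL (via the openssl-src build dependency) instead of linking the library from the base image; conceptually the build then behaves like this (illustrative, not a line from the diff):

    # OpenSSL is built from source during the cargo build; nothing from the
    # image's (removed) OpenSSL installation is linked.
    cargo build --features sqlite,vendored_openssl --release --target=armv7-unknown-linux-musleabihf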
@@ -16,21 +16,20 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.23.0c
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
#     [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
#     [vaultwarden/web-vault:v2.23.0c]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM rust:1.55-buster as build
FROM rust:1.66-bullseye as build


# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -45,51 +44,32 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# NOTE: Any apt-get/dpkg after this stage will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need libmariadb-dev-compat:amd64, but it cannot be installed together with the :armhf version.
# What we can do is force the install, because nothing important overlaps.
#
# Install required build libs for the armhf architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > /etc/apt/sources.list.d/deb-src.list \
    && dpkg --add-architecture armhf \
# hadolint ignore=DL3059
RUN dpkg --add-architecture armhf \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:armhf \
        libc6-dev:armhf \
        libpq5:armhf \
        libpq-dev \
        libmariadb3:amd64 \
        libpq-dev:armhf \
        libmariadb3:armhf \
        libmariadb-dev:armhf \
        libmariadb-dev-compat:armhf \
        gcc-arm-linux-gnueabihf \
    #
    # Manually install libmariadb-dev-compat:amd64 (after this, the broken dependencies will break apt)
    && apt-get download libmariadb-dev-compat:amd64 \
    && dpkg --force-all -i ./libmariadb-dev-compat*.deb \
    && rm -rvf ./libmariadb-dev-compat*.deb \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    #
    # For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
    # The libpq5:armhf package seems not to provide a symlink to libpq.so.5 with the name libpq.so.
    # This is only provided by the libpq-dev package, which can't be installed for both archs at the same time.
    # Without this specific file the ld command will fail and compilation fails with it.
    && ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so \
    #
    # Make sure cargo has the right target config
    && echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
    OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"


# Creates a dummy project used to grab dependencies
@@ -103,6 +83,9 @@ COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -118,16 +101,17 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster
FROM balenalib/armv7hf-debian:bullseye

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -139,7 +123,6 @@ RUN mkdir /data \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
@@ -155,7 +138,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .

@@ -164,6 +146,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -16,22 +16,20 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.23.0c
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.23.0c
#     [vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459]
#     $ docker pull vaultwarden/web-vault:v2022.12.0
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2022.12.0
#     [vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459
#     [vaultwarden/web-vault:v2.23.0c]
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e
#     [vaultwarden/web-vault:v2022.12.0]
#
FROM vaultwarden/web-vault@sha256:dc94d303def3583af08816e91803f1c42107645612440f474f553f0cb0f97459 as vault
FROM vaultwarden/web-vault@sha256:068ac863d52a5626568ae3c7f93a509f87c76b1b15821b101f2707724df9da3e as vault

########################## BUILD IMAGE ##########################
FROM messense/rust-musl-cross:armv7-musleabihf as build
FROM blackdex/rust-musl:armv7-musleabihf-stable-1.66.0 as build

# Alpine-based ARM (musl) only supports sqlite during compile time.
# We now also need to add vendored_openssl, because the current base image we use to build has OpenSSL removed.
ARG DB=sqlite,vendored_openssl

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
@@ -46,8 +44,6 @@ ENV DEBIAN_FRONTEND=noninteractive \
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -60,6 +56,10 @@ COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf

# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -75,19 +75,19 @@ RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
# hadolint ignore=DL3059
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf

######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.14
FROM balenalib/armv7hf-alpine:3.17

ENV ROCKET_PROFILE="release" \
    ROCKET_ADDRESS=0.0.0.0 \
    ROCKET_PORT=80 \
    SSL_CERT_DIR=/etc/ssl/certs

ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -98,7 +98,6 @@ RUN mkdir /data \
        openssl \
        tzdata \
        curl \
        dumb-init \
        ca-certificates

# hadolint ignore=DL3059
@@ -111,7 +110,6 @@ EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .

@@ -120,6 +118,4 @@ COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

@@ -2,8 +2,8 @@

# Use the value of the corresponding env var (if present),
# or a default value otherwise.
: ${DATA_FOLDER:="data"}
: ${ROCKET_PORT:="80"}
: "${DATA_FOLDER:="data"}"
: "${ROCKET_PORT:="80"}"

CONFIG_FILE="${DATA_FOLDER}"/config.json

@@ -45,9 +45,13 @@ if [ -r "${CONFIG_FILE}" ]; then
    fi
fi

addr="${ROCKET_ADDRESS}"
if [ -z "${addr}" ] || [ "${addr}" = '0.0.0.0' ] || [ "${addr}" = '::' ]; then
    addr='localhost'
fi
base_path="$(get_base_path "${DOMAIN}")"
if [ -n "${ROCKET_TLS}" ]; then
    s='s'
fi
curl --insecure --fail --silent --show-error \
    "http${s}://localhost:${ROCKET_PORT}${base_path}/alive" || exit 1
    "http${s}://${addr}:${ROCKET_PORT}${base_path}/alive" || exit 1
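The new addr fallback keeps the probe working for wildcard binds while honoring a specific ROCKET_ADDRESS. The resulting behavior, sketched (addresses illustrative):

    # ROCKET_ADDRESS unset, 0.0.0.0 or ::  -> probe http://localhost:${ROCKET_PORT}${base_path}/alive
    # ROCKET_ADDRESS=192.0.2.10            -> probe http://192.0.2.10:${ROCKET_PORT}${base_path}/alive
    # ROCKET_TLS set                       -> scheme becomes https (cert not verified, hence --insecure)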
@@ -9,15 +9,15 @@ fi

if [ -d /etc/vaultwarden.d ]; then
    for f in /etc/vaultwarden.d/*.sh; do
        if [ -r $f ]; then
            . $f
        if [ -r "${f}" ]; then
            . "${f}"
        fi
    done
elif [ -d /etc/bitwarden_rs.d ]; then
    echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###"
    for f in /etc/bitwarden_rs.d/*.sh; do
        if [ -r $f ]; then
            . $f
        if [ -r "${f}" ]; then
            . "${f}"
        fi
    done
fi

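Quoting ${f} matters once a drop-in file name contains whitespace; unquoted, both the test and the dot-source split the path into several words. A minimal illustration (hypothetical file name):

    f='/etc/vaultwarden.d/my config.sh'
    [ -r $f ]       # word-splits into two arguments -> "too many arguments" error
    [ -r "${f}" ]   # tests the single path as intended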
@@ -7,10 +7,5 @@ arches=(
)

if [[ "${DOCKER_TAG}" == *alpine ]]; then
    # The Alpine image build currently only works for certain arches.
    distro_suffix=.alpine
    arches=(
        amd64
        armv7
    )
fi

@@ -0,0 +1 @@
DROP TABLE twofactor_incomplete;
@@ -0,0 +1,9 @@
CREATE TABLE twofactor_incomplete (
    user_uuid CHAR(36) NOT NULL REFERENCES users(uuid),
    device_uuid CHAR(36) NOT NULL,
    device_name TEXT NOT NULL,
    login_time DATETIME NOT NULL,
    ip_address TEXT NOT NULL,

    PRIMARY KEY (user_uuid, device_uuid)
);
migrations/mysql/2022-01-17-234911_add_api_key/up.sql (new file, 2 lines)
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN api_key VARCHAR(255);
@@ -0,0 +1,4 @@
-- First remove the previous primary key
ALTER TABLE devices DROP PRIMARY KEY;
-- Add a new combined one
ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid);
@@ -0,0 +1,3 @@
DROP TABLE `groups`;
DROP TABLE groups_users;
DROP TABLE collections_groups;
migrations/mysql/2022-07-27-110000_add_group_support/up.sql (new file, 23 lines)
@@ -0,0 +1,23 @@
CREATE TABLE `groups` (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    organizations_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
    name VARCHAR(100) NOT NULL,
    access_all BOOLEAN NOT NULL,
    external_id VARCHAR(300) NULL,
    creation_date DATETIME NOT NULL,
    revision_date DATETIME NOT NULL
);

CREATE TABLE groups_users (
    groups_uuid CHAR(36) NOT NULL REFERENCES `groups` (uuid),
    users_organizations_uuid VARCHAR(36) NOT NULL REFERENCES users_organizations (uuid),
    UNIQUE (groups_uuid, users_organizations_uuid)
);

CREATE TABLE collections_groups (
    collections_uuid VARCHAR(40) NOT NULL REFERENCES collections (uuid),
    groups_uuid CHAR(36) NOT NULL REFERENCES `groups` (uuid),
    read_only BOOLEAN NOT NULL,
    hide_passwords BOOLEAN NOT NULL,
    UNIQUE (collections_uuid, groups_uuid)
);
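The backticks around `groups` in the MySQL migration are deliberate: GROUPS became a reserved word in MySQL 8.0.2 (for window functions), so the bare identifier would be rejected there, e.g. (illustrative):

    -- On MySQL 8.0+ this fails with a 1064 syntax error:
    --   CREATE TABLE groups (uuid CHAR(36) NOT NULL PRIMARY KEY);
    -- Quoted, it parses:
    CREATE TABLE `groups` (uuid CHAR(36) NOT NULL PRIMARY KEY);

The PostgreSQL and SQLite migrations further down use the unquoted name, which those engines accept.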
migrations/mysql/2022-10-18-170602_add_events/down.sql (new file, 1 line)
@@ -0,0 +1 @@
DROP TABLE event;
migrations/mysql/2022-10-18-170602_add_events/up.sql (new file, 19 lines)
@@ -0,0 +1,19 @@
CREATE TABLE event (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    event_type INTEGER NOT NULL,
    user_uuid CHAR(36),
    org_uuid CHAR(36),
    cipher_uuid CHAR(36),
    collection_uuid CHAR(36),
    group_uuid CHAR(36),
    org_user_uuid CHAR(36),
    act_user_uuid CHAR(36),
    device_type INTEGER,
    ip_address TEXT,
    event_date DATETIME NOT NULL,
    policy_uuid CHAR(36),
    provider_uuid CHAR(36),
    provider_user_uuid CHAR(36),
    provider_org_uuid CHAR(36),
    UNIQUE (uuid)
);
@@ -0,0 +1 @@
DROP TABLE twofactor_incomplete;
@@ -0,0 +1,9 @@
CREATE TABLE twofactor_incomplete (
    user_uuid VARCHAR(40) NOT NULL REFERENCES users(uuid),
    device_uuid VARCHAR(40) NOT NULL,
    device_name TEXT NOT NULL,
    login_time TIMESTAMP NOT NULL,
    ip_address TEXT NOT NULL,

    PRIMARY KEY (user_uuid, device_uuid)
);
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN api_key TEXT;
@@ -0,0 +1,4 @@
-- First remove the previous primary key
ALTER TABLE devices DROP CONSTRAINT devices_pkey;
-- Add a new combined one
ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid);
@@ -0,0 +1,3 @@
DROP TABLE groups;
DROP TABLE groups_users;
DROP TABLE collections_groups;
@@ -0,0 +1,23 @@
CREATE TABLE groups (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    organizations_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
    name VARCHAR(100) NOT NULL,
    access_all BOOLEAN NOT NULL,
    external_id VARCHAR(300) NULL,
    creation_date TIMESTAMP NOT NULL,
    revision_date TIMESTAMP NOT NULL
);

CREATE TABLE groups_users (
    groups_uuid CHAR(36) NOT NULL REFERENCES groups (uuid),
    users_organizations_uuid VARCHAR(36) NOT NULL REFERENCES users_organizations (uuid),
    PRIMARY KEY (groups_uuid, users_organizations_uuid)
);

CREATE TABLE collections_groups (
    collections_uuid VARCHAR(40) NOT NULL REFERENCES collections (uuid),
    groups_uuid CHAR(36) NOT NULL REFERENCES groups (uuid),
    read_only BOOLEAN NOT NULL,
    hide_passwords BOOLEAN NOT NULL,
    PRIMARY KEY (collections_uuid, groups_uuid)
);
@@ -0,0 +1 @@
DROP TABLE event;
migrations/postgresql/2022-10-18-170602_add_events/up.sql (new file, 19 lines)
@@ -0,0 +1,19 @@
CREATE TABLE event (
    uuid CHAR(36) NOT NULL PRIMARY KEY,
    event_type INTEGER NOT NULL,
    user_uuid CHAR(36),
    org_uuid CHAR(36),
    cipher_uuid CHAR(36),
    collection_uuid CHAR(36),
    group_uuid CHAR(36),
    org_user_uuid CHAR(36),
    act_user_uuid CHAR(36),
    device_type INTEGER,
    ip_address TEXT,
    event_date TIMESTAMP NOT NULL,
    policy_uuid CHAR(36),
    provider_uuid CHAR(36),
    provider_user_uuid CHAR(36),
    provider_org_uuid CHAR(36),
    UNIQUE (uuid)
);
@@ -0,0 +1 @@
DROP TABLE twofactor_incomplete;
@@ -0,0 +1,9 @@
CREATE TABLE twofactor_incomplete (
    user_uuid TEXT NOT NULL REFERENCES users(uuid),
    device_uuid TEXT NOT NULL,
    device_name TEXT NOT NULL,
    login_time DATETIME NOT NULL,
    ip_address TEXT NOT NULL,

    PRIMARY KEY (user_uuid, device_uuid)
);
migrations/sqlite/2022-01-17-234911_add_api_key/up.sql (new file, 2 lines)
@@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN api_key TEXT;
@@ -0,0 +1,23 @@
-- Create new devices table with primary keys on both uuid and user_uuid
CREATE TABLE devices_new (
    uuid TEXT NOT NULL,
    created_at DATETIME NOT NULL,
    updated_at DATETIME NOT NULL,
    user_uuid TEXT NOT NULL,
    name TEXT NOT NULL,
    atype INTEGER NOT NULL,
    push_token TEXT,
    refresh_token TEXT NOT NULL,
    twofactor_remember TEXT,
    PRIMARY KEY(uuid, user_uuid),
    FOREIGN KEY(user_uuid) REFERENCES users(uuid)
);

-- Transfer current data to new table
INSERT INTO devices_new SELECT * FROM devices;

-- Drop the old table
DROP TABLE devices;

-- Rename the new table to the original name
ALTER TABLE devices_new RENAME TO devices;
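SQLite cannot change a table's primary key with ALTER TABLE, hence the create-copy-drop-rename sequence above. Note the INSERT relies on devices_new declaring exactly the old column order; an explicit column list (illustrative rewrite, not in the diff) would make it independent of ordering:

    INSERT INTO devices_new (uuid, created_at, updated_at, user_uuid, name,
                             atype, push_token, refresh_token, twofactor_remember)
    SELECT uuid, created_at, updated_at, user_uuid, name,
           atype, push_token, refresh_token, twofactor_remember
    FROM devices;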
@@ -0,0 +1,3 @@
|
||||
DROP TABLE groups;
|
||||
DROP TABLE groups_users;
|
||||
DROP TABLE collections_groups;
|
||||
migrations/sqlite/2022-07-27-110000_add_group_support/up.sql (new file, 23 lines)
@@ -0,0 +1,23 @@
CREATE TABLE groups (
    uuid TEXT NOT NULL PRIMARY KEY,
    organizations_uuid TEXT NOT NULL REFERENCES organizations (uuid),
    name TEXT NOT NULL,
    access_all BOOLEAN NOT NULL,
    external_id TEXT NULL,
    creation_date TIMESTAMP NOT NULL,
    revision_date TIMESTAMP NOT NULL
);

CREATE TABLE groups_users (
    groups_uuid TEXT NOT NULL REFERENCES groups (uuid),
    users_organizations_uuid TEXT NOT NULL REFERENCES users_organizations (uuid),
    UNIQUE (groups_uuid, users_organizations_uuid)
);

CREATE TABLE collections_groups (
    collections_uuid TEXT NOT NULL REFERENCES collections (uuid),
    groups_uuid TEXT NOT NULL REFERENCES groups (uuid),
    read_only BOOLEAN NOT NULL,
    hide_passwords BOOLEAN NOT NULL,
    UNIQUE (collections_uuid, groups_uuid)
);
migrations/sqlite/2022-10-18-170602_add_events/down.sql (new file, 1 line)
@@ -0,0 +1 @@
DROP TABLE event;
migrations/sqlite/2022-10-18-170602_add_events/up.sql (new file, 19 lines)
@@ -0,0 +1,19 @@
CREATE TABLE event (
    uuid TEXT NOT NULL PRIMARY KEY,
    event_type INTEGER NOT NULL,
    user_uuid TEXT,
    org_uuid TEXT,
    cipher_uuid TEXT,
    collection_uuid TEXT,
    group_uuid TEXT,
    org_user_uuid TEXT,
    act_user_uuid TEXT,
    device_type INTEGER,
    ip_address TEXT,
    event_date DATETIME NOT NULL,
    policy_uuid TEXT,
    provider_uuid TEXT,
    provider_user_uuid TEXT,
    provider_org_uuid TEXT,
    UNIQUE (uuid)
);
resources/404.svg (new file, 93 lines)
@@ -0,0 +1,93 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   width="500"
   height="222"
   viewBox="0 0 500 222"
   version="1.1"
   id="svg5"
   xml:space="preserve"
   inkscape:version="1.2.1 (9c6d41e410, 2022-07-14, custom)"
   sodipodi:docname="404.svg"
   inkscape:export-filename="404.png"
   inkscape:export-xdpi="96"
   inkscape:export-ydpi="96"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns:xlink="http://www.w3.org/1999/xlink"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg"><sodipodi:namedview
     id="namedview7"
     pagecolor="#ffffff"
     bordercolor="#666666"
     borderopacity="1.0"
     inkscape:showpageshadow="2"
     inkscape:pageopacity="0.0"
     inkscape:pagecheckerboard="0"
     inkscape:deskcolor="#d1d1d1"
     inkscape:document-units="px"
     showgrid="false"
     inkscape:zoom="1.3791767"
     inkscape:cx="284.59007"
     inkscape:cy="214.25826"
     inkscape:window-width="1916"
     inkscape:window-height="1038"
     inkscape:window-x="0"
     inkscape:window-y="18"
     inkscape:window-maximized="1"
     inkscape:current-layer="layer1"
     showguides="false" /><defs
     id="defs2"><mask
       id="holes"><rect
         x="-60"
         y="-60"
         width="120"
         height="120"
         fill="#ffffff"
         id="rect3296" /><circle
         id="hole"
         cy="-40"
         r="3"
         cx="0" /><use
         transform="rotate(72)"
         xlink:href="#hole"
         id="use3299" /><use
         transform="rotate(144)"
         xlink:href="#hole"
         id="use3301" /><use
         transform="rotate(-144)"
         xlink:href="#hole"
         id="use3303" /><use
         transform="rotate(-72)"
         xlink:href="#hole"
         id="use3305" /></mask></defs><g
     inkscape:label="Ebene 1"
     inkscape:groupmode="layer"
     id="layer1"><rect
       style="fill:none;fill-opacity:0.5;stroke:none;stroke-width:0.74;stroke-opacity:1"
       id="rect681"
       width="666"
       height="222"
       x="0"
       y="0" /><text
       xml:space="preserve"
       style="font-size:128px;line-height:1.25;font-family:'Open Sans';-inkscape-font-specification:'Open Sans';text-align:center;text-anchor:middle;fill:#000000;fill-opacity:0.7;stroke-width:1"
       x="249.9375"
       y="134.8125"
       id="text3425"><tspan
         id="tspan3423"
         x="249.9375"
         y="134.8125"
         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:128px;font-family:'Open Sans';-inkscape-font-specification:'Open Sans';text-align:center;text-anchor:middle;fill:#000000;fill-opacity:0.7;stroke-width:1"
         sodipodi:role="line">404</tspan></text><text
       xml:space="preserve"
       style="font-size:26.6667px;line-height:1.25;font-family:'Open Sans';-inkscape-font-specification:'Open Sans';text-align:center;text-anchor:middle"
       x="249.04297"
       y="194.68582"
       id="text4067"><tspan
         sodipodi:role="line"
         id="tspan4065"
         x="249.04295"
         y="194.68582"
         style="font-size:26.6667px;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:0.7">Return to the web vault?</tspan></text></g></svg>
[Binary image previews omitted by the viewer: one new image rendered from 404.svg (3.3 KiB) plus four existing images re-rendered, shrinking from 8.7 to 5.5 KiB, 8.4 to 5.2 KiB, 17 to 6.5 KiB, and 12 to 6.5 KiB. The affected file names are not recoverable from this view.]
@@ -1 +1 @@
nightly-2021-10-14
1.66.0
@@ -1,7 +1,7 @@
version = "Two"
edition = "2018"
# version = "Two"
edition = "2021"
max_width = 120
newline_style = "Unix"
use_small_heuristics = "Off"
struct_lit_single_line = false
overflow_delimited_expr = true
# struct_lit_single_line = false
# overflow_delimited_expr = true
src/api/admin.rs (480 lines changed)
@@ -1,18 +1,19 @@
use once_cell::sync::Lazy;
use serde::de::DeserializeOwned;
use serde_json::Value;
use std::{env, time::Duration};
use std::env;

use rocket::serde::json::Json;
use rocket::{
    http::{Cookie, Cookies, SameSite, Status},
    request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
    response::{content::Html, Flash, Redirect},
    Route,
    form::Form,
    http::{Cookie, CookieJar, MediaType, SameSite, Status},
    request::{FromRequest, Outcome, Request},
    response::{content::RawHtml as Html, Redirect},
    Catcher, Route,
};
use rocket_contrib::json::Json;

use crate::{
    api::{ApiResult, EmptyResult, JsonResult, NumberOrString},
    api::{core::log_event, ApiResult, EmptyResult, JsonResult, NumberOrString},
    auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
    config::ConfigBuilder,
    db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
@@ -21,7 +22,7 @@ use crate::{
    util::{
        docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
    },
    CONFIG,
    CONFIG, VERSION,
};

pub fn routes() -> Vec<Route> {
@@ -30,7 +31,6 @@ pub fn routes() -> Vec<Route> {
    }

    routes![
        admin_login,
        get_users_json,
        get_user_json,
        post_admin_login,
@@ -56,6 +56,14 @@ pub fn routes() -> Vec<Route> {
    ]
}

pub fn catchers() -> Vec<Catcher> {
    if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
        catchers![]
    } else {
        catchers![admin_login]
    }
}
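In Rocket 0.5, error catchers are registered separately from routes, which is why this file now exports both routes() and catchers(). A sketch of how a launch function might wire the two together — vaultwarden's actual mounting lives in src/main.rs, which is not part of this hunk, so treat the paths as illustrative:

```rust
// Illustrative Rocket 0.5 wiring; config handling and the mount path are
// simplified compared to the real application.
#[rocket::launch]
fn rocket() -> _ {
    rocket::build()
        .mount("/admin", crate::api::admin::routes())
        .register("/admin", crate::api::admin::catchers())
}
```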
static DB_TYPE: Lazy<&str> = Lazy::new(|| {
    DbConnType::from_url(&CONFIG.database_url())
        .map(|t| match t {
@@ -74,33 +82,26 @@ fn admin_disabled() -> &'static str {
    "The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
}

const COOKIE_NAME: &str = "BWRS_ADMIN";
const COOKIE_NAME: &str = "VW_ADMIN";
const ADMIN_PATH: &str = "/admin";
const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z";

const BASE_TEMPLATE: &str = "admin/base";
const VERSION: Option<&str> = option_env!("BWRS_VERSION");

const ACTING_ADMIN_USER: &str = "vaultwarden-admin-00000-000000000000";

fn admin_path() -> String {
    format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
}

struct Referer(Option<String>);

impl<'a, 'r> FromRequest<'a, 'r> for Referer {
    type Error = ();

    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
        Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
    }
}

#[derive(Debug)]
struct IpHeader(Option<String>);

impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for IpHeader {
    type Error = ();

    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        if req.headers().get_one(&CONFIG.ip_header()).is_some() {
            Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
        } else if req.headers().get_one("X-Client-IP").is_some() {
@@ -115,35 +116,37 @@ impl<'a, 'r> FromRequest<'a, 'r> for IpHeader {
    }
}
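The change above is the general Rocket 0.4 to 0.5 migration pattern for request guards: FromRequest loses one lifetime parameter and becomes an async trait. A self-contained example of the new shape, independent of vaultwarden's types:

```rust
use rocket::request::{FromRequest, Outcome, Request};

// A trivial guard that captures an optional request header.
struct RequestId(Option<String>);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for RequestId {
    type Error = ();

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        Outcome::Success(RequestId(req.headers().get_one("X-Request-Id").map(str::to_string)))
    }
}
```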
/// Used for `Location` response headers, which must specify an absolute URI
/// (see https://tools.ietf.org/html/rfc2616#section-14.30).
fn admin_url(referer: Referer) -> String {
    // If we get a referer use that to make it work when, DOMAIN is not set
    if let Some(mut referer) = referer.0 {
        if let Some(start_index) = referer.find(ADMIN_PATH) {
            referer.truncate(start_index + ADMIN_PATH.len());
            return referer;
        }
    }

    if CONFIG.domain_set() {
        // Don't use CONFIG.domain() directly, since the user may want to keep a
        // trailing slash there, particularly when running under a subpath.
        format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
    } else {
        // Last case, when no referer or domain set, technically invalid but better than nothing
        ADMIN_PATH.to_string()
    }
fn admin_url() -> String {
    format!("{}{}", CONFIG.domain_origin(), admin_path())
}

#[get("/", rank = 2)]
fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
#[derive(Responder)]
enum AdminResponse {
    #[response(status = 200)]
    Ok(ApiResult<Html<String>>),
    #[response(status = 401)]
    Unauthorized(ApiResult<Html<String>>),
    #[response(status = 429)]
    TooManyRequests(ApiResult<Html<String>>),
}

#[catch(401)]
fn admin_login(request: &Request<'_>) -> ApiResult<Html<String>> {
    if request.format() == Some(&MediaType::JSON) {
        err_code!("Authorization failed.", Status::Unauthorized.code);
    }
    let redirect = request.segments::<std::path::PathBuf>(0..).unwrap_or_default().display().to_string();
    render_admin_login(None, Some(redirect))
}

fn render_admin_login(msg: Option<&str>, redirect: Option<String>) -> ApiResult<Html<String>> {
    // If there is an error, show it
    let msg = flash.map(|msg| format!("{}: {}", msg.name(), msg.msg()));
    let msg = msg.map(|msg| format!("Error: {msg}"));
    let json = json!({
        "page_content": "admin/login",
        "version": VERSION,
        "error": msg,
        "redirect": redirect,
        "urlpath": CONFIG.domain_path()
    });

@@ -155,21 +158,25 @@ fn admin_login(flash: Option<FlashMessage>) -> ApiResult<Html<String>> {
#[derive(FromForm)]
struct LoginForm {
    token: String,
    redirect: Option<String>,
}

#[post("/", data = "<data>")]
fn post_admin_login(
    data: Form<LoginForm>,
    mut cookies: Cookies,
    ip: ClientIp,
    referer: Referer,
) -> Result<Redirect, Flash<Redirect>> {
fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> Result<Redirect, AdminResponse> {
    let data = data.into_inner();
    let redirect = data.redirect;

    if crate::ratelimit::check_limit_admin(&ip.ip).is_err() {
        return Err(AdminResponse::TooManyRequests(render_admin_login(
            Some("Too many requests, try again later."),
            redirect,
        )));
    }

    // If the token is invalid, redirect to login page
    if !_validate_token(&data.token) {
        error!("Invalid admin token. IP: {}", ip.ip);
        Err(Flash::error(Redirect::to(admin_url(referer)), "Invalid admin token, please try again."))
        Err(AdminResponse::Unauthorized(render_admin_login(Some("Invalid admin token, please try again."), redirect)))
    } else {
        // If the token received is valid, generate JWT and save it as a cookie
        let claims = generate_admin_claims();
@@ -177,13 +184,17 @@ fn post_admin_login(

        let cookie = Cookie::build(COOKIE_NAME, jwt)
            .path(admin_path())
            .max_age(time::Duration::minutes(20))
            .max_age(rocket::time::Duration::minutes(20))
            .same_site(SameSite::Strict)
            .http_only(true)
            .finish();

        cookies.add(cookie);
        Ok(Redirect::to(admin_url(referer)))
        if let Some(redirect) = redirect {
            Ok(Redirect::to(format!("{}{}", admin_path(), redirect)))
        } else {
            Err(AdminResponse::Ok(render_admin_page()))
        }
    }
}
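The login flow above now renders responses directly instead of round-tripping through Flash redirects: rate-limit failures return 429, bad tokens 401, and a valid token sets a short-lived, SameSite=Strict, HTTP-only JWT cookie. The _validate_token helper is outside this hunk; presumably it is a constant-time comparison of the submitted token against the configured ADMIN_TOKEN, roughly:

```rust
// Rough sketch of the token check (not shown in this hunk); comparing in
// constant time avoids leaking token prefixes through timing side channels.
fn _validate_token(token: &str) -> bool {
    match CONFIG.admin_token().as_ref() {
        None => false,
        Some(t) => crate::crypto::ct_eq(t.trim(), token.trim()),
    }
}
```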
@@ -235,20 +246,24 @@ impl AdminTemplateData {
    }
}

#[get("/", rank = 1)]
fn admin_page(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
fn render_admin_page() -> ApiResult<Html<String>> {
    let text = AdminTemplateData::new().render()?;
    Ok(Html(text))
}

#[get("/")]
fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
    render_admin_page()
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct InviteData {
    email: String,
}

fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
    if let Some(user) = User::find_by_uuid(uuid, conn) {
async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
    if let Some(user) = User::find_by_uuid(uuid, conn).await {
        Ok(user)
    } else {
        err_code!("User doesn't exist", Status::NotFound.code);
@@ -256,128 +271,147 @@ fn get_user_or_404(uuid: &str, conn: &DbConn) -> ApiResult<User> {
}

#[post("/invite", data = "<data>")]
fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> JsonResult {
async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
    let data: InviteData = data.into_inner();
    let email = data.email.clone();
    if User::find_by_mail(&data.email, &conn).is_some() {
    if User::find_by_mail(&data.email, &mut conn).await.is_some() {
        err_code!("User already exists", Status::Conflict.code)
    }

    let mut user = User::new(email);

    // TODO: After try_blocks is stabilized, this can be made more readable
    // See: https://github.com/rust-lang/rust/issues/31436
    (|| {
    async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
        if CONFIG.mail_enabled() {
            mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)?;
            mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
        } else {
            let invitation = Invitation::new(user.email.clone());
            invitation.save(&conn)?;
            let invitation = Invitation::new(&user.email);
            invitation.save(conn).await
        }
    }

        user.save(&conn)
    })()
    .map_err(|e| e.with_code(Status::InternalServerError.code))?;
    _generate_invite(&user, &mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
    user.save(&mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;

    Ok(Json(user.to_json(&conn)))
    Ok(Json(user.to_json(&mut conn).await))
}

#[post("/test/smtp", data = "<data>")]
fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
    let data: InviteData = data.into_inner();

    if CONFIG.mail_enabled() {
        mail::send_test(&data.email)
        mail::send_test(&data.email).await
    } else {
        err!("Mail is not enabled")
    }
}

#[get("/logout")]
fn logout(mut cookies: Cookies, referer: Referer) -> Redirect {
    cookies.remove(Cookie::named(COOKIE_NAME));
    Redirect::to(admin_url(referer))
fn logout(cookies: &CookieJar<'_>) -> Redirect {
    cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
    Redirect::to(admin_path())
}

#[get("/users")]
fn get_users_json(_token: AdminToken, conn: DbConn) -> Json<Value> {
    let users = User::get_all(&conn);
    let users_json: Vec<Value> = users.iter().map(|u| u.to_json(&conn)).collect();
async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
    let mut users_json = Vec::new();
    for u in User::get_all(&mut conn).await {
        let mut usr = u.to_json(&mut conn).await;
        usr["UserEnabled"] = json!(u.enabled);
        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
        users_json.push(usr);
    }

    Json(Value::Array(users_json))
}

#[get("/users/overview")]
fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
    let users = User::get_all(&conn);
    let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
    let users_json: Vec<Value> = users
        .iter()
        .map(|u| {
            let mut usr = u.to_json(&conn);
            usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
            usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
            usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
            usr["user_enabled"] = json!(u.enabled);
            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
            usr["last_active"] = match u.last_active(&conn) {
                Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
                None => json!("Never"),
            };
            usr
        })
        .collect();
async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
    let mut users_json = Vec::new();
    for u in User::get_all(&mut conn).await {
        let mut usr = u.to_json(&mut conn).await;
        usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
        usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
        usr["user_enabled"] = json!(u.enabled);
        usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
        usr["last_active"] = match u.last_active(&mut conn).await {
            Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
            None => json!("Never"),
        };
        users_json.push(usr);
    }

    let text = AdminTemplateData::with_data("admin/users", json!(users_json)).render()?;
    Ok(Html(text))
}
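A pattern worth noting in these rewrites: every .iter().map(...).collect() chain becomes an explicit for loop. That is because the closure bodies now need to .await database calls, and async closures are not available on stable Rust. The transformation is mechanical; a generic example:

```rust
// Before: xs.iter().map(|x| lookup(*x)).collect() -- impossible once
// lookup() is async. After: an explicit loop that can await each call.
async fn lookup(x: u32) -> u32 {
    x * 2
}

async fn collect_async(xs: &[u32]) -> Vec<u32> {
    let mut out = Vec::new();
    for &x in xs {
        out.push(lookup(x).await);
    }
    out
}
```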
#[get("/users/<uuid>")]
|
||||
fn get_user_json(uuid: String, _token: AdminToken, conn: DbConn) -> JsonResult {
|
||||
let user = get_user_or_404(&uuid, &conn)?;
|
||||
|
||||
Ok(Json(user.to_json(&conn)))
|
||||
async fn get_user_json(uuid: String, _token: AdminToken, mut conn: DbConn) -> JsonResult {
|
||||
let u = get_user_or_404(&uuid, &mut conn).await?;
|
||||
let mut usr = u.to_json(&mut conn).await;
|
||||
usr["UserEnabled"] = json!(u.enabled);
|
||||
usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
|
||||
Ok(Json(usr))
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/delete")]
|
||||
fn delete_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let user = get_user_or_404(&uuid, &conn)?;
|
||||
user.delete(&conn)
|
||||
async fn delete_user(uuid: String, _token: AdminToken, mut conn: DbConn, ip: ClientIp) -> EmptyResult {
|
||||
let user = get_user_or_404(&uuid, &mut conn).await?;
|
||||
|
||||
// Get the user_org records before deleting the actual user
|
||||
let user_orgs = UserOrganization::find_any_state_by_user(&uuid, &mut conn).await;
|
||||
let res = user.delete(&mut conn).await;
|
||||
|
||||
for user_org in user_orgs {
|
||||
log_event(
|
||||
EventType::OrganizationUserRemoved as i32,
|
||||
&user_org.uuid,
|
||||
user_org.org_uuid,
|
||||
String::from(ACTING_ADMIN_USER),
|
||||
14, // Use UnknownBrowser type
|
||||
&ip.ip,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/deauth")]
|
||||
fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &conn)?;
|
||||
Device::delete_all_by_user(&user.uuid, &conn)?;
|
||||
async fn deauth_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &mut conn).await?;
|
||||
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||
user.reset_security_stamp();
|
||||
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/disable")]
|
||||
fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &conn)?;
|
||||
Device::delete_all_by_user(&user.uuid, &conn)?;
|
||||
async fn disable_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &mut conn).await?;
|
||||
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||
user.reset_security_stamp();
|
||||
user.enabled = false;
|
||||
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/enable")]
|
||||
fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &conn)?;
|
||||
async fn enable_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &mut conn).await?;
|
||||
user.enabled = true;
|
||||
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/<uuid>/remove-2fa")]
|
||||
fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &conn)?;
|
||||
TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
|
||||
async fn remove_2fa(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let mut user = get_user_or_404(&uuid, &mut conn).await?;
|
||||
TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||
user.totp_recover = None;
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
@@ -388,13 +422,19 @@ struct UserOrgTypeData {
|
||||
}
|
||||
|
||||
#[post("/users/org_type", data = "<data>")]
|
||||
fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
async fn update_user_org_type(
|
||||
data: Json<UserOrgTypeData>,
|
||||
_token: AdminToken,
|
||||
mut conn: DbConn,
|
||||
ip: ClientIp,
|
||||
) -> EmptyResult {
|
||||
let data: UserOrgTypeData = data.into_inner();
|
||||
|
||||
let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
|
||||
Some(user) => user,
|
||||
None => err!("The specified user isn't member of the organization"),
|
||||
};
|
||||
let mut user_to_edit =
|
||||
match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("The specified user isn't member of the organization"),
|
||||
};
|
||||
|
||||
let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
|
||||
Some(new_type) => new_type as i32,
|
||||
@@ -402,46 +442,66 @@ fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: D
|
||||
};
|
||||
|
||||
if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
|
||||
// Removing owner permmission, check that there are at least another owner
|
||||
let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();
|
||||
|
||||
if num_owners <= 1 {
|
||||
// Removing owner permmission, check that there is at least one other confirmed owner
|
||||
if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 {
|
||||
err!("Can't change the type of the last owner")
|
||||
}
|
||||
}
|
||||
|
||||
user_to_edit.atype = new_type as i32;
|
||||
user_to_edit.save(&conn)
|
||||
// This check is also done at api::organizations::{accept_invite(), _confirm_invite, _activate_user(), edit_user()}, update_user_org_type
|
||||
// It returns different error messages per function.
|
||||
if new_type < UserOrgType::Admin {
|
||||
match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
|
||||
Ok(_) => {}
|
||||
Err(OrgPolicyErr::TwoFactorMissing) => {
|
||||
err!("You cannot modify this user to this type because it has no two-step login method activated");
|
||||
}
|
||||
Err(OrgPolicyErr::SingleOrgEnforced) => {
|
||||
err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log_event(
|
||||
EventType::OrganizationUserUpdated as i32,
|
||||
&user_to_edit.uuid,
|
||||
data.org_uuid,
|
||||
String::from(ACTING_ADMIN_USER),
|
||||
14, // Use UnknownBrowser type
|
||||
&ip.ip,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
|
||||
user_to_edit.atype = new_type;
|
||||
user_to_edit.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/users/update_revision")]
|
||||
fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
User::update_all_revisions(&conn)
|
||||
async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
User::update_all_revisions(&mut conn).await
|
||||
}
|
||||
|
||||
#[get("/organizations/overview")]
|
||||
fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
|
||||
let organizations = Organization::get_all(&conn);
|
||||
let organizations_json: Vec<Value> = organizations
|
||||
.iter()
|
||||
.map(|o| {
|
||||
let mut org = o.to_json();
|
||||
org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
|
||||
org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
|
||||
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
|
||||
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
|
||||
org
|
||||
})
|
||||
.collect();
|
||||
async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
|
||||
let mut organizations_json = Vec::new();
|
||||
for o in Organization::get_all(&mut conn).await {
|
||||
let mut org = o.to_json();
|
||||
org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await);
|
||||
org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await);
|
||||
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
|
||||
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
|
||||
organizations_json.push(org);
|
||||
}
|
||||
|
||||
let text = AdminTemplateData::with_data("admin/organizations", json!(organizations_json)).render()?;
|
||||
Ok(Html(text))
|
||||
}
|
||||
|
||||
#[post("/organizations/<uuid>/delete")]
|
||||
fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
|
||||
org.delete(&conn)
|
||||
async fn delete_organization(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
let org = Organization::find_by_uuid(&uuid, &mut conn).await.map_res("Organization doesn't exist")?;
|
||||
org.delete(&mut conn).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -459,63 +519,37 @@ struct GitCommit {
|
||||
sha: String,
|
||||
}
|
||||
|
||||
fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
|
||||
async fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
|
||||
let github_api = get_reqwest_client();
|
||||
|
||||
Ok(github_api.get(url).timeout(Duration::from_secs(10)).send()?.error_for_status()?.json::<T>()?)
|
||||
Ok(github_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
|
||||
}
|
||||
|
||||
fn has_http_access() -> bool {
|
||||
async fn has_http_access() -> bool {
|
||||
let http_access = get_reqwest_client();
|
||||
|
||||
match http_access.head("https://github.com/dani-garcia/vaultwarden").timeout(Duration::from_secs(10)).send() {
|
||||
match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
|
||||
Ok(r) => r.status().is_success(),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
#[get("/diagnostics")]
|
||||
fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult<Html<String>> {
|
||||
use crate::util::read_file_string;
|
||||
use chrono::prelude::*;
|
||||
use std::net::ToSocketAddrs;
|
||||
|
||||
// Get current running versions
|
||||
let web_vault_version: WebVaultVersion =
|
||||
match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "bwrs-version.json")) {
|
||||
Ok(s) => serde_json::from_str(&s)?,
|
||||
_ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
|
||||
Ok(s) => serde_json::from_str(&s)?,
|
||||
_ => WebVaultVersion {
|
||||
version: String::from("Version file missing"),
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
// Execute some environment checks
|
||||
let running_within_docker = is_running_in_docker();
|
||||
let docker_base_image = docker_base_image();
|
||||
let has_http_access = has_http_access();
|
||||
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|
||||
|| env::var_os("http_proxy").is_some()
|
||||
|| env::var_os("HTTPS_PROXY").is_some()
|
||||
|| env::var_os("https_proxy").is_some();
|
||||
|
||||
// Check if we are able to resolve DNS entries
|
||||
let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
|
||||
Ok(Some(a)) => a.ip().to_string(),
|
||||
_ => "Could not resolve domain name.".to_string(),
|
||||
};
|
||||
|
||||
use cached::proc_macro::cached;
|
||||
/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already.
|
||||
/// It will cache this function for 300 seconds (5 minutes) which should prevent the exhaustion of the rate limit.
|
||||
#[cached(time = 300, sync_writes = true)]
|
||||
async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
|
||||
// If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
|
||||
// TODO: Maybe we need to cache this using a LazyStatic or something. Github only allows 60 requests per hour, and we use 3 here already.
|
||||
let (latest_release, latest_commit, latest_web_build) = if has_http_access {
|
||||
if has_http_access {
|
||||
(
|
||||
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest") {
|
||||
match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
|
||||
.await
|
||||
{
|
||||
Ok(r) => r.tag_name,
|
||||
_ => "-".to_string(),
|
||||
},
|
||||
match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main") {
|
||||
match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await
|
||||
{
|
||||
Ok(mut c) => {
|
||||
c.sha.truncate(8);
|
||||
c.sha
|
||||
@@ -529,7 +563,9 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
|
||||
} else {
|
||||
match get_github_api::<GitRelease>(
|
||||
"https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
|
||||
) {
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
|
||||
_ => "-".to_string(),
|
||||
}
|
||||
@@ -537,8 +573,43 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
|
||||
)
|
||||
} else {
|
||||
("-".to_string(), "-".to_string(), "-".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
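The #[cached] attribute above memoizes the function's return value, keyed on its arguments, for 300 seconds, so repeated loads of the diagnostics page reuse one set of GitHub API responses instead of burning through the 60-requests-per-hour unauthenticated limit. A smaller illustration of the same mechanism from the cached crate (standalone example, not vaultwarden code):

```rust
use cached::proc_macro::cached;

// The first call per distinct x pays the full cost; repeat calls within
// 60 seconds are served from the in-memory cache.
#[cached(time = 60)]
fn slow_square(x: u64) -> u64 {
    std::thread::sleep(std::time::Duration::from_millis(100));
    x * x
}
```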
#[get("/diagnostics")]
|
||||
async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) -> ApiResult<Html<String>> {
|
||||
use chrono::prelude::*;
|
||||
use std::net::ToSocketAddrs;
|
||||
|
||||
// Get current running versions
|
||||
let web_vault_version: WebVaultVersion =
|
||||
match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
|
||||
Ok(s) => serde_json::from_str(&s)?,
|
||||
_ => match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
|
||||
Ok(s) => serde_json::from_str(&s)?,
|
||||
_ => WebVaultVersion {
|
||||
version: String::from("Version file missing"),
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
// Execute some environment checks
|
||||
let running_within_docker = is_running_in_docker();
|
||||
let has_http_access = has_http_access().await;
|
||||
let uses_proxy = env::var_os("HTTP_PROXY").is_some()
|
||||
|| env::var_os("http_proxy").is_some()
|
||||
|| env::var_os("HTTPS_PROXY").is_some()
|
||||
|| env::var_os("https_proxy").is_some();
|
||||
|
||||
// Check if we are able to resolve DNS entries
|
||||
let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
|
||||
Ok(Some(a)) => a.ip().to_string(),
|
||||
_ => "Could not resolve domain name.".to_string(),
|
||||
};
|
||||
|
||||
let (latest_release, latest_commit, latest_web_build) =
|
||||
get_release_info(has_http_access, running_within_docker).await;
|
||||
|
||||
let ip_header_name = match &ip_header.0 {
|
||||
Some(h) => h,
|
||||
_ => "",
|
||||
@@ -552,7 +623,7 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
|
||||
"web_vault_version": web_vault_version.version,
|
||||
"latest_web_build": latest_web_build,
|
||||
"running_within_docker": running_within_docker,
|
||||
"docker_base_image": docker_base_image,
|
||||
"docker_base_image": docker_base_image(),
|
||||
"has_http_access": has_http_access,
|
||||
"ip_header_exists": &ip_header.0.is_some(),
|
||||
"ip_header_match": ip_header_name == CONFIG.ip_header(),
|
||||
@@ -560,8 +631,8 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
|
||||
"ip_header_config": &CONFIG.ip_header(),
|
||||
"uses_proxy": uses_proxy,
|
||||
"db_type": *DB_TYPE,
|
||||
"db_version": get_sql_server_version(&conn),
|
||||
"admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
|
||||
"db_version": get_sql_server_version(&mut conn).await,
|
||||
"admin_url": format!("{}/diagnostics", admin_url()),
|
||||
"overrides": &CONFIG.get_overrides().join(", "),
|
||||
"server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
|
||||
"server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
|
||||
@@ -589,9 +660,9 @@ fn delete_config(_token: AdminToken) -> EmptyResult {
|
||||
}
|
||||
|
||||
#[post("/config/backup_db")]
|
||||
fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
|
||||
if *CAN_BACKUP {
|
||||
backup_database(&conn)
|
||||
backup_database(&mut conn).await
|
||||
} else {
|
||||
err!("Can't back up current DB (Only SQLite supports this feature)");
|
||||
}
|
||||
@@ -599,33 +670,34 @@ fn backup_db(_token: AdminToken, conn: DbConn) -> EmptyResult {
|
||||
|
||||
pub struct AdminToken {}
|
||||
|
||||
impl<'a, 'r> FromRequest<'a, 'r> for AdminToken {
|
||||
#[rocket::async_trait]
|
||||
impl<'r> FromRequest<'r> for AdminToken {
|
||||
type Error = &'static str;
|
||||
|
||||
fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
|
||||
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
|
||||
if CONFIG.disable_admin_token() {
|
||||
Outcome::Success(AdminToken {})
|
||||
Outcome::Success(Self {})
|
||||
} else {
|
||||
let mut cookies = request.cookies();
|
||||
let cookies = request.cookies();
|
||||
|
||||
let access_token = match cookies.get(COOKIE_NAME) {
|
||||
Some(cookie) => cookie.value(),
|
||||
None => return Outcome::Forward(()), // If there is no cookie, redirect to login
|
||||
None => return Outcome::Failure((Status::Unauthorized, "Unauthorized")),
|
||||
};
|
||||
|
||||
let ip = match request.guard::<ClientIp>() {
|
||||
let ip = match ClientIp::from_request(request).await {
|
||||
Outcome::Success(ip) => ip.ip,
|
||||
_ => err_handler!("Error getting Client IP"),
|
||||
};
|
||||
|
||||
if decode_admin(access_token).is_err() {
|
||||
// Remove admin cookie
|
||||
cookies.remove(Cookie::named(COOKIE_NAME));
|
||||
cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
|
||||
error!("Invalid or expired admin JWT. IP: {}.", ip);
|
||||
return Outcome::Forward(());
|
||||
return Outcome::Failure((Status::Unauthorized, "Session expired"));
|
||||
}
|
||||
|
||||
Outcome::Success(AdminToken {})
|
||||
Outcome::Success(Self {})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
src/api/core/accounts.rs (file name inferred from the account endpoints below)
@@ -1,10 +1,12 @@
use chrono::Utc;
use rocket_contrib::json::Json;
use rocket::serde::json::Json;
use serde_json::Value;

use crate::{
    api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
    api::{
        core::log_user_event, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType,
    },
    auth::{decode_delete, decode_invite, decode_verify_email, ClientIp, Headers},
    crypto,
    db::{models::*, DbConn},
    mail, CONFIG,
@@ -34,12 +36,15 @@ pub fn routes() -> Vec<rocket::Route> {
        password_hint,
        prelogin,
        verify_password,
        api_key,
        rotate_api_key,
        get_known_device,
    ]
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct RegisterData {
pub struct RegisterData {
    Email: String,
    Kdf: Option<i32>,
    KdfIterations: Option<i32>,
@@ -60,41 +65,74 @@ struct KeysData {
    PublicKey: String,
}

/// Trims whitespace from password hints, and converts blank password hints to `None`.
fn clean_password_hint(password_hint: &Option<String>) -> Option<String> {
    match password_hint {
        None => None,
        Some(h) => match h.trim() {
            "" => None,
            ht => Some(ht.to_string()),
        },
    }
}

fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult {
    if password_hint.is_some() && !CONFIG.password_hints_allowed() {
        err!("Password hints have been disabled by the administrator. Remove the hint and try again.");
    }
    Ok(())
}
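The two helpers above centralize hint handling for both registration and password changes: hints are trimmed, blank hints become None, and a present hint is rejected outright when the administrator has disabled hints. A sketch of unit tests one might add for them — hypothetical, not part of this diff:

```rust
#[cfg(test)]
mod password_hint_tests {
    use super::*;

    #[test]
    fn hint_is_trimmed_and_blank_becomes_none() {
        assert_eq!(clean_password_hint(&Some("  hint  ".to_string())), Some("hint".to_string()));
        assert_eq!(clean_password_hint(&Some("   ".to_string())), None);
        assert_eq!(clean_password_hint(&None), None);
    }
}
```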
#[post("/accounts/register", data = "<data>")]
|
||||
fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
||||
async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
|
||||
_register(data, conn).await
|
||||
}
|
||||
|
||||
pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> JsonResult {
|
||||
let data: RegisterData = data.into_inner().data;
|
||||
let email = data.Email.to_lowercase();
|
||||
|
||||
let mut user = match User::find_by_mail(&email, &conn) {
|
||||
Some(user) => {
|
||||
// Check if the length of the username exceeds 50 characters (Same is Upstream Bitwarden)
|
||||
// This also prevents issues with very long usernames causing to large JWT's. See #2419
|
||||
if let Some(ref name) = data.Name {
|
||||
if name.len() > 50 {
|
||||
err!("The field Name must be a string with a maximum length of 50.");
|
||||
}
|
||||
}
|
||||
|
||||
// Check against the password hint setting here so if it fails, the user
|
||||
// can retry without losing their invitation below.
|
||||
let password_hint = clean_password_hint(&data.MasterPasswordHint);
|
||||
enforce_password_hint_setting(&password_hint)?;
|
||||
|
||||
let mut verified_by_invite = false;
|
||||
|
||||
let mut user = match User::find_by_mail(&email, &mut conn).await {
|
||||
Some(mut user) => {
|
||||
if !user.password_hash.is_empty() {
|
||||
if CONFIG.is_signup_allowed(&email) {
|
||||
err!("User already exists")
|
||||
} else {
|
||||
err!("Registration not allowed or user already exists")
|
||||
}
|
||||
err!("Registration not allowed or user already exists")
|
||||
}
|
||||
|
||||
if let Some(token) = data.Token {
|
||||
let claims = decode_invite(&token)?;
|
||||
if claims.email == email {
|
||||
// Verify the email address when signing up via a valid invite token
|
||||
verified_by_invite = true;
|
||||
user.verified_at = Some(Utc::now().naive_utc());
|
||||
user
|
||||
} else {
|
||||
err!("Registration email does not match invite email")
|
||||
}
|
||||
} else if Invitation::take(&email, &conn) {
|
||||
for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() {
|
||||
} else if Invitation::take(&email, &mut conn).await {
|
||||
for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
|
||||
user_org.status = UserOrgStatus::Accepted as i32;
|
||||
user_org.save(&conn)?;
|
||||
user_org.save(&mut conn).await?;
|
||||
}
|
||||
|
||||
user
|
||||
} else if CONFIG.is_signup_allowed(&email) {
|
||||
// check if it's invited by emergency contact
|
||||
match EmergencyAccess::find_invited_by_grantee_email(&data.Email, &conn) {
|
||||
Some(_) => user,
|
||||
_ => err!("Account with this email already exists"),
|
||||
}
|
||||
} else if CONFIG.is_signup_allowed(&email)
|
||||
|| EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()
|
||||
{
|
||||
user
|
||||
} else {
|
||||
err!("Registration not allowed or user already exists")
|
||||
}
|
||||
@@ -103,7 +141,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
||||
// Order is important here; the invitation check must come first
|
||||
// because the vaultwarden admin can invite anyone, regardless
|
||||
// of other signup restrictions.
|
||||
if Invitation::take(&email, &conn) || CONFIG.is_signup_allowed(&email) {
|
||||
if Invitation::take(&email, &mut conn).await || CONFIG.is_signup_allowed(&email) {
|
||||
User::new(email.clone())
|
||||
} else {
|
||||
err!("Registration not allowed or user already exists")
|
||||
@@ -112,7 +150,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
||||
};
|
||||
|
||||
// Make sure we don't leave a lingering invitation.
|
||||
Invitation::take(&email, &conn);
|
||||
Invitation::take(&email, &mut conn).await;
|
||||
|
||||
if let Some(client_kdf_iter) = data.KdfIterations {
|
||||
user.client_kdf_iter = client_kdf_iter;
|
||||
@@ -124,73 +162,75 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
|
||||
|
||||
user.set_password(&data.MasterPasswordHash, None);
|
||||
user.akey = data.Key;
|
||||
user.password_hint = password_hint;
|
||||
|
||||
// Add extra fields if present
|
||||
if let Some(name) = data.Name {
|
||||
user.name = name;
|
||||
}
|
||||
|
||||
if let Some(hint) = data.MasterPasswordHint {
|
||||
user.password_hint = Some(hint);
|
||||
}
|
||||
|
||||
if let Some(keys) = data.Keys {
|
||||
user.private_key = Some(keys.EncryptedPrivateKey);
|
||||
user.public_key = Some(keys.PublicKey);
|
||||
}
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
if CONFIG.signups_verify() {
|
||||
if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid) {
|
||||
if CONFIG.signups_verify() && !verified_by_invite {
|
||||
if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
|
||||
error!("Error sending welcome email: {:#?}", e);
|
||||
}
|
||||
|
||||
user.last_verifying_at = Some(user.created_at);
|
||||
} else if let Err(e) = mail::send_welcome(&user.email) {
|
||||
} else if let Err(e) = mail::send_welcome(&user.email).await {
|
||||
error!("Error sending welcome email: {:#?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await?;
|
||||
Ok(Json(json!({
|
||||
"Object": "register",
|
||||
"CaptchaBypassToken": "",
|
||||
})))
|
||||
}
|
||||
|
||||
#[get("/accounts/profile")]
|
||||
fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
|
||||
Json(headers.user.to_json(&conn))
|
||||
async fn profile(headers: Headers, mut conn: DbConn) -> Json<Value> {
|
||||
Json(headers.user.to_json(&mut conn).await)
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
#[allow(non_snake_case)]
|
||||
struct ProfileData {
|
||||
#[serde(rename = "Culture")]
|
||||
_Culture: String, // Ignored, always use en-US
|
||||
MasterPasswordHint: Option<String>,
|
||||
// Culture: String, // Ignored, always use en-US
|
||||
// MasterPasswordHint: Option<String>, // Ignored, has been moved to ChangePassData
|
||||
Name: String,
|
||||
}
|
||||
|
||||
#[put("/accounts/profile", data = "<data>")]
|
||||
fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||
post_profile(data, headers, conn)
|
||||
async fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||
post_profile(data, headers, conn).await
|
||||
}
|
||||
|
||||
#[post("/accounts/profile", data = "<data>")]
|
||||
fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||
async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let data: ProfileData = data.into_inner().data;
|
||||
|
||||
let mut user = headers.user;
|
||||
// Check if the length of the username exceeds 50 characters (Same is Upstream Bitwarden)
|
||||
// This also prevents issues with very long usernames causing to large JWT's. See #2419
|
||||
if data.Name.len() > 50 {
|
||||
err!("The field Name must be a string with a maximum length of 50.");
|
||||
}
|
||||
|
||||
let mut user = headers.user;
|
||||
user.name = data.Name;
|
||||
user.password_hint = match data.MasterPasswordHint {
|
||||
Some(ref h) if h.is_empty() => None,
|
||||
_ => data.MasterPasswordHint,
|
||||
};
|
||||
user.save(&conn)?;
|
||||
Ok(Json(user.to_json(&conn)))
|
||||
|
||||
user.save(&mut conn).await?;
|
||||
Ok(Json(user.to_json(&mut conn).await))
|
||||
}
|
||||
|
||||
#[get("/users/<uuid>/public-key")]
|
||||
fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult {
|
||||
let user = match User::find_by_uuid(&uuid, &conn) {
|
||||
async fn get_public_keys(uuid: String, _headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let user = match User::find_by_uuid(&uuid, &mut conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("User doesn't exist"),
|
||||
};
|
||||
@@ -203,7 +243,7 @@ fn get_public_keys(uuid: String, _headers: Headers, conn: DbConn) -> JsonResult
|
||||
}
|
||||
|
||||
#[post("/accounts/keys", data = "<data>")]
|
||||
fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> JsonResult {
|
||||
async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, mut conn: DbConn) -> JsonResult {
|
||||
let data: KeysData = data.into_inner().data;
|
||||
|
||||
let mut user = headers.user;
|
||||
@@ -211,7 +251,7 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
|
||||
user.private_key = Some(data.EncryptedPrivateKey);
|
||||
user.public_key = Some(data.PublicKey);
|
||||
|
||||
user.save(&conn)?;
|
||||
user.save(&mut conn).await?;
|
||||
|
||||
Ok(Json(json!({
|
||||
"PrivateKey": user.private_key,
|
||||
@@ -225,11 +265,17 @@ fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> Json
|
||||
struct ChangePassData {
|
||||
MasterPasswordHash: String,
|
||||
NewMasterPasswordHash: String,
|
||||
MasterPasswordHint: Option<String>,
|
||||
Key: String,
|
||||
}
|
||||
|
||||
#[post("/accounts/password", data = "<data>")]
|
||||
fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
async fn post_password(
|
||||
data: JsonUpcase<ChangePassData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
ip: ClientIp,
|
||||
) -> EmptyResult {
|
||||
let data: ChangePassData = data.into_inner().data;
|
||||
let mut user = headers.user;
|
||||
|
||||
@@ -237,12 +283,17 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
|
||||
err!("Invalid password")
|
||||
}
|
||||
|
||||
user.password_hint = clean_password_hint(&data.MasterPasswordHint);
|
||||
enforce_password_hint_setting(&user.password_hint)?;
|
||||
|
||||
log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
|
||||
|
||||
user.set_password(
|
||||
&data.NewMasterPasswordHash,
|
||||
Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
|
||||
);
|
||||
user.akey = data.Key;
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -257,7 +308,7 @@ struct ChangeKdfData {
|
||||
}
|
||||
|
||||
#[post("/accounts/kdf", data = "<data>")]
|
||||
fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
let data: ChangeKdfData = data.into_inner().data;
|
||||
let mut user = headers.user;
|
||||
|
||||
@@ -269,7 +320,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->
|
||||
user.client_kdf_type = data.Kdf;
|
||||
user.set_password(&data.NewMasterPasswordHash, None);
|
||||
user.akey = data.Key;
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -292,7 +343,13 @@ struct KeyData {
|
||||
}
|
||||
|
||||
#[post("/accounts/key", data = "<data>")]
|
||||
fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
|
||||
async fn post_rotatekey(
|
||||
data: JsonUpcase<KeyData>,
|
||||
headers: Headers,
|
||||
mut conn: DbConn,
|
||||
ip: ClientIp,
|
||||
nt: Notify<'_>,
|
||||
) -> EmptyResult {
|
||||
let data: KeyData = data.into_inner().data;
|
||||
|
||||
if !headers.user.check_valid_password(&data.MasterPasswordHash) {
|
||||
@@ -303,7 +360,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
|
||||
|
||||
// Update folder data
|
||||
for folder_data in data.Folders {
|
||||
let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &conn) {
|
||||
let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &mut conn).await {
|
||||
Some(folder) => folder,
|
||||
None => err!("Folder doesn't exist"),
|
||||
};
|
||||
@@ -313,14 +370,14 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
|
||||
}
|
||||
|
||||
saved_folder.name = folder_data.Name;
|
||||
saved_folder.save(&conn)?
|
||||
saved_folder.save(&mut conn).await?
|
||||
}
|
||||
|
||||
// Update cipher data
|
||||
use super::ciphers::update_cipher_from_data;
|
||||
|
||||
for cipher_data in data.Ciphers {
|
||||
let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn) {
|
||||
let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
|
||||
Some(cipher) => cipher,
|
||||
None => err!("Cipher doesn't exist"),
|
||||
};
|
||||
@@ -331,7 +388,8 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
|
||||
|
||||
// Prevent triggering cipher updates via WebSockets by settings UpdateType::None
|
||||
// The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
|
||||
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &conn, &nt, UpdateType::None)?
|
||||
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None)
|
||||
.await?
|
||||
}
|
||||
|
||||
// Update user data
|
||||
@@ -341,11 +399,11 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
|
||||
user.private_key = Some(data.PrivateKey);
|
||||
user.reset_security_stamp();
|
||||
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/accounts/security-stamp", data = "<data>")]
|
||||
fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
async fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
let data: PasswordData = data.into_inner().data;
|
||||
let mut user = headers.user;
|
||||
|
||||
@@ -353,9 +411,9 @@ fn post_sstamp(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -
|
||||
err!("Invalid password")
|
||||
}
|
||||
|
||||
Device::delete_all_by_user(&user.uuid, &conn)?;
|
||||
Device::delete_all_by_user(&user.uuid, &mut conn).await?;
|
||||
user.reset_security_stamp();
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -366,7 +424,7 @@ struct EmailTokenData {
|
||||
}
|
||||
|
||||
#[post("/accounts/email-token", data = "<data>")]
|
||||
fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
let data: EmailTokenData = data.into_inner().data;
|
||||
let mut user = headers.user;
|
||||
|
||||
@@ -374,7 +432,7 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
|
||||
err!("Invalid password")
|
||||
}
|
||||
|
||||
if User::find_by_mail(&data.NewEmail, &conn).is_some() {
|
||||
if User::find_by_mail(&data.NewEmail, &mut conn).await.is_some() {
|
||||
err!("Email already in use");
|
||||
}
|
||||
|
||||
@@ -382,17 +440,17 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
|
||||
err!("Email domain not allowed");
|
||||
}
|
||||
|
||||
let token = crypto::generate_token(6)?;
|
||||
let token = crypto::generate_email_token(6);
|
||||
|
||||
if CONFIG.mail_enabled() {
|
||||
if let Err(e) = mail::send_change_email(&data.NewEmail, &token) {
|
||||
if let Err(e) = mail::send_change_email(&data.NewEmail, &token).await {
|
||||
error!("Error sending change-email email: {:#?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
user.email_new = Some(data.NewEmail);
|
||||
user.email_new_token = Some(token);
|
||||
user.save(&conn)
|
||||
user.save(&mut conn).await
|
||||
}
|
||||
|
||||
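generate_email_token replaces the old fallible generate_token here: the goal is a fixed-length string of random decimal digits suitable for a change-of-email confirmation code. A self-contained approximation of what such a helper does — the real implementation lives in src/crypto.rs and is not shown in this diff:

```rust
use rand::Rng;

// Returns `digits` random decimal digits, e.g. "049213" for digits = 6.
fn generate_email_token(digits: u32) -> String {
    let mut rng = rand::thread_rng();
    (0..digits).map(|_| char::from(b'0' + rng.gen_range(0..10) as u8)).collect()
}
```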
#[derive(Deserialize)]
@@ -407,7 +465,7 @@ struct ChangeEmailData {
}

#[post("/accounts/email", data = "<data>")]
fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: ChangeEmailData = data.into_inner().data;
    let mut user = headers.user;

@@ -415,7 +473,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
        err!("Invalid password")
    }

    if User::find_by_mail(&data.NewEmail, &conn).is_some() {
    if User::find_by_mail(&data.NewEmail, &mut conn).await.is_some() {
        err!("Email already in use");
    }

@@ -450,18 +508,18 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
    user.set_password(&data.NewMasterPasswordHash, None);
    user.akey = data.Key;

    user.save(&conn)
    user.save(&mut conn).await
}

#[post("/accounts/verify-email")]
fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
async fn post_verify_email(headers: Headers) -> EmptyResult {
    let user = headers.user;

    if !CONFIG.mail_enabled() {
        err!("Cannot verify email address");
    }

    if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
    if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
        error!("Error sending verify_email email: {:#?}", e);
    }

@@ -476,10 +534,10 @@ struct VerifyEmailTokenData {
}

#[post("/accounts/verify-email-token", data = "<data>")]
fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn) -> EmptyResult {
async fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, mut conn: DbConn) -> EmptyResult {
    let data: VerifyEmailTokenData = data.into_inner().data;

    let mut user = match User::find_by_uuid(&data.UserId, &conn) {
    let mut user = match User::find_by_uuid(&data.UserId, &mut conn).await {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };
@@ -494,7 +552,7 @@ fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, conn: DbConn)
    user.verified_at = Some(Utc::now().naive_utc());
    user.last_verifying_at = None;
    user.login_verify_count = 0;
    if let Err(e) = user.save(&conn) {
    if let Err(e) = user.save(&mut conn).await {
        error!("Error saving email verification: {:#?}", e);
    }

@@ -508,14 +566,12 @@ struct DeleteRecoverData {
}

#[post("/accounts/delete-recover", data = "<data>")]
fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, conn: DbConn) -> EmptyResult {
async fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, mut conn: DbConn) -> EmptyResult {
    let data: DeleteRecoverData = data.into_inner().data;

    let user = User::find_by_mail(&data.Email, &conn);

    if CONFIG.mail_enabled() {
        if let Some(user) = user {
            if let Err(e) = mail::send_delete_account(&user.email, &user.uuid) {
        if let Some(user) = User::find_by_mail(&data.Email, &mut conn).await {
            if let Err(e) = mail::send_delete_account(&user.email, &user.uuid).await {
                error!("Error sending delete account email: {:#?}", e);
            }
        }
@@ -537,10 +593,10 @@ struct DeleteRecoverTokenData {
}

#[post("/accounts/delete-recover-token", data = "<data>")]
fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbConn) -> EmptyResult {
async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, mut conn: DbConn) -> EmptyResult {
    let data: DeleteRecoverTokenData = data.into_inner().data;

    let user = match User::find_by_uuid(&data.UserId, &conn) {
    let user = match User::find_by_uuid(&data.UserId, &mut conn).await {
        Some(user) => user,
        None => err!("User doesn't exist"),
    };
@@ -552,16 +608,16 @@ fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, conn: DbC
    if claims.sub != user.uuid {
        err!("Invalid claim");
    }
    user.delete(&conn)
    user.delete(&mut conn).await
|
||||
}
|
||||
|
||||
#[post("/accounts/delete", data = "<data>")]
|
||||
fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
delete_account(data, headers, conn)
|
||||
async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
delete_account(data, headers, conn).await
|
||||
}
|
||||
|
||||
#[delete("/accounts", data = "<data>")]
|
||||
fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||
async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||
let data: PasswordData = data.into_inner().data;
|
||||
let user = headers.user;
|
||||
|
||||
@@ -569,7 +625,7 @@ fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn
|
||||
err!("Invalid password")
|
||||
}
|
||||
|
||||
user.delete(&conn)
|
||||
user.delete(&mut conn).await
|
||||
}
|
||||
|
||||
#[get("/accounts/revision-date")]
|
||||
@@ -585,7 +641,7 @@ struct PasswordHintData {
|
||||
}
|
||||
|
||||
#[post("/accounts/password-hint", data = "<data>")]
|
||||
fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResult {
|
||||
async fn password_hint(data: JsonUpcase<PasswordHintData>, mut conn: DbConn) -> EmptyResult {
|
||||
if !CONFIG.mail_enabled() && !CONFIG.show_password_hint() {
|
||||
err!("This server is not configured to provide password hints.");
|
||||
}
|
||||
@@ -595,19 +651,18 @@ fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResul
|
||||
let data: PasswordHintData = data.into_inner().data;
|
||||
let email = &data.Email;
|
||||
|
||||
match User::find_by_mail(email, &conn) {
|
||||
match User::find_by_mail(email, &mut conn).await {
|
||||
None => {
|
||||
// To prevent user enumeration, act as if the user exists.
|
||||
if CONFIG.mail_enabled() {
|
||||
// There is still a timing side channel here in that the code
|
||||
// paths that send mail take noticeably longer than ones that
|
||||
// don't. Add a randomized sleep to mitigate this somewhat.
|
||||
use rand::{thread_rng, Rng};
|
||||
let mut rng = thread_rng();
|
||||
let base = 1000;
|
||||
use rand::{rngs::SmallRng, Rng, SeedableRng};
|
||||
let mut rng = SmallRng::from_entropy();
|
||||
let delta: i32 = 100;
|
||||
let sleep_ms = (base + rng.gen_range(-delta..=delta)) as u64;
|
||||
std::thread::sleep(std::time::Duration::from_millis(sleep_ms));
|
||||
let sleep_ms = (1_000 + rng.gen_range(-delta..=delta)) as u64;
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
|
||||
Ok(())
|
||||
} else {
|
||||
err!(NO_HINT);
|
||||
@@ -616,7 +671,7 @@ fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResul
|
||||
Some(user) => {
|
||||
let hint: Option<String> = user.password_hint;
|
||||
if CONFIG.mail_enabled() {
|
||||
mail::send_password_hint(email, hint)?;
|
||||
mail::send_password_hint(email, hint).await?;
|
||||
Ok(())
|
||||
} else if let Some(hint) = hint {
|
||||
err!(format!("Your password hint is: {}", hint));
|
||||
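The hunk above swaps a blocking `std::thread::sleep` for `tokio::time::sleep`: under Rocket 0.5's async runtime a thread sleep would stall a whole executor worker, while the tokio sleep yields. A minimal, self-contained sketch of the jitter, assuming rand 0.8 with the `small_rng` feature and a tokio runtime (the function name is illustrative):

use rand::{rngs::SmallRng, Rng, SeedableRng};

// Sleep for roughly one second, +/- 100 ms, without blocking the executor.
async fn jittered_sleep() {
    // SmallRng is cheap and non-cryptographic; fine for timing jitter,
    // never for secrets.
    let mut rng = SmallRng::from_entropy();
    let delta: i32 = 100;
    let sleep_ms = (1_000 + rng.gen_range(-delta..=delta)) as u64;
    tokio::time::sleep(tokio::time::Duration::from_millis(sleep_ms)).await;
}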
@@ -629,15 +684,19 @@ fn password_hint(data: JsonUpcase<PasswordHintData>, conn: DbConn) -> EmptyResul

#[derive(Deserialize)]
#[allow(non_snake_case)]
struct PreloginData {
pub struct PreloginData {
Email: String,
}

#[post("/accounts/prelogin", data = "<data>")]
fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}

pub async fn _prelogin(data: JsonUpcase<PreloginData>, mut conn: DbConn) -> Json<Value> {
let data: PreloginData = data.into_inner().data;

let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &conn) {
let (kdf_type, kdf_iter) = match User::find_by_mail(&data.Email, &mut conn).await {
Some(user) => (user.client_kdf_type, user.client_kdf_iter),
None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT),
};
@@ -647,15 +706,17 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
"KdfIterations": kdf_iter
}))
}
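The `_prelogin` fallback above matters for enumeration resistance: an unknown email gets the default KDF parameters, so the response shape is identical whether or not the account exists. A sketch of that selection, with hypothetical constant values for illustration (the real ones live on `User`):

// Hypothetical defaults for illustration only.
const CLIENT_KDF_TYPE_DEFAULT: i32 = 0; // e.g. PBKDF2-SHA256
const CLIENT_KDF_ITER_DEFAULT: i32 = 100_000;

fn kdf_params(user: Option<(i32, i32)>) -> (i32, i32) {
    // Known user: report their stored KDF settings; unknown: the defaults.
    user.unwrap_or((CLIENT_KDF_TYPE_DEFAULT, CLIENT_KDF_ITER_DEFAULT))
}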

// https://github.com/bitwarden/server/blob/master/src/Api/Models/Request/Accounts/SecretVerificationRequestModel.cs
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct VerifyPasswordData {
struct SecretVerificationRequest {
MasterPasswordHash: String,
}

#[post("/accounts/verify-password", data = "<data>")]
fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
let data: VerifyPasswordData = data.into_inner().data;
fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers) -> EmptyResult {
let data: SecretVerificationRequest = data.into_inner().data;
let user = headers.user;

if !user.check_valid_password(&data.MasterPasswordHash) {
@@ -664,3 +725,50 @@ fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn

Ok(())
}

async fn _api_key(
data: JsonUpcase<SecretVerificationRequest>,
rotate: bool,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
let data: SecretVerificationRequest = data.into_inner().data;
let mut user = headers.user;

if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password")
}

if rotate || user.api_key.is_none() {
user.api_key = Some(crypto::generate_api_key());
user.save(&mut conn).await.expect("Error saving API key");
}

Ok(Json(json!({
"ApiKey": user.api_key,
"Object": "apiKey",
})))
}
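Both API-key routes below funnel into `_api_key`, differing only in the `rotate` flag; a key is minted when rotation is requested or when none exists yet. The decision reduces to something like this self-contained sketch (placeholder generator, not the real crypto):

fn ensure_api_key(existing: Option<String>, rotate: bool) -> String {
    match (existing, rotate) {
        // Keep the stored key when one exists and no rotation was asked for.
        (Some(key), false) => key,
        // First call or explicit rotation: mint a fresh key.
        _ => new_key(),
    }
}

fn new_key() -> String {
    // Placeholder; the real code draws a random key via crypto::generate_api_key().
    "generated-api-key".to_string()
}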

#[post("/accounts/api-key", data = "<data>")]
async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
_api_key(data, false, headers, conn).await
}

#[post("/accounts/rotate-api-key", data = "<data>")]
async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
_api_key(data, true, headers, conn).await
}

#[get("/devices/knowndevice/<email>/<uuid>")]
async fn get_known_device(email: String, uuid: String, mut conn: DbConn) -> String {
// This endpoint doesn't have auth header
if let Some(user) = User::find_by_mail(&email, &mut conn).await {
match Device::find_by_uuid_and_user(&uuid, &user.uuid, &mut conn).await {
Some(_) => String::from("true"),
_ => String::from("false"),
}
} else {
String::from("false")
}
}

@@ -1,11 +1,12 @@
use chrono::{Duration, Utc};
use rocket::Route;
use rocket_contrib::json::Json;
use rocket::{serde::json::Json, Route};
use serde_json::Value;
use std::borrow::Borrow;

use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString},
api::{
core::{CipherSyncData, CipherSyncType},
EmptyResult, JsonResult, JsonUpcase, NumberOrString,
},
auth::{decode_emergency_access_invite, Headers},
db::{models::*, DbConn, DbPool},
mail, CONFIG,
@@ -36,13 +37,14 @@ pub fn routes() -> Vec<Route> {
// region get

#[get("/emergency-access/trusted")]
fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult {
async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn);

let emergency_access_list_json: Vec<Value> =
emergency_access_list.iter().map(|e| e.to_json_grantee_details(&conn)).collect();
let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
for ea in emergency_access_list {
emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
}

Ok(Json(json!({
"Data": emergency_access_list_json,
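The `.map(...).collect()` to `for` loop rewrite in this hunk is forced by async: `to_json_grantee_details` now returns a future, and `await` cannot appear inside a plain iterator closure. The pattern, reduced to a sketch with a stub async function:

async fn to_json_stub(v: u32) -> u32 {
    v * 2 // stands in for an async to_json_* call
}

async fn collect_all(items: Vec<u32>) -> Vec<u32> {
    let mut out = Vec::with_capacity(items.len());
    for item in items {
        // Each element is awaited sequentially; no closure involved.
        out.push(to_json_stub(item).await);
    }
    out
}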
@@ -52,13 +54,14 @@ fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult {
}

#[get("/emergency-access/granted")]
fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult {
async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn);

let emergency_access_list_json: Vec<Value> =
emergency_access_list.iter().map(|e| e.to_json_grantor_details(&conn)).collect();
let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await;
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
for ea in emergency_access_list {
emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
}

Ok(Json(json!({
"Data": emergency_access_list_json,
@@ -68,11 +71,11 @@ fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult {
}

#[get("/emergency-access/<emer_id>")]
fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult {
async fn get_emergency_access(emer_id: String, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&conn))),
match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
None => err!("Emergency access not valid."),
}
}
@@ -81,7 +84,7 @@ fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult {

// region put/post

#[derive(Deserialize, Debug)]
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct EmergencyAccessUpdateData {
Type: NumberOrString,
@@ -90,17 +93,25 @@ struct EmergencyAccessUpdateData {
}

#[put("/emergency-access/<emer_id>", data = "<data>")]
fn put_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
post_emergency_access(emer_id, data, conn)
async fn put_emergency_access(
emer_id: String,
data: JsonUpcase<EmergencyAccessUpdateData>,
conn: DbConn,
) -> JsonResult {
post_emergency_access(emer_id, data, conn).await
}

#[post("/emergency-access/<emer_id>", data = "<data>")]
fn post_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
async fn post_emergency_access(
emer_id: String,
data: JsonUpcase<EmergencyAccessUpdateData>,
mut conn: DbConn,
) -> JsonResult {
check_emergency_access_allowed()?;

let data: EmergencyAccessUpdateData = data.into_inner().data;

let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emergency_access) => emergency_access,
None => err!("Emergency access not valid."),
};
@@ -114,7 +125,7 @@ fn post_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdate
emergency_access.wait_time_days = data.WaitTimeDays;
emergency_access.key_encrypted = data.KeyEncrypted;

emergency_access.save(&conn)?;
emergency_access.save(&mut conn).await?;
Ok(Json(emergency_access.to_json()))
}

@@ -123,12 +134,12 @@ fn post_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdate
// region delete

#[delete("/emergency-access/<emer_id>")]
fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
async fn delete_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;

let grantor_user = headers.user;

let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => {
if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
err!("Emergency access not valid.")
@@ -137,20 +148,20 @@ fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> E
}
None => err!("Emergency access not valid."),
};
emergency_access.delete(&conn)?;
emergency_access.delete(&mut conn).await?;
Ok(())
}

#[post("/emergency-access/<emer_id>/delete")]
fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_emergency_access(emer_id, headers, conn)
async fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_emergency_access(emer_id, headers, conn).await
}

// endregion

// region invite

#[derive(Deserialize, Debug)]
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct EmergencyAccessInviteData {
Email: String,
@@ -159,7 +170,7 @@ struct EmergencyAccessInviteData {
}

#[post("/emergency-access/invite", data = "<data>")]
fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;

let data: EmergencyAccessInviteData = data.into_inner().data;
@@ -180,10 +191,10 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
err!("You can not set yourself as an emergency contact.")
}

let grantee_user = match User::find_by_mail(&email, &conn) {
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
None => {
if !CONFIG.signups_allowed() {
err!(format!("Grantee user does not exist: {}", email))
if !CONFIG.invitations_allowed() {
err!(format!("Grantee user does not exist: {}", &email))
}

if !CONFIG.is_email_domain_allowed(&email) {
@@ -191,12 +202,12 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
}

if !CONFIG.mail_enabled() {
let invitation = Invitation::new(email.clone());
invitation.save(&conn)?;
let invitation = Invitation::new(&email);
invitation.save(&mut conn).await?;
}

let mut user = User::new(email.clone());
user.save(&conn)?;
user.save(&mut conn).await?;
user
}
Some(user) => user,
@@ -206,39 +217,34 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
&grantor_user.uuid,
&grantee_user.uuid,
&grantee_user.email,
&conn,
&mut conn,
)
.await
.is_some()
{
err!(format!("Grantee user already invited: {}", email))
err!(format!("Grantee user already invited: {}", &grantee_user.email))
}

let mut new_emergency_access = EmergencyAccess::new(
grantor_user.uuid.clone(),
Some(grantee_user.email.clone()),
emergency_access_status,
new_type,
wait_time_days,
);
new_emergency_access.save(&conn)?;
let mut new_emergency_access =
EmergencyAccess::new(grantor_user.uuid, grantee_user.email, emergency_access_status, new_type, wait_time_days);
new_emergency_access.save(&mut conn).await?;

if CONFIG.mail_enabled() {
mail::send_emergency_access_invite(
&grantee_user.email,
&new_emergency_access.email.expect("Grantee email does not exists"),
&grantee_user.uuid,
Some(new_emergency_access.uuid),
Some(grantor_user.name.clone()),
Some(grantor_user.email),
)?;
&new_emergency_access.uuid,
&grantor_user.name,
&grantor_user.email,
)
.await?;
} else {
// Automatically mark user as accepted if no email invites
match User::find_by_mail(&email, &conn) {
Some(user) => {
match accept_invite_process(user.uuid, new_emergency_access.uuid, Some(email), conn.borrow()) {
Ok(v) => (v),
Err(e) => err!(e.to_string()),
}
}
match User::find_by_mail(&email, &mut conn).await {
Some(user) => match accept_invite_process(user.uuid, &mut new_emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
},
None => err!("Grantee user not found."),
}
}
@@ -247,10 +253,10 @@ fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, co
}

#[post("/emergency-access/<emer_id>/reinvite")]
fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
async fn resend_invite(emer_id: String, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;

let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@@ -268,7 +274,7 @@ fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult
None => err!("Email not valid."),
};

let grantee_user = match User::find_by_mail(&email, &conn) {
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
Some(user) => user,
None => err!("Grantee user not found."),
};
@@ -279,19 +285,20 @@ fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult
mail::send_emergency_access_invite(
&email,
&grantor_user.uuid,
Some(emergency_access.uuid),
Some(grantor_user.name.clone()),
Some(grantor_user.email),
)?;
&emergency_access.uuid,
&grantor_user.name,
&grantor_user.email,
)
.await?;
} else {
if Invitation::find_by_mail(&email, &conn).is_none() {
let invitation = Invitation::new(email);
invitation.save(&conn)?;
if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
let invitation = Invitation::new(&email);
invitation.save(&mut conn).await?;
}

// Automatically mark user as accepted if no email invites
match accept_invite_process(grantee_user.uuid, emergency_access.uuid, emergency_access.email, conn.borrow()) {
Ok(v) => (v),
match accept_invite_process(grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}
}
@@ -306,43 +313,54 @@ struct AcceptData {
}

#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) -> EmptyResult {
async fn accept_invite(
emer_id: String,
data: JsonUpcase<AcceptData>,
headers: Headers,
mut conn: DbConn,
) -> EmptyResult {
check_emergency_access_allowed()?;

let data: AcceptData = data.into_inner().data;
let token = &data.Token;
let claims = decode_emergency_access_invite(token)?;

let grantee_user = match User::find_by_mail(&claims.email, &conn) {
// This can happen if the user who received the invite used a different email to signup.
// Since we do not know if this is intended, we error out here and do nothing with the invite.
if claims.email != headers.user.email {
err!("Claim email does not match current users email")
}

let grantee_user = match User::find_by_mail(&claims.email, &mut conn).await {
Some(user) => {
Invitation::take(&claims.email, &conn);
Invitation::take(&claims.email, &mut conn).await;
user
}
None => err!("Invited user not found"),
};

let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};

// get grantor user to send Accepted email
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantor user not found."),
};

if (claims.emer_id.is_some() && emer_id == claims.emer_id.unwrap())
&& (claims.grantor_name.is_some() && grantor_user.name == claims.grantor_name.unwrap())
&& (claims.grantor_email.is_some() && grantor_user.email == claims.grantor_email.unwrap())
if emer_id == claims.emer_id
&& grantor_user.name == claims.grantor_name
&& grantor_user.email == claims.grantor_email
{
match accept_invite_process(grantee_user.uuid.clone(), emer_id, Some(grantee_user.email.clone()), &conn) {
Ok(v) => (v),
match accept_invite_process(grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}

if CONFIG.mail_enabled() {
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email)?;
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
}

Ok(())
@@ -351,14 +369,13 @@ fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) ->
}
}

fn accept_invite_process(grantee_uuid: String, emer_id: String, email: Option<String>, conn: &DbConn) -> EmptyResult {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};

let emer_email = emergency_access.email;
if emer_email.is_none() || emer_email != email {
async fn accept_invite_process(
grantee_uuid: String,
emergency_access: &mut EmergencyAccess,
grantee_email: &str,
conn: &mut DbConn,
) -> EmptyResult {
if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}

@@ -369,7 +386,7 @@ fn accept_invite_process(grantee_uuid: String, emer_id: String, email: Option<St
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
emergency_access.grantee_uuid = Some(grantee_uuid);
emergency_access.email = None;
emergency_access.save(conn)
emergency_access.save(conn).await
}

#[derive(Deserialize)]
@@ -379,11 +396,11 @@ struct ConfirmData {
}

#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
fn confirm_emergency_access(
async fn confirm_emergency_access(
emer_id: String,
data: JsonUpcase<ConfirmData>,
headers: Headers,
conn: DbConn,
mut conn: DbConn,
) -> JsonResult {
check_emergency_access_allowed()?;

@@ -391,7 +408,7 @@ fn confirm_emergency_access(
let data: ConfirmData = data.into_inner().data;
let key = data.Key;

let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@@ -402,13 +419,13 @@ fn confirm_emergency_access(
err!("Emergency access not valid.")
}

let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &conn) {
let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantor user not found."),
};

if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) {
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantee user not found."),
};
@@ -417,10 +434,10 @@ fn confirm_emergency_access(
emergency_access.key_encrypted = Some(key);
emergency_access.email = None;

emergency_access.save(&conn)?;
emergency_access.save(&mut conn).await?;

if CONFIG.mail_enabled() {
mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name)?;
mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name).await?;
}
Ok(Json(emergency_access.to_json()))
} else {
@@ -433,22 +450,22 @@ fn confirm_emergency_access(
// region access emergency access

#[post("/emergency-access/<emer_id>/initiate")]
fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn initiate_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

let initiating_user = headers.user;
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};

if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
|| emergency_access.grantee_uuid != Some(initiating_user.uuid.clone())
|| emergency_access.grantee_uuid != Some(initiating_user.uuid)
{
err!("Emergency access not valid.")
}

let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantor user not found."),
};
@@ -458,51 +475,51 @@ fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
emergency_access.updated_at = now;
emergency_access.recovery_initiated_at = Some(now);
emergency_access.last_notification_at = Some(now);
emergency_access.save(&conn)?;
emergency_access.save(&mut conn).await?;

if CONFIG.mail_enabled() {
mail::send_emergency_access_recovery_initiated(
&grantor_user.email,
&initiating_user.name,
emergency_access.get_type_as_str(),
&emergency_access.wait_time_days.clone().to_string(),
)?;
&emergency_access.wait_time_days,
)
.await?;
}
Ok(Json(emergency_access.to_json()))
}

#[post("/emergency-access/<emer_id>/approve")]
fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn approve_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

let approving_user = headers.user;
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};

if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|| emergency_access.grantor_uuid != approving_user.uuid
|| emergency_access.grantor_uuid != headers.user.uuid
{
err!("Emergency access not valid.")
}

let grantor_user = match User::find_by_uuid(&approving_user.uuid, &conn) {
let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantor user not found."),
};

if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) {
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantee user not found."),
};

emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32;
emergency_access.save(&conn)?;
emergency_access.save(&mut conn).await?;

if CONFIG.mail_enabled() {
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name)?;
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name).await?;
}
Ok(Json(emergency_access.to_json()))
} else {
@@ -511,39 +528,37 @@ fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
}

#[post("/emergency-access/<emer_id>/reject")]
fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn reject_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

let rejecting_user = headers.user;
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};

if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
|| emergency_access.grantor_uuid != rejecting_user.uuid
|| emergency_access.grantor_uuid != headers.user.uuid
{
err!("Emergency access not valid.")
}

let grantor_user = match User::find_by_uuid(&rejecting_user.uuid, &conn) {
let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantor user not found."),
};

if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) {
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantee user not found."),
};

emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
emergency_access.key_encrypted = None;
emergency_access.save(&conn)?;
emergency_access.save(&mut conn).await?;

if CONFIG.mail_enabled() {
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name)?;
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
}
Ok(Json(emergency_access.to_json()))
} else {
@@ -556,24 +571,27 @@ fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> J
// region action

#[post("/emergency-access/<emer_id>/view")]
fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn view_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

let requesting_user = headers.user;
let host = headers.host;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};

if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::View) {
if !is_valid_request(&emergency_access, headers.user.uuid, EmergencyAccessType::View) {
err!("Emergency access not valid.")
}

let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn);
let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &mut conn).await;
let cipher_sync_data =
CipherSyncData::new(&emergency_access.grantor_uuid, &ciphers, CipherSyncType::User, &mut conn).await;

let ciphers_json: Vec<Value> =
ciphers.iter().map(|c| c.to_json(&host, &emergency_access.grantor_uuid, &conn)).collect();
let mut ciphers_json = Vec::new();
for c in ciphers {
ciphers_json
.push(c.to_json(&headers.host, &emergency_access.grantor_uuid, Some(&cipher_sync_data), &mut conn).await);
}

Ok(Json(json!({
"Ciphers": ciphers_json,
@@ -583,11 +601,11 @@ fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> Jso
}

#[post("/emergency-access/<emer_id>/takeover")]
fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn takeover_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;

let requesting_user = headers.user;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@@ -596,7 +614,7 @@ fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
err!("Emergency access not valid.")
}

let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantor user not found."),
};
@@ -609,7 +627,7 @@ fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
})))
}

#[derive(Deserialize, Debug)]
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct EmergencyAccessPasswordData {
NewMasterPasswordHash: String,
@@ -617,11 +635,11 @@ struct EmergencyAccessPasswordData {
}

#[post("/emergency-access/<emer_id>/password", data = "<data>")]
fn password_emergency_access(
async fn password_emergency_access(
emer_id: String,
data: JsonUpcase<EmergencyAccessPasswordData>,
headers: Headers,
conn: DbConn,
mut conn: DbConn,
) -> EmptyResult {
check_emergency_access_allowed()?;

@@ -630,7 +648,7 @@ fn password_emergency_access(
let key = data.Key;

let requesting_user = headers.user;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@@ -639,7 +657,7 @@ fn password_emergency_access(
err!("Emergency access not valid.")
}

let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantor user not found."),
};
@@ -647,18 +665,15 @@ fn password_emergency_access(
// change grantor_user password
grantor_user.set_password(new_master_password_hash, None);
grantor_user.akey = key;
grantor_user.save(&conn)?;
grantor_user.save(&mut conn).await?;

// Disable TwoFactor providers since they will otherwise block logins
TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn)?;

// Removing owner, check that there are at least another owner
let user_org_grantor = UserOrganization::find_any_state_by_user(&grantor_user.uuid, &conn);
TwoFactor::delete_all_by_user(&grantor_user.uuid, &mut conn).await?;

// Remove grantor from all organisations unless Owner
for user_org in user_org_grantor {
for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &mut conn).await {
if user_org.atype != UserOrgType::Owner as i32 {
user_org.delete(&conn)?;
user_org.delete(&mut conn).await?;
}
}
Ok(())
@@ -667,9 +682,9 @@ fn password_emergency_access(
// endregion

#[get("/emergency-access/<emer_id>/policies")]
fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
async fn policies_emergency_access(emer_id: String, headers: Headers, mut conn: DbConn) -> JsonResult {
let requesting_user = headers.user;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &mut conn).await {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
@@ -678,13 +693,13 @@ fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) ->
err!("Emergency access not valid.")
}

let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
Some(user) => user,
None => err!("Grantor user not found."),
};

let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &conn);
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn);
let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();

Ok(Json(json!({
"Data": policies_json,
@@ -710,44 +725,52 @@ fn check_emergency_access_allowed() -> EmptyResult {
Ok(())
}

pub fn emergency_request_timeout_job(pool: DbPool) {
pub async fn emergency_request_timeout_job(pool: DbPool) {
debug!("Start emergency_request_timeout_job");
if !CONFIG.emergency_access_allowed() {
return;
}

if let Ok(conn) = pool.get() {
let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn);
if let Ok(mut conn) = pool.get().await {
let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await;

if emergency_access_list.is_empty() {
debug!("No emergency request timeout to approve");
}

let now = Utc::now().naive_utc();
for mut emer in emergency_access_list {
if emer.recovery_initiated_at.is_some()
&& Utc::now().naive_utc()
>= emer.recovery_initiated_at.unwrap() + Duration::days(emer.wait_time_days as i64)
{
emer.status = EmergencyAccessStatus::RecoveryApproved as i32;
emer.save(&conn).expect("Cannot save emergency access on job");
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
let recovery_allowed_at =
emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
if recovery_allowed_at.le(&now) {
// Only update the access status
// Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
emer.update_access_status_and_save(EmergencyAccessStatus::RecoveryApproved as i32, &now, &mut conn)
.await
.expect("Unable to update emergency access status");

if CONFIG.mail_enabled() {
// get grantor user to send Accepted email
let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found.");
let grantor_user =
User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found");

// get grantee user to send Accepted email
let grantee_user =
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
.expect("Grantee user not found.");
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn)
.await
.expect("Grantee user not found");

mail::send_emergency_access_recovery_timed_out(
&grantor_user.email,
&grantee_user.name.clone(),
&grantee_user.name,
emer.get_type_as_str(),
)
.await
.expect("Error on sending email");

mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name.clone())
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name)
.await
.expect("Error on sending email");
}
}
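The reworked timeout check above boils down to one date comparison: recovery unlocks once `wait_time_days` have elapsed since `recovery_initiated_at`. A sketch of that predicate, assuming chrono:

use chrono::{Duration, NaiveDateTime, Utc};

fn recovery_due(recovery_initiated_at: NaiveDateTime, wait_time_days: i32) -> bool {
    // The moment the grantee's takeover becomes permitted.
    let recovery_allowed_at = recovery_initiated_at + Duration::days(i64::from(wait_time_days));
    recovery_allowed_at <= Utc::now().naive_utc()
}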
@@ -757,44 +780,56 @@ pub fn emergency_request_timeout_job(pool: DbPool) {
}
}

pub fn emergency_notification_reminder_job(pool: DbPool) {
pub async fn emergency_notification_reminder_job(pool: DbPool) {
debug!("Start emergency_notification_reminder_job");
if !CONFIG.emergency_access_allowed() {
return;
}

if let Ok(conn) = pool.get() {
let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn);
if let Ok(mut conn) = pool.get().await {
let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await;

if emergency_access_list.is_empty() {
debug!("No emergency request reminder notification to send");
}

let now = Utc::now().naive_utc();
for mut emer in emergency_access_list {
if (emer.recovery_initiated_at.is_some()
&& Utc::now().naive_utc()
>= emer.recovery_initiated_at.unwrap() + Duration::days((emer.wait_time_days as i64) - 1))
&& (emer.last_notification_at.is_none()
|| (emer.last_notification_at.is_some()
&& Utc::now().naive_utc() >= emer.last_notification_at.unwrap() + Duration::days(1)))
{
emer.save(&conn).expect("Cannot save emergency access on job");
// The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
// Calculate the day before the recovery will become active
let final_recovery_reminder_at =
emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
// Calculate if a day has passed since the previous notification, else no notification has been sent before
let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
last_notification_at + Duration::days(1)
} else {
now
};
if final_recovery_reminder_at.le(&now) && next_recovery_reminder_at.le(&now) {
// Only update the last notification date
// Updating the whole record could cause issues when the emergency_request_timeout_job is also active
emer.update_last_notification_date_and_save(&now, &mut conn)
.await
.expect("Unable to update emergency access notification date");

if CONFIG.mail_enabled() {
// get grantor user to send Accepted email
let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found.");
let grantor_user =
User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found");

// get grantee user to send Accepted email
let grantee_user =
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
.expect("Grantee user not found.");
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn)
.await
.expect("Grantee user not found");

mail::send_emergency_access_recovery_reminder(
&grantor_user.email,
&grantee_user.name.clone(),
&grantee_user.name,
emer.get_type_as_str(),
&emer.wait_time_days.to_string(), // TODO(jjlin): This should be the number of days left.
"1", // This notification is only triggered one day before the activation
)
.await
.expect("Error on sending email");
}
}
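The reminder job's condition, restated: mail goes out once the day before activation has been reached, throttled to at most one notification per day via `last_notification_at`. As a chrono sketch:

use chrono::{Duration, NaiveDateTime, Utc};

fn reminder_due(
    recovery_initiated_at: NaiveDateTime,
    wait_time_days: i32,
    last_notification_at: Option<NaiveDateTime>,
) -> bool {
    let now = Utc::now().naive_utc();
    // One day before the recovery would unlock.
    let final_reminder_at = recovery_initiated_at + Duration::days(i64::from(wait_time_days - 1));
    // A day must have passed since the previous mail; if none was sent, remind now.
    let next_reminder_at = match last_notification_at {
        Some(last) => last + Duration::days(1),
        None => now,
    };
    final_reminder_at <= now && next_reminder_at <= now
}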

src/api/core/events.rs (new file, 341 lines)
@@ -0,0 +1,341 @@
use std::net::IpAddr;

use chrono::NaiveDateTime;
use rocket::{form::FromForm, serde::json::Json, Route};
use serde_json::Value;

use crate::{
api::{EmptyResult, JsonResult, JsonUpcaseVec},
auth::{AdminHeaders, ClientIp, Headers},
db::{
models::{Cipher, Event, UserOrganization},
DbConn, DbPool,
},
util::parse_date,
CONFIG,
};

/// ###############################################################################################################
/// /api routes
pub fn routes() -> Vec<Route> {
routes![get_org_events, get_cipher_events, get_user_events,]
}

#[derive(FromForm)]
#[allow(non_snake_case)]
struct EventRange {
start: String,
end: String,
#[field(name = "continuationToken")]
continuation_token: Option<String>,
}

// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
#[get("/organizations/<org_id>/events?<data..>")]
async fn get_org_events(org_id: String, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
// Return an empty vec when org events are disabled.
// This prevents client errors
let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
Vec::with_capacity(0)
} else {
let start_date = parse_date(&data.start);
let end_date = if let Some(before_date) = &data.continuation_token {
parse_date(before_date)
} else {
parse_date(&data.end)
};

Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn)
.await
.iter()
.map(|e| e.to_json())
.collect()
};

Ok(Json(json!({
"Data": events_json,
"Object": "list",
"ContinuationToken": get_continuation_token(&events_json),
})))
}

#[get("/ciphers/<cipher_id>/events?<data..>")]
async fn get_cipher_events(cipher_id: String, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
// Return an empty vec when org events are disabled.
// This prevents client errors
let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
Vec::with_capacity(0)
} else {
let mut events_json = Vec::with_capacity(0);
if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await {
let start_date = parse_date(&data.start);
let end_date = if let Some(before_date) = &data.continuation_token {
parse_date(before_date)
} else {
parse_date(&data.end)
};

events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn)
.await
.iter()
.map(|e| e.to_json())
.collect()
}
events_json
};

Ok(Json(json!({
"Data": events_json,
"Object": "list",
"ContinuationToken": get_continuation_token(&events_json),
})))
}

#[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
async fn get_user_events(
org_id: String,
user_org_id: String,
data: EventRange,
_headers: AdminHeaders,
mut conn: DbConn,
) -> JsonResult {
// Return an empty vec when org events are disabled.
// This prevents client errors
let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
Vec::with_capacity(0)
} else {
let start_date = parse_date(&data.start);
let end_date = if let Some(before_date) = &data.continuation_token {
parse_date(before_date)
} else {
parse_date(&data.end)
};

Event::find_by_org_and_user_org(&org_id, &user_org_id, &start_date, &end_date, &mut conn)
.await
.iter()
.map(|e| e.to_json())
.collect()
};

Ok(Json(json!({
"Data": events_json,
"Object": "list",
"ContinuationToken": get_continuation_token(&events_json),
})))
}

fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
// When the length of the vec equals the max page_size there probably is more data
// When it is less, then all events are loaded.
if events_json.len() as i64 == Event::PAGE_SIZE {
if let Some(last_event) = events_json.last() {
last_event["date"].as_str()
} else {
None
}
} else {
None
}
}
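`get_continuation_token` implements keyset-style pagination: a full page (`len == Event::PAGE_SIZE`) signals that more rows probably exist, and the last row's date is handed back for the client to echo as `continuationToken`, which the handlers above then use as the next `end_date`. Stripped to its shape, with an assumed page size for illustration:

const PAGE_SIZE: usize = 30; // illustrative; the real value is Event::PAGE_SIZE

fn continuation_token(page: &[String]) -> Option<&str> {
    if page.len() == PAGE_SIZE {
        // Probably more data; resume strictly before this timestamp.
        page.last().map(|s| s.as_str())
    } else {
        None // short page: everything has been loaded
    }
}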
|
||||
/// ###############################################################################################################
|
||||
/// /events routes
|
||||
pub fn main_routes() -> Vec<Route> {
|
||||
routes![post_events_collect,]
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
#[allow(non_snake_case)]
|
||||
struct EventCollection {
|
||||
// Mandatory
|
||||
Type: i32,
|
||||
Date: String,
|
||||
|
||||
// Optional
|
||||
CipherId: Option<String>,
|
||||
OrganizationId: Option<String>,
|
||||
}

// Upstream:
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
#[post("/collect", format = "application/json", data = "<data>")]
async fn post_events_collect(
    data: JsonUpcaseVec<EventCollection>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> EmptyResult {
    if !CONFIG.org_events_enabled() {
        return Ok(());
    }

    for event in data.iter().map(|d| &d.data) {
        let event_date = parse_date(&event.Date);
        match event.Type {
            1000..=1099 => {
                _log_user_event(
                    event.Type,
                    &headers.user.uuid,
                    headers.device.atype,
                    Some(event_date),
                    &ip.ip,
                    &mut conn,
                )
                .await;
            }
            1600..=1699 => {
                if let Some(org_uuid) = &event.OrganizationId {
                    _log_event(
                        event.Type,
                        org_uuid,
                        String::from(org_uuid),
                        &headers.user.uuid,
                        headers.device.atype,
                        Some(event_date),
                        &ip.ip,
                        &mut conn,
                    )
                    .await;
                }
            }
            _ => {
                if let Some(cipher_uuid) = &event.CipherId {
                    if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
                        if let Some(org_uuid) = cipher.organization_uuid {
                            _log_event(
                                event.Type,
                                cipher_uuid,
                                org_uuid,
                                &headers.user.uuid,
                                headers.device.atype,
                                Some(event_date),
                                &ip.ip,
                                &mut conn,
                            )
                            .await;
                        }
                    }
                }
            }
        }
    }
    Ok(())
}
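// (added note) An illustrative payload for this route; field names follow
// EventCollection above, the values are made up:
//     [{"Type": 1000, "Date": "2022-10-18T12:00:00.000Z"},
//      {"Type": 1600, "Date": "2022-10-18T12:00:05.000Z", "OrganizationId": "<org uuid>"}]
// User events (1000..=1099) fan out via _log_user_event, org events
// (1600..=1699) are logged directly, and any other type is only logged when
// its CipherId resolves to an organization-owned cipher.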

pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
    if !CONFIG.org_events_enabled() {
        return;
    }
    _log_user_event(event_type, user_uuid, device_type, None, ip, conn).await;
}

async fn _log_user_event(
    event_type: i32,
    user_uuid: &str,
    device_type: i32,
    event_date: Option<NaiveDateTime>,
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await;
    let mut events: Vec<Event> = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org

    // Upstream also saves the event without any org_uuid.
    let mut event = Event::new(event_type, event_date);
    event.user_uuid = Some(String::from(user_uuid));
    event.act_user_uuid = Some(String::from(user_uuid));
    event.device_type = Some(device_type);
    event.ip_address = Some(ip.to_string());
    events.push(event);

    // For each org the user is a member of, store the event per org as well.
    for org_uuid in orgs {
        let mut event = Event::new(event_type, event_date);
        event.user_uuid = Some(String::from(user_uuid));
        event.org_uuid = Some(org_uuid);
        event.act_user_uuid = Some(String::from(user_uuid));
        event.device_type = Some(device_type);
        event.ip_address = Some(ip.to_string());
        events.push(event);
    }

    Event::save_user_event(events, conn).await.unwrap_or(());
}

pub async fn log_event(
    event_type: i32,
    source_uuid: &str,
    org_uuid: String,
    act_user_uuid: String,
    device_type: i32,
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    if !CONFIG.org_events_enabled() {
        return;
    }
    _log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
}

#[allow(clippy::too_many_arguments)]
async fn _log_event(
    event_type: i32,
    source_uuid: &str,
    org_uuid: String,
    act_user_uuid: &str,
    device_type: i32,
    event_date: Option<NaiveDateTime>,
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    // Create a new empty event
    let mut event = Event::new(event_type, event_date);
    match event_type {
        // 1000..=1099 are user events; they need to be logged via log_user_event()
        // Cipher Events
        1100..=1199 => {
            event.cipher_uuid = Some(String::from(source_uuid));
        }
        // Collection Events
        1300..=1399 => {
            event.collection_uuid = Some(String::from(source_uuid));
        }
        // Group Events
        1400..=1499 => {
            event.group_uuid = Some(String::from(source_uuid));
        }
        // Org User Events
        1500..=1599 => {
            event.org_user_uuid = Some(String::from(source_uuid));
        }
        // 1600..=1699 are organizational events; they do not need the source_uuid
        // Policy Events
        1700..=1799 => {
            event.policy_uuid = Some(String::from(source_uuid));
        }
        // Ignore others
        _ => {}
    }

    event.org_uuid = Some(org_uuid);
    event.act_user_uuid = Some(String::from(act_user_uuid));
    event.device_type = Some(device_type);
    event.ip_address = Some(ip.to_string());
    event.save(conn).await.unwrap_or(());
}

pub async fn event_cleanup_job(pool: DbPool) {
    debug!("Start events cleanup job");
    if CONFIG.events_days_retain().is_none() {
        debug!("events_days_retain is not configured, abort");
        return;
    }

    if let Ok(mut conn) = pool.get().await {
        Event::clean_events(&mut conn).await.ok();
    } else {
        error!("Failed to get DB connection while trying to cleanup the events table")
    }
}
@@ -1,4 +1,4 @@
use rocket_contrib::json::Json;
use rocket::serde::json::Json;
use serde_json::Value;

use crate::{
@@ -12,9 +12,8 @@ pub fn routes() -> Vec<rocket::Route> {
}

#[get("/folders")]
fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
    let folders = Folder::find_by_user(&headers.user.uuid, &conn);

async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
    let folders = Folder::find_by_user(&headers.user.uuid, &mut conn).await;
    let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

    Json(json!({
@@ -25,8 +24,8 @@ fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
}

#[get("/folders/<uuid>")]
fn get_folder(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
    let folder = match Folder::find_by_uuid(&uuid, &conn) {
async fn get_folder(uuid: String, headers: Headers, mut conn: DbConn) -> JsonResult {
    let folder = match Folder::find_by_uuid(&uuid, &mut conn).await {
        Some(folder) => folder,
        _ => err!("Invalid folder"),
    };
@@ -45,27 +44,39 @@ pub struct FolderData {
}

#[post("/folders", data = "<data>")]
fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
    let data: FolderData = data.into_inner().data;

    let mut folder = Folder::new(headers.user.uuid, data.Name);

    folder.save(&conn)?;
    nt.send_folder_update(UpdateType::FolderCreate, &folder);
    folder.save(&mut conn).await?;
    nt.send_folder_update(UpdateType::FolderCreate, &folder).await;

    Ok(Json(folder.to_json()))
}

#[post("/folders/<uuid>", data = "<data>")]
fn post_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    put_folder(uuid, data, headers, conn, nt)
async fn post_folder(
    uuid: String,
    data: JsonUpcase<FolderData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    put_folder(uuid, data, headers, conn, nt).await
}

#[put("/folders/<uuid>", data = "<data>")]
fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
async fn put_folder(
    uuid: String,
    data: JsonUpcase<FolderData>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    let data: FolderData = data.into_inner().data;

    let mut folder = match Folder::find_by_uuid(&uuid, &conn) {
    let mut folder = match Folder::find_by_uuid(&uuid, &mut conn).await {
        Some(folder) => folder,
        _ => err!("Invalid folder"),
    };
@@ -76,20 +87,20 @@ fn put_folder(uuid: String, data: JsonUpcase<FolderData>, headers: Headers, conn

    folder.name = data.Name;

    folder.save(&conn)?;
    nt.send_folder_update(UpdateType::FolderUpdate, &folder);
    folder.save(&mut conn).await?;
    nt.send_folder_update(UpdateType::FolderUpdate, &folder).await;

    Ok(Json(folder.to_json()))
}

#[post("/folders/<uuid>/delete")]
fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    delete_folder(uuid, headers, conn, nt)
async fn delete_folder_post(uuid: String, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    delete_folder(uuid, headers, conn, nt).await
}

#[delete("/folders/<uuid>")]
fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let folder = match Folder::find_by_uuid(&uuid, &conn) {
async fn delete_folder(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let folder = match Folder::find_by_uuid(&uuid, &mut conn).await {
        Some(folder) => folder,
        _ => err!("Invalid folder"),
    };
@@ -99,8 +110,8 @@ fn delete_folder(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> Em
    }

    // Delete the actual folder entry
    folder.delete(&conn)?;
    folder.delete(&mut conn).await?;

    nt.send_folder_update(UpdateType::FolderDelete, &folder);
    nt.send_folder_update(UpdateType::FolderDelete, &folder).await;
    Ok(())
}

@@ -1,28 +1,45 @@
mod accounts;
pub mod accounts;
mod ciphers;
mod emergency_access;
mod events;
mod folders;
mod organizations;
mod sends;
pub mod two_factor;

pub use ciphers::purge_trashed_ciphers;
pub use ciphers::{CipherSyncData, CipherSyncType};
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
pub use events::{event_cleanup_job, log_event, log_user_event};
pub use sends::purge_sends;
pub use two_factor::send_incomplete_2fa_notifications;

pub fn routes() -> Vec<Route> {
    let mut mod_routes =
        routes![clear_device_token, put_device_token, get_eq_domains, post_eq_domains, put_eq_domains, hibp_breach,];
    let mut device_token_routes = routes![clear_device_token, put_device_token];
    let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
    let mut hibp_routes = routes![hibp_breach];
    let mut meta_routes = routes![alive, now, version, config];

    let mut routes = Vec::new();
    routes.append(&mut accounts::routes());
    routes.append(&mut ciphers::routes());
    routes.append(&mut emergency_access::routes());
    routes.append(&mut events::routes());
    routes.append(&mut folders::routes());
    routes.append(&mut organizations::routes());
    routes.append(&mut two_factor::routes());
    routes.append(&mut sends::routes());
    routes.append(&mut mod_routes);
    routes.append(&mut device_token_routes);
    routes.append(&mut eq_domains_routes);
    routes.append(&mut hibp_routes);
    routes.append(&mut meta_routes);

    routes
}

pub fn events_routes() -> Vec<Route> {
    let mut routes = Vec::new();
    routes.append(&mut events::main_routes());

    routes
}
@@ -30,8 +47,9 @@ pub fn routes() -> Vec<Route> {
//
// Move this somewhere else
//
use rocket::serde::json::Json;
use rocket::Catcher;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
@@ -120,7 +138,7 @@ struct EquivDomainData {
}

#[post("/settings/domains", data = "<data>")]
fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: EquivDomainData = data.into_inner().data;

    let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
@@ -132,18 +150,18 @@ fn post_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: Db
    user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string());
    user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string());

    user.save(&conn)?;
    user.save(&mut conn).await?;

    Ok(Json(json!({})))
}

#[put("/settings/domains", data = "<data>")]
fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
    post_eq_domains(data, headers, conn)
async fn put_eq_domains(data: JsonUpcase<EquivDomainData>, headers: Headers, conn: DbConn) -> JsonResult {
    post_eq_domains(data, headers, conn).await
}

#[get("/hibp/breach?<username>")]
fn hibp_breach(username: String) -> JsonResult {
async fn hibp_breach(username: String) -> JsonResult {
    let url = format!(
        "https://haveibeenpwned.com/api/v3/breachedaccount/{}?truncateResponse=false&includeUnverified=false",
        username
@@ -152,14 +170,14 @@ fn hibp_breach(username: String) -> JsonResult {
    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
        let hibp_client = get_reqwest_client();

        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send()?;
        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;

        // If we get a 404 it means there are no breached accounts, so return a 404 as well
        if res.status() == 404 {
            return Err(Error::empty().with_code(404));
        }

        let value: Value = res.error_for_status()?.json()?;
        let value: Value = res.error_for_status()?.json().await?;
        Ok(Json(value))
    } else {
        Ok(Json(json!([{
@@ -169,7 +187,7 @@ fn hibp_breach(username: String) -> JsonResult {
            "BreachDate": "2019-08-18T00:00:00Z",
            "AddedDate": "2019-08-18T00:00:00Z",
            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
            "LogoPath": "bwrs_static/hibp.png",
            "LogoPath": "vw_static/hibp.png",
            "PwnCount": 0,
            "DataClasses": [
                "Error - No API key set!"
@@ -177,3 +195,54 @@ fn hibp_breach(username: String) -> JsonResult {
        }])))
    }
}

// We use DbConn here to let the alive healthcheck also verify the database connection.
#[get("/alive")]
fn alive(_conn: DbConn) -> Json<String> {
    now()
}

#[get("/now")]
pub fn now() -> Json<String> {
    Json(crate::util::format_date(&chrono::Utc::now().naive_utc()))
}

#[get("/version")]
fn version() -> Json<&'static str> {
    Json(crate::VERSION.unwrap_or_default())
}

#[get("/config")]
fn config() -> Json<Value> {
    let domain = crate::CONFIG.domain();
    Json(json!({
        "version": crate::VERSION,
        "gitHash": option_env!("GIT_REV"),
        "server": {
            "name": "Vaultwarden",
            "url": "https://github.com/dani-garcia/vaultwarden"
        },
        "environment": {
            "vault": domain,
            "api": format!("{domain}/api"),
            "identity": format!("{domain}/identity"),
            "notifications": format!("{domain}/notifications"),
            "sso": "",
        },
    }))
}

pub fn catchers() -> Vec<Catcher> {
    catchers![api_not_found]
}

#[catch(404)]
fn api_not_found() -> Json<Value> {
    Json(json!({
        "error": {
            "code": 404,
            "reason": "Not Found",
            "description": "The requested resource could not be found."
        }
    }))
}

@@ -1,14 +1,15 @@
use std::{io::Read, path::Path};
use std::path::Path;

use chrono::{DateTime, Duration, Utc};
use multipart::server::{save::SavedData, Multipart, SaveResult};
use rocket::{http::ContentType, response::NamedFile, Data};
use rocket_contrib::json::Json;
use rocket::form::Form;
use rocket::fs::NamedFile;
use rocket::fs::TempFile;
use rocket::serde::json::Json;
use serde_json::Value;

use crate::{
    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
    auth::{Headers, Host},
    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
    auth::{ClientIp, Headers, Host},
    db::{models::*, DbConn, DbPool},
    util::SafeString,
    CONFIG,
@@ -16,6 +17,9 @@ use crate::{

const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

// The max file size allowed by Bitwarden clients, plus an extra 5% to avoid issues
const SIZE_525_MB: u64 = 550_502_400;
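// (added note) Sanity check on the constant: 500 MiB * 1.05 = 525 MiB,
// and 525 * 1024 * 1024 = 550_502_400 bytes, matching the value above.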

pub fn routes() -> Vec<rocket::Route> {
    routes![
        get_sends,
@@ -27,14 +31,16 @@ pub fn routes() -> Vec<rocket::Route> {
        put_send,
        delete_send,
        put_remove_password,
        download_send
        download_send,
        post_send_file_v2,
        post_send_file_v2_data
    ]
}

pub fn purge_sends(pool: DbPool) {
pub async fn purge_sends(pool: DbPool) {
    debug!("Purging sends");
    if let Ok(conn) = pool.get() {
        Send::purge(&conn);
    if let Ok(mut conn) = pool.get().await {
        Send::purge(&mut conn).await;
    } else {
        error!("Failed to get DB connection while purging sends")
    }
@@ -42,21 +48,22 @@ pub fn purge_sends(pool: DbPool) {

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendData {
    pub Type: i32,
    pub Key: String,
    pub Password: Option<String>,
    pub MaxAccessCount: Option<i32>,
    pub ExpirationDate: Option<DateTime<Utc>>,
    pub DeletionDate: DateTime<Utc>,
    pub Disabled: bool,
    pub HideEmail: Option<bool>,
struct SendData {
    Type: i32,
    Key: String,
    Password: Option<String>,
    MaxAccessCount: Option<NumberOrString>,
    ExpirationDate: Option<DateTime<Utc>>,
    DeletionDate: DateTime<Utc>,
    Disabled: bool,
    HideEmail: Option<bool>,

    // Data field
    pub Name: String,
    pub Notes: Option<String>,
    pub Text: Option<Value>,
    pub File: Option<Value>,
    Name: String,
    Notes: Option<String>,
    Text: Option<Value>,
    File: Option<Value>,
    FileLength: Option<NumberOrString>,
}
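// (added note) MaxAccessCount and FileLength use NumberOrString because clients
// may serialize these values as either a JSON number or a string; both
// {"MaxAccessCount": 5} and {"MaxAccessCount": "5"} are expected to yield Ok(5)
// through into_i32() -- an assumption based on how the fields are consumed below.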

/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@@ -67,10 +74,11 @@ pub struct SendData {
///
/// There is also a Vaultwarden-specific `sends_allowed` config setting that
/// controls this policy globally.
fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult {
async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let policy_type = OrgPolicyType::DisableSend;
    if !CONFIG.sends_allowed() || OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
    if !CONFIG.sends_allowed()
        || OrgPolicy::is_applicable_to_user(user_uuid, OrgPolicyType::DisableSend, None, conn).await
    {
        err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
    }
    Ok(())
@@ -82,10 +90,10 @@ fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult
/// but is allowed to remove this option from an existing Send.
///
/// Ref: https://bitwarden.com/help/article/policies/#send-options
fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult {
async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
    let user_uuid = &headers.user.uuid;
    let hide_email = data.HideEmail.unwrap_or(false);
    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn) {
    if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
        err!(
            "Due to an Enterprise Policy, you are not allowed to hide your email address \
             from recipients when creating or editing a Send."
@@ -119,7 +127,10 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
    send.user_uuid = Some(user_uuid);
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.max_access_count = match data.MaxAccessCount {
        Some(m) => Some(m.into_i32()?),
        _ => None,
    };
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.disabled = data.Disabled;
    send.hide_email = data.HideEmail;
@@ -131,9 +142,9 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
}

#[get("/sends")]
fn get_sends(headers: Headers, conn: DbConn) -> Json<Value> {
    let sends = Send::find_by_user(&headers.user.uuid, &conn);
    let sends_json: Vec<Value> = sends.iter().map(|s| s.to_json()).collect();
async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
    let sends = Send::find_by_user(&headers.user.uuid, &mut conn);
    let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();

    Json(json!({
        "Data": sends_json,
@@ -143,8 +154,8 @@ fn get_sends(headers: Headers, conn: DbConn) -> Json<Value> {
}

#[get("/sends/<uuid>")]
fn get_send(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
    let send = match Send::find_by_uuid(&uuid, &conn) {
async fn get_send(uuid: String, headers: Headers, mut conn: DbConn) -> JsonResult {
    let send = match Send::find_by_uuid(&uuid, &mut conn).await {
        Some(send) => send,
        None => err!("Send not found"),
    };
@@ -157,50 +168,53 @@ fn get_send(uuid: String, headers: Headers, conn: DbConn) -> JsonResult {
}

#[post("/sends", data = "<data>")]
fn post_send(data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;
async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let data: SendData = data.into_inner().data;
    enforce_disable_hide_email_policy(&data, &headers, &conn)?;
    enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

    if data.Type == SendType::File as i32 {
        err!("File sends should use /api/sends/file")
    }

    let mut send = create_send(data, headers.user.uuid)?;
    send.save(&conn)?;
    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&conn));
    send.save(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(Json(send.to_json()))
}

#[derive(FromForm)]
struct UploadData<'f> {
    model: Json<crate::util::UpCase<SendData>>,
    data: TempFile<'f>,
}

#[derive(FromForm)]
struct UploadDataV2<'f> {
    data: TempFile<'f>,
}

// @deprecated Mar 25 2021: This method has been deprecated in favor of direct uploads (v2).
// This method still exists to support older clients; it will probably be removed at some point.
// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L164-L167
#[post("/sends/file", format = "multipart/form-data", data = "<data>")]
fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;
async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let boundary = content_type.params().next().expect("No boundary provided").1;
    let UploadData {
        model,
        mut data,
    } = data.into_inner();
    let model = model.into_inner().data;

    let mut mpart = Multipart::with_body(data.open(), boundary);

    // First entry is the SendData JSON
    let mut model_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "model" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };

    let mut buf = String::new();
    model_entry.data.read_to_string(&mut buf)?;
    let data = serde_json::from_str::<crate::util::UpCase<SendData>>(&buf)?;
    enforce_disable_hide_email_policy(&data.data, &headers, &conn)?;

    // Get the file length and add an extra 5% to avoid issues
    const SIZE_525_MB: u64 = 550_502_400;
    enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?;

    let size_limit = match CONFIG.user_attachment_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &conn);
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
            if left <= 0 {
                err!("Attachment storage limit reached! Delete some attachments to free up space")
            }
@@ -209,55 +223,152 @@ fn post_send_file(data: Data, content_type: &ContentType, headers: Headers, conn
        None => SIZE_525_MB,
    };

    // Create the Send
    let mut send = create_send(data.data, headers.user.uuid)?;
    let file_id = crate::crypto::generate_send_id();

    let mut send = create_send(model, headers.user.uuid)?;
    if send.atype != SendType::File as i32 {
        err!("Send content is not a file");
    }

    let file_path = Path::new(&CONFIG.sends_folder()).join(&send.uuid).join(&file_id);
    // There is a bug regarding uploading attachments/sends using the Mobile clients
    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
    // On the vaultwarden side this is temporarily fixed by using a custom multer library
    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
    if let TempFile::Buffered {
        content: _,
    } = &data
    {
        err!("Error reading send file data. Please try another client.");
    }

    // Read the data entry and save the file
    let mut data_entry = match mpart.read_entry()? {
        Some(e) if &*e.headers.name == "data" => e,
        Some(_) => err!("Invalid entry name"),
        None => err!("No model entry present"),
    };
    let size = data.len();
    if size > size_limit {
        err!("Attachment storage limit exceeded with this file");
    }

    let size = match data_entry.data.save().memory_threshold(0).size_limit(size_limit).with_path(&file_path) {
        SaveResult::Full(SavedData::File(_, size)) => size as i32,
        SaveResult::Full(other) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment is not a file: {:?}", other));
        }
        SaveResult::Partial(_, reason) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Attachment storage limit exceeded with this file: {:?}", reason));
        }
        SaveResult::Error(e) => {
            std::fs::remove_file(&file_path).ok();
            err!(format!("Error: {:?}", e));
        }
    };
    let file_id = crate::crypto::generate_send_id();
    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
    let file_path = folder_path.join(&file_id);
    tokio::fs::create_dir_all(&folder_path).await?;

    if let Err(_err) = data.persist_to(&file_path).await {
        data.move_copy_to(file_path).await?
    }

    // Set ID and sizes
    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id));
        o.insert(String::from("Size"), Value::Number(size.into()));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
    }
    send.data = serde_json::to_string(&data_value)?;

    // Save the changes in the database
    send.save(&conn)?;
    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn));
    send.save(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(Json(send.to_json()))
}

// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
#[post("/sends/file/v2", data = "<data>")]
async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let data = data.into_inner().data;

    if data.Type != SendType::File as i32 {
        err!("Send content is not a file");
    }

    enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

    let file_length = match &data.FileLength {
        Some(m) => Some(m.into_i32()?),
        _ => None,
    };

    let size_limit = match CONFIG.user_attachment_limit() {
        Some(0) => err!("File uploads are disabled"),
        Some(limit_kb) => {
            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
            if left <= 0 {
                err!("Attachment storage limit reached! Delete some attachments to free up space")
            }
            std::cmp::Ord::max(left as u64, SIZE_525_MB)
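            // (added review note) Taking the *max* of the remaining quota and
            // SIZE_525_MB means the effective limit can exceed what the user has
            // left; if the intent is "remaining quota, capped at 525 MiB", min()
            // would be the expected combinator. Flagged as a possible bug, not a
            // confirmed one.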
        }
        None => SIZE_525_MB,
    };

    if file_length.is_some() && file_length.unwrap() as u64 > size_limit {
        err!("Attachment storage limit exceeded with this file");
    }

    let mut send = create_send(data, headers.user.uuid)?;

    let file_id = crate::crypto::generate_send_id();

    let mut data_value: Value = serde_json::from_str(&send.data)?;
    if let Some(o) = data_value.as_object_mut() {
        o.insert(String::from("Id"), Value::String(file_id.clone()));
        o.insert(String::from("Size"), Value::Number(file_length.unwrap().into()));
        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap())));
    }
    send.data = serde_json::to_string(&data_value)?;
    send.save(&mut conn).await?;

    Ok(Json(json!({
        "fileUploadType": 0, // 0 == Direct | 1 == Azure
        "object": "send-fileUpload",
        "url": format!("/sends/{}/file/{}", send.uuid, file_id),
        "sendResponse": send.to_json()
    })))
}
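// (added note) v2 file Sends are a two-step protocol: the route above validates
// the metadata, reserves the Send, and returns a direct-upload URL; the client
// then POSTs the actual bytes to /sends/<send_uuid>/file/<file_id>, handled below.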

// https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L243
#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
async fn post_send_file_v2_data(
    send_uuid: String,
    file_id: String,
    data: Form<UploadDataV2<'_>>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> EmptyResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let mut data = data.into_inner();

    // There is a bug regarding uploading attachments/sends using the Mobile clients
    // See: https://github.com/dani-garcia/vaultwarden/issues/2644 && https://github.com/bitwarden/mobile/issues/2018
    // This has been fixed via a PR: https://github.com/bitwarden/mobile/pull/2031, but hasn't landed in a new release yet.
    // On the vaultwarden side this is temporarily fixed by using a custom multer library
    // See: https://github.com/dani-garcia/vaultwarden/pull/2675
    // In any case we will match TempFile::File and not TempFile::Buffered, since Buffered will alter the contents.
    if let TempFile::Buffered {
        content: _,
    } = &data.data
    {
        err!("Error reading attachment data. Please try another client.");
    }

    if let Some(send) = Send::find_by_uuid(&send_uuid, &mut conn).await {
        let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send_uuid);
        let file_path = folder_path.join(&file_id);
        tokio::fs::create_dir_all(&folder_path).await?;

        if let Err(_err) = data.data.persist_to(&file_path).await {
            data.data.move_copy_to(file_path).await?
        }

        nt.send_send_update(UpdateType::SyncSendCreate, &send, &send.update_users_revision(&mut conn).await).await;
    } else {
        err!("Send not found. Unable to save the file.");
    }

    Ok(())
}

#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendAccessData {
@@ -265,8 +376,13 @@ pub struct SendAccessData {
}

#[post("/sends/access/<access_id>", data = "<data>")]
fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn) -> JsonResult {
    let mut send = match Send::find_by_access_id(&access_id, &conn) {
async fn post_access(
    access_id: String,
    data: JsonUpcase<SendAccessData>,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    let mut send = match Send::find_by_access_id(&access_id, &mut conn).await {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };
@@ -294,8 +410,8 @@ fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn
    if send.password_hash.is_some() {
        match data.into_inner().data.Password {
            Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
            Some(_) => err!("Invalid password."),
            None => err_code!("Password not provided", 401),
            Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
            None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),
        }
    }

@@ -304,20 +420,20 @@ fn post_access(access_id: String, data: JsonUpcase<SendAccessData>, conn: DbConn
        send.access_count += 1;
    }

    send.save(&conn)?;
    send.save(&mut conn).await?;

    Ok(Json(send.to_json_access(&conn)))
    Ok(Json(send.to_json_access(&mut conn).await))
}

#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
fn post_access_file(
async fn post_access_file(
    send_id: String,
    file_id: String,
    data: JsonUpcase<SendAccessData>,
    host: Host,
    conn: DbConn,
    mut conn: DbConn,
) -> JsonResult {
    let mut send = match Send::find_by_uuid(&send_id, &conn) {
    let mut send = match Send::find_by_uuid(&send_id, &mut conn).await {
        Some(s) => s,
        None => err_code!(SEND_INACCESSIBLE_MSG, 404),
    };
@@ -352,7 +468,7 @@ fn post_access_file(

    send.access_count += 1;

    send.save(&conn)?;
    send.save(&mut conn).await?;

    let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
    let token = crate::auth::encode_jwt(&token_claims);
@@ -364,23 +480,29 @@ fn post_access_file(
}

#[get("/sends/<send_id>/<file_id>?<t>")]
fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
async fn download_send(send_id: SafeString, file_id: SafeString, t: String) -> Option<NamedFile> {
    if let Ok(claims) = crate::auth::decode_send(&t) {
        if claims.sub == format!("{}/{}", send_id, file_id) {
            return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).ok();
            return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok();
        }
    }
    None
}
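// (added note) Token flow sketch: post_access_file() above issues a short-lived
// JWT whose `sub` claim is "<send_id>/<file_id>"; this route serves the file
// only when the decoded claim matches the requested path, so knowing the URL
// alone is not enough to download the file.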

#[put("/sends/<id>", data = "<data>")]
fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;
async fn put_send(
    id: String,
    data: JsonUpcase<SendData>,
    headers: Headers,
    mut conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let data: SendData = data.into_inner().data;
    enforce_disable_hide_email_policy(&data, &headers, &conn)?;
    enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
    let mut send = match Send::find_by_uuid(&id, &mut conn).await {
        Some(s) => s,
        None => err!("Send not found"),
    };
@@ -414,7 +536,10 @@ fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbCo
    send.akey = data.Key;
    send.deletion_date = data.DeletionDate.naive_utc();
    send.notes = data.Notes;
    send.max_access_count = data.MaxAccessCount;
    send.max_access_count = match data.MaxAccessCount {
        Some(m) => Some(m.into_i32()?),
        _ => None,
    };
    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
    send.hide_email = data.HideEmail;
    send.disabled = data.Disabled;
@@ -424,15 +549,15 @@ fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbCo
        send.set_password(Some(&password));
    }

    send.save(&conn)?;
    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn));
    send.save(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(Json(send.to_json()))
}

#[delete("/sends/<id>")]
fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
    let send = match Send::find_by_uuid(&id, &conn) {
async fn delete_send(id: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
    let send = match Send::find_by_uuid(&id, &mut conn).await {
        Some(s) => s,
        None => err!("Send not found"),
    };
@@ -441,17 +566,17 @@ fn delete_send(id: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyR
        err!("Send is not owned by user")
    }

    send.delete(&conn)?;
    nt.send_send_update(UpdateType::SyncSendDelete, &send, &send.update_users_revision(&conn));
    send.delete(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendDelete, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(())
}

#[put("/sends/<id>/remove-password")]
fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
    enforce_disable_send_policy(&headers, &conn)?;
async fn put_remove_password(id: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
    enforce_disable_send_policy(&headers, &mut conn).await?;

    let mut send = match Send::find_by_uuid(&id, &conn) {
    let mut send = match Send::find_by_uuid(&id, &mut conn).await {
        Some(s) => s,
        None => err!("Send not found"),
    };
@@ -461,8 +586,8 @@ fn put_remove_password(id: String, headers: Headers, conn: DbConn, nt: Notify) -
    }

    send.set_password(None);
    send.save(&conn)?;
    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&conn));
    send.save(&mut conn).await?;
    nt.send_send_update(UpdateType::SyncSendUpdate, &send, &send.update_users_revision(&mut conn).await).await;

    Ok(Json(send.to_json()))
}

@@ -1,15 +1,16 @@
use data_encoding::BASE32;
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::{
    api::{
        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
        core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
        NumberOrString, PasswordData,
    },
    auth::{ClientIp, Headers},
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType},
        models::{EventType, TwoFactor, TwoFactorType},
        DbConn,
    },
};
@@ -21,7 +22,7 @@ pub fn routes() -> Vec<Route> {
}

#[post("/two-factor/get-authenticator", data = "<data>")]
fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;
    let user = headers.user;

@@ -30,11 +31,11 @@ fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, conn
    }

    let type_ = TwoFactorType::Authenticator as i32;
    let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn);
    let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await;

    let (enabled, key) = match twofactor {
        Some(tf) => (true, tf.data),
        _ => (false, BASE32.encode(&crypto::get_random(vec![0u8; 20]))),
        _ => (false, crypto::encode_random_bytes::<20>(BASE32)),
    };

    Ok(Json(json!({
@@ -53,11 +54,11 @@ struct EnableAuthenticatorData {
}

#[post("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator(
async fn activate_authenticator(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    conn: DbConn,
    mut conn: DbConn,
) -> JsonResult {
    let data: EnableAuthenticatorData = data.into_inner().data;
    let password_hash = data.MasterPasswordHash;
@@ -81,9 +82,11 @@ fn activate_authenticator(
    }

    // Validate the token provided with the key, and save new twofactor
    validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &conn)?;
    validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &ip, &mut conn).await?;

    _generate_recover_code(&mut user, &conn);
    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

    Ok(Json(json!({
        "Enabled": true,
@@ -93,30 +96,36 @@ fn activate_authenticator(
}

#[put("/two-factor/authenticator", data = "<data>")]
fn activate_authenticator_put(
async fn activate_authenticator_put(
    data: JsonUpcase<EnableAuthenticatorData>,
    headers: Headers,
    ip: ClientIp,
    conn: DbConn,
) -> JsonResult {
    activate_authenticator(data, headers, ip, conn)
    activate_authenticator(data, headers, ip, conn).await
}

pub fn validate_totp_code_str(
pub async fn validate_totp_code_str(
    user_uuid: &str,
    totp_code: &str,
    secret: &str,
    ip: &ClientIp,
    conn: &DbConn,
    conn: &mut DbConn,
) -> EmptyResult {
    if !totp_code.chars().all(char::is_numeric) {
        err!("TOTP code is not a number");
    }

    validate_totp_code(user_uuid, totp_code, secret, ip, conn)
    validate_totp_code(user_uuid, totp_code, secret, ip, conn).await
}

pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
pub async fn validate_totp_code(
    user_uuid: &str,
    totp_code: &str,
    secret: &str,
    ip: &ClientIp,
    conn: &mut DbConn,
) -> EmptyResult {
    use totp_lite::{totp_custom, Sha1};

    let decoded_secret = match BASE32.decode(secret.as_bytes()) {
@@ -124,15 +133,16 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &C
        Err(_) => err!("Invalid TOTP secret"),
    };

    let mut twofactor = match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn) {
        Some(tf) => tf,
        _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
    };
    let mut twofactor =
        match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await {
            Some(tf) => tf,
            _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
        };

    // The number of steps back and forward in time
    // Also check if we need to disable time drifted TOTP codes.
    // If that is the case, we set the steps to 0 so only the current TOTP is valid.
    let steps = !CONFIG.authenticator_disable_time_drift() as i64;
    let steps = i64::from(!CONFIG.authenticator_disable_time_drift());
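    // (added note) i64::from(bool) maps false -> 0 and true -> 1, so with drift
    // checking enabled one 30-second step is accepted on either side of "now",
    // while disabling drift shrinks the window to the current step only.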

    // Get the current system time in UNIX Epoch (UTC)
    let current_time = chrono::Utc::now();
@@ -147,7 +157,7 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &C
        let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);
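        // (added note) totp_custom::<Sha1>(30, 6, ...) from totp_lite generates
        // the standard RFC 6238 code: 30-second time steps, 6 digits, HMAC-SHA1.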

        // Check that the given code equals the generated one, and that the time_step is larger than the last one used.
        if generated == totp_code && time_step > twofactor.last_used as i64 {
        if generated == totp_code && time_step > i64::from(twofactor.last_used) {
            // If the step does not equal 0, the time has drifted on either the server or the client side.
            if step != 0 {
                warn!("TOTP Time drift detected. The step offset is {}", step);
@@ -156,14 +166,24 @@ pub fn validate_totp_code(user_uuid: &str, totp_code: &str, secret: &str, ip: &C
            // Save the last used time step so only totp time steps higher than this one are allowed.
            // This will also save a newly created twofactor if the code is correct.
            twofactor.last_used = time_step as i32;
            twofactor.save(conn)?;
            twofactor.save(conn).await?;
            return Ok(());
        } else if generated == totp_code && time_step <= twofactor.last_used as i64 {
        } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
            warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
            err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
            err!(
                format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
                ErrorEvent {
                    event: EventType::UserFailedLogIn2fa
                }
            );
        }
    }

    // Else no valid code was received, deny access
    err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
    err!(
        format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
        ErrorEvent {
            event: EventType::UserFailedLogIn2fa
        }
    );
}

@@ -1,14 +1,17 @@
use chrono::Utc;
use data_encoding::BASE64;
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::{
    api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    api::{
        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
        PasswordData,
    },
    auth::{ClientIp, Headers},
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType, User},
        models::{EventType, TwoFactor, TwoFactorType, User},
        DbConn,
    },
    error::MapResult,
@@ -89,14 +92,14 @@ impl DuoStatus {
const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";

#[post("/two-factor/get-duo", data = "<data>")]
fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;

    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let data = get_user_duo_data(&headers.user.uuid, &conn);
    let data = get_user_duo_data(&headers.user.uuid, &mut conn).await;

    let (enabled, data) = match data {
        DuoStatus::Global(_) => (true, Some(DuoData::secret())),
@@ -152,7 +155,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
}

#[post("/two-factor/duo", data = "<data>")]
fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
    let data: EnableDuoData = data.into_inner().data;
    let mut user = headers.user;

@@ -163,7 +166,7 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)
    let (data, data_str) = if check_duo_fields_custom(&data) {
        let data_req: DuoData = data.into();
        let data_str = serde_json::to_string(&data_req)?;
        duo_api_request("GET", "/auth/v2/check", "", &data_req).map_res("Failed to validate Duo credentials")?;
        duo_api_request("GET", "/auth/v2/check", "", &data_req).await.map_res("Failed to validate Duo credentials")?;
        (data_req.obscure(), data_str)
    } else {
        (DuoData::secret(), String::new())
@@ -171,9 +174,11 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)

    let type_ = TwoFactorType::Duo;
    let twofactor = TwoFactor::new(user.uuid.clone(), type_, data_str);
    twofactor.save(&conn)?;
    twofactor.save(&mut conn).await?;

    _generate_recover_code(&mut user, &conn);
    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

    Ok(Json(json!({
        "Enabled": true,
@@ -185,11 +190,11 @@ fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn)
}

#[put("/two-factor/duo", data = "<data>")]
fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_duo(data, headers, conn)
async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn, ip: ClientIp) -> JsonResult {
    activate_duo(data, headers, conn, ip).await
}

fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
    use reqwest::{header, Method};
    use std::str::FromStr;

@@ -209,7 +214,8 @@ fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> Em
        .basic_auth(username, Some(password))
        .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)")
        .header(header::DATE, date)
        .send()?
        .send()
        .await?
        .error_for_status()?;

    Ok(())
@@ -222,11 +228,11 @@ const AUTH_PREFIX: &str = "AUTH";
const DUO_PREFIX: &str = "TX";
const APP_PREFIX: &str = "APP";

fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus {
    let type_ = TwoFactorType::Duo as i32;

    // If the user doesn't have an entry, Duo is disabled
    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn) {
    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await {
        Some(t) => t,
        None => return DuoStatus::Disabled(DuoData::global().is_some()),
    };
@@ -246,19 +252,20 @@ fn get_user_duo_data(uuid: &str, conn: &DbConn) -> DuoStatus {
}

// let (ik, sk, ak, host) = get_duo_keys();
fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> {
    let data = User::find_by_mail(email, conn)
        .and_then(|u| get_user_duo_data(&u.uuid, conn).data())
        .or_else(DuoData::global)
        .map_res("Can't fetch Duo keys")?;
async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> {
    let data = match User::find_by_mail(email, conn).await {
        Some(u) => get_user_duo_data(&u.uuid, conn).await.data(),
        _ => DuoData::global(),
    }
    .map_res("Can't fetch Duo Keys")?;

    Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
}

pub fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> {
pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> {
    let now = Utc::now().timestamp();

    let (ik, sk, ak, host) = get_duo_keys_email(email, conn)?;
    let (ik, sk, ak, host) = get_duo_keys_email(email, conn).await?;

    let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE);
    let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE);
@@ -273,14 +280,19 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64
    format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie))
}
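// (added note) This matches Duo's WebSDK v2 "signed request" layout, assuming
// the standard scheme: each half is "<PREFIX>|base64(user|ikey|expiry)|hmac",
// and the client is handed "<TX half>:<APP half>" to post back after the Duo
// prompt completes.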
|
||||
|
||||
pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult {
|
||||
pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
|
||||
// email is as entered by the user, so it needs to be normalized before
|
||||
// comparison with auth_user below.
|
||||
let email = &email.to_lowercase();
|
||||
|
||||
let split: Vec<&str> = response.split(':').collect();
|
||||
if split.len() != 2 {
|
||||
err!("Invalid response length");
|
||||
err!(
|
||||
"Invalid response length",
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn2fa
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
let auth_sig = split[0];
|
||||
@@ -288,13 +300,18 @@ pub fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyRe
|
||||
|
||||
let now = Utc::now().timestamp();
|
||||
|
||||
let (ik, sk, ak, _host) = get_duo_keys_email(email, conn)?;
|
||||
let (ik, sk, ak, _host) = get_duo_keys_email(email, conn).await?;
|
||||
|
||||
let auth_user = parse_duo_values(&sk, auth_sig, &ik, AUTH_PREFIX, now)?;
|
||||
let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?;
|
||||
|
||||
if !crypto::ct_eq(&auth_user, app_user) || !crypto::ct_eq(&auth_user, email) {
|
||||
err!("Error validating duo authentication")
|
||||
err!(
|
||||
"Error validating duo authentication",
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn2fa
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
Ok(())

@@ -1,13 +1,16 @@
use chrono::{Duration, NaiveDateTime, Utc};
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;

use crate::{
    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    api::{
        core::{log_user_event, two_factor::_generate_recover_code},
        EmptyResult, JsonResult, JsonUpcase, PasswordData,
    },
    auth::{ClientIp, Headers},
    crypto,
    db::{
        models::{TwoFactor, TwoFactorType},
        models::{EventType, TwoFactor, TwoFactorType},
        DbConn,
    },
    error::{Error, MapResult},
@@ -28,13 +31,13 @@ struct SendEmailLoginData {
/// User is trying to login and wants to use email 2FA.
/// Does not require Bearer token
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> EmptyResult {
async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
    let data: SendEmailLoginData = data.into_inner().data;

    use crate::db::models::User;

    // Get the user
    let user = match User::find_by_mail(&data.Email, &conn) {
    let user = match User::find_by_mail(&data.Email, &mut conn).await {
        Some(user) => user,
        None => err!("Username or password is incorrect. Try again."),
    };
@@ -48,31 +51,32 @@ fn send_email_login(data: JsonUpcase<SendEmailLoginData>, conn: DbConn) -> Empty
        err!("Email 2FA is disabled")
    }

    send_token(&user.uuid, &conn)?;
    send_token(&user.uuid, &mut conn).await?;

    Ok(())
}

/// Generate the token, save the data for later verification and send email to user
pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
    let type_ = TwoFactorType::Email as i32;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, conn).map_res("Two factor not found")?;
    let mut twofactor =
        TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await.map_res("Two factor not found")?;

    let generated_token = crypto::generate_token(CONFIG.email_token_size())?;
    let generated_token = crypto::generate_email_token(CONFIG.email_token_size());

    let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?;
    twofactor_data.set_token(generated_token);
    twofactor.data = twofactor_data.to_json();
    twofactor.save(conn)?;
    twofactor.save(conn).await?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?).await?;

    Ok(())
}
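
The switch from `crypto::generate_token` to `crypto::generate_email_token` also removes the fallible path: the old helper apparently could not produce more than 19 characters (the removed `test_token_too_large` test further down asserts that 20 fail), while a digit-by-digit generator cannot fail and needs no trailing `?`. A plausible sketch of such a generator, assuming the `rand` crate; the real helper may differ:

```rust
use rand::Rng;

/// Digits-only token generator in the spirit of `crypto::generate_email_token`.
fn generate_email_token(token_size: u8) -> String {
    let mut rng = rand::thread_rng();
    // Sample each digit independently, so any requested length works.
    (0..token_size).map(|_| char::from(b'0' + rng.gen_range(0..10u8))).collect()
}
```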

/// When user clicks on Manage email 2FA show the user the related information
#[post("/two-factor/get-email", data = "<data>")]
fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;
    let user = headers.user;

@@ -80,13 +84,14 @@ fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) ->
        err!("Invalid password");
    }

    let (enabled, mfa_email) = match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &conn) {
        Some(x) => {
            let twofactor_data = EmailTokenData::from_json(&x.data)?;
            (true, json!(twofactor_data.email))
        }
        _ => (false, json!(null)),
    };
    let (enabled, mfa_email) =
        match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await {
            Some(x) => {
                let twofactor_data = EmailTokenData::from_json(&x.data)?;
                (true, json!(twofactor_data.email))
            }
            _ => (false, json!(null)),
        };

    Ok(Json(json!({
        "Email": mfa_email,
@@ -105,7 +110,7 @@ struct SendEmailData {

/// Send a verification email to the specified email address to check whether it exists/belongs to user.
#[post("/two-factor/send-email", data = "<data>")]
fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -> EmptyResult {
async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
    let data: SendEmailData = data.into_inner().data;
    let user = headers.user;

@@ -119,18 +124,18 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -

    let type_ = TwoFactorType::Email as i32;

    if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
        tf.delete(&conn)?;
    if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
        tf.delete(&mut conn).await?;
    }

    let generated_token = crypto::generate_token(CONFIG.email_token_size())?;
    let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
    let twofactor_data = EmailTokenData::new(data.Email, generated_token);

    // Uses EmailVerificationChallenge as type to show that it's not verified yet.
    let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
    twofactor.save(&conn)?;
    twofactor.save(&mut conn).await?;

    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?)?;
    mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?).await?;

    Ok(())
}
@@ -145,7 +150,7 @@ struct EmailData {

/// Verify email belongs to user and can be used for 2FA email codes.
#[put("/two-factor/email", data = "<data>")]
fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
    let data: EmailData = data.into_inner().data;
    let mut user = headers.user;

@@ -154,7 +159,8 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
    }

    let type_ = TwoFactorType::EmailVerificationChallenge as i32;
    let mut twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).map_res("Two factor not found")?;
    let mut twofactor =
        TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await.map_res("Two factor not found")?;

    let mut email_data = EmailTokenData::from_json(&twofactor.data)?;

@@ -170,9 +176,11 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
    email_data.reset_token();
    twofactor.atype = TwoFactorType::Email as i32;
    twofactor.data = email_data.to_json();
    twofactor.save(&conn)?;
    twofactor.save(&mut conn).await?;

    _generate_recover_code(&mut user, &conn);
    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

    Ok(Json(json!({
        "Email": email_data.email,
@@ -182,13 +190,19 @@ fn email(data: JsonUpcase<EmailData>, headers: Headers, conn: DbConn) -> JsonRes
}

/// Validate the email code when used as TwoFactor token mechanism
pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &DbConn) -> EmptyResult {
pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult {
    let mut email_data = EmailTokenData::from_json(data)?;
    let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn)
        .await
        .map_res("Two factor not found")?;
    let issued_token = match &email_data.last_token {
        Some(t) => t,
        _ => err!("No token available"),
        _ => err!(
            "No token available",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        ),
    };

    if !crypto::ct_eq(issued_token, token) {
@@ -197,23 +211,34 @@ pub fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &
            email_data.reset_token();
        }
        twofactor.data = email_data.to_json();
        twofactor.save(conn)?;
        twofactor.save(conn).await?;

        err!("Token is invalid")
        err!(
            "Token is invalid",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        )
    }

    email_data.reset_token();
    twofactor.data = email_data.to_json();
    twofactor.save(conn)?;
    twofactor.save(conn).await?;

    let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
    let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
    let max_time = CONFIG.email_expiration_time() as i64;
    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
        err!("Token has expired")
        err!(
            "Token has expired",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        )
    }

    Ok(())
}
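
One behavioural note on this hunk: chrono deprecated the panicking `NaiveDateTime::from_timestamp` in favour of `from_timestamp_opt`, which returns `None` for out-of-range inputs; the replacement keeps the old panic-on-invalid behaviour explicit via `expect`. A small self-contained illustration of the expiry arithmetic used above (values here are illustrative only):

```rust
use chrono::{Duration, NaiveDateTime, Utc};

/// Returns true when `token_sent` (a Unix timestamp) is older than `max_secs`.
fn token_expired(token_sent: i64, max_secs: i64) -> bool {
    let sent = NaiveDateTime::from_timestamp_opt(token_sent, 0).expect("timestamp out of range");
    sent + Duration::seconds(max_secs) < Utc::now().naive_utc()
}

fn main() {
    // A token sent an hour ago with a 10-minute window has expired.
    let sent = Utc::now().timestamp() - 3600;
    assert!(token_expired(sent, 600));
}
```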

/// Data stored in the TwoFactor table in the db
#[derive(Serialize, Deserialize)]
pub struct EmailTokenData {
@@ -309,18 +334,4 @@ mod tests {
        // If it's smaller than 3 characters it should only show asterisks.
        assert_eq!(result, "***@example.ext");
    }

    #[test]
    fn test_token() {
        let result = crypto::generate_token(19).unwrap();

        assert_eq!(result.chars().count(), 19);
    }

    #[test]
    fn test_token_too_large() {
        let result = crypto::generate_token(20);

        assert!(result.is_err(), "too large token should give an error");
    }
}
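
The surviving assertion above exercises the address-masking helper used when showing a configured 2FA email back to the user (the two `generate_token` tests are removed along with the helper itself). A sketch consistent with that test; `obscure_email` is a hypothetical name, and the function in the codebase may differ:

```rust
/// Mask the local part of an address; anything shorter than 3 characters
/// becomes "***". ASCII is assumed for brevity.
fn obscure_email(email: &str) -> String {
    let (name, domain) = email.split_once('@').unwrap_or((email, ""));
    let masked = match name.len() {
        0..=2 => "***".to_string(),
        n => format!("{}{}", &name[..2], "*".repeat(n - 2)),
    };
    format!("{masked}@{domain}")
}

fn main() {
    assert_eq!(obscure_email("jd@example.ext"), "***@example.ext");
}
```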

@@ -1,30 +1,36 @@
use chrono::{Duration, Utc};
use data_encoding::BASE32;
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;

use crate::{
    api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
    auth::Headers,
    api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
    auth::{ClientHeaders, ClientIp, Headers},
    crypto,
    db::{models::*, DbConn},
    db::{models::*, DbConn, DbPool},
    mail, CONFIG,
};

pub mod authenticator;
pub mod duo;
pub mod email;
pub mod u2f;
pub mod webauthn;
pub mod yubikey;

pub fn routes() -> Vec<Route> {
    let mut routes = routes![get_twofactor, get_recover, recover, disable_twofactor, disable_twofactor_put,];
    let mut routes = routes![
        get_twofactor,
        get_recover,
        recover,
        disable_twofactor,
        disable_twofactor_put,
        get_device_verification_settings,
    ];

    routes.append(&mut authenticator::routes());
    routes.append(&mut duo::routes());
    routes.append(&mut email::routes());
    routes.append(&mut u2f::routes());
    routes.append(&mut webauthn::routes());
    routes.append(&mut yubikey::routes());

@@ -32,8 +38,8 @@ pub fn routes() -> Vec<Route> {
}

#[get("/two-factor")]
fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
    let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn);
async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
    let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &mut conn).await;
    let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

    Json(json!({
@@ -67,13 +73,18 @@ struct RecoverTwoFactor {
}

#[post("/two-factor/recover", data = "<data>")]
fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
async fn recover(
    data: JsonUpcase<RecoverTwoFactor>,
    client_headers: ClientHeaders,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    let data: RecoverTwoFactor = data.into_inner().data;

    use crate::db::models::User;

    // Get the user
    let mut user = match User::find_by_mail(&data.Email, &conn) {
    let mut user = match User::find_by_mail(&data.Email, &mut conn).await {
        Some(user) => user,
        None => err!("Username or password is incorrect. Try again."),
    };
@@ -89,19 +100,21 @@ fn recover(data: JsonUpcase<RecoverTwoFactor>, conn: DbConn) -> JsonResult {
    }

    // Remove all twofactors from the user
    TwoFactor::delete_all_by_user(&user.uuid, &conn)?;
    TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;

    log_user_event(EventType::UserRecovered2fa as i32, &user.uuid, client_headers.device_type, &ip.ip, &mut conn).await;

    // Remove the recovery code, not needed without twofactors
    user.totp_recover = None;
    user.save(&conn)?;
    user.save(&mut conn).await?;
    Ok(Json(json!({})))
}

fn _generate_recover_code(user: &mut User, conn: &DbConn) {
async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
    if user.totp_recover.is_none() {
        let totp_recover = BASE32.encode(&crypto::get_random(vec![0u8; 20]));
        let totp_recover = crypto::encode_random_bytes::<20>(BASE32);
        user.totp_recover = Some(totp_recover);
        user.save(conn).ok();
        user.save(conn).await.ok();
    }
}
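
`crypto::encode_random_bytes::<20>(BASE32)` replaces the allocate-then-fill pattern with a const-generic helper. A sketch of what such a helper plausibly looks like, using `rand` and `data_encoding`; the project's version may differ:

```rust
use data_encoding::{Encoding, BASE32};
use rand::RngCore;

/// Fill N random bytes on the stack and encode them; no heap allocation
/// for the randomness itself, unlike the old `vec![0u8; 20]` form.
fn encode_random_bytes<const N: usize>(e: Encoding) -> String {
    let mut bytes = [0u8; N];
    rand::thread_rng().fill_bytes(&mut bytes);
    e.encode(&bytes)
}

fn main() {
    let recovery_code = encode_random_bytes::<20>(BASE32);
    assert_eq!(recovery_code.len(), 32); // 20 bytes -> 32 base32 chars, no padding
}
```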

@@ -113,7 +126,12 @@ struct DisableTwoFactorData {
}

#[post("/two-factor/disable", data = "<data>")]
fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn disable_twofactor(
    data: JsonUpcase<DisableTwoFactorData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    let data: DisableTwoFactorData = data.into_inner().data;
    let password_hash = data.MasterPasswordHash;
    let user = headers.user;
@@ -124,23 +142,25 @@ fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, c

    let type_ = data.Type.into_i32()?;

    if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
        twofactor.delete(&conn)?;
    if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
        twofactor.delete(&mut conn).await?;
        log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
    }

    let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &conn).is_empty();
    let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();

    if twofactor_disabled {
        let policy_type = OrgPolicyType::TwoFactorAuthentication;
        let org_list = UserOrganization::find_by_user_and_policy(&user.uuid, policy_type, &conn);

        for user_org in org_list.into_iter() {
        for user_org in
            UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, &mut conn)
                .await
                .into_iter()
        {
            if user_org.atype < UserOrgType::Admin {
                if CONFIG.mail_enabled() {
                    let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).unwrap();
                    mail::send_2fa_removed_from_org(&user.email, &org.name)?;
                    let org = Organization::find_by_uuid(&user_org.org_uuid, &mut conn).await.unwrap();
                    mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
                }
                user_org.delete(&conn)?;
                user_org.delete(&mut conn).await?;
            }
        }
    }
@@ -153,6 +173,61 @@ fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, c
}

#[put("/two-factor/disable", data = "<data>")]
fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
    disable_twofactor(data, headers, conn)
async fn disable_twofactor_put(
    data: JsonUpcase<DisableTwoFactorData>,
    headers: Headers,
    conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    disable_twofactor(data, headers, conn, ip).await
}

pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
    debug!("Sending notifications for incomplete 2FA logins");

    if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
        return;
    }

    let mut conn = match pool.get().await {
        Ok(conn) => conn,
        _ => {
            error!("Failed to get DB connection in send_incomplete_2fa_notifications()");
            return;
        }
    };

    let now = Utc::now().naive_utc();
    let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
    let time_before = now - time_limit;
    let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
    for login in incomplete_logins {
        let user = User::find_by_uuid(&login.user_uuid, &mut conn).await.expect("User not found");
        info!(
            "User {} did not complete a 2FA login within the configured time limit. IP: {}",
            user.email, login.ip_address
        );
        mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name)
            .await
            .expect("Error sending incomplete 2FA email");
        login.delete(&mut conn).await.expect("Error deleting incomplete 2FA record");
    }
}
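
`send_incomplete_2fa_notifications` takes a `DbPool` rather than a request-scoped `DbConn`, which suggests it runs from a background scheduler rather than a route handler. A sketch of how such a job could be driven under a tokio runtime; the project's actual scheduler wiring is not shown in this diff:

```rust
use std::time::Duration;

/// Hypothetical wiring: poll for incomplete 2FA logins once a minute.
async fn run_incomplete_2fa_job(pool: DbPool) {
    let mut interval = tokio::time::interval(Duration::from_secs(60));
    loop {
        interval.tick().await; // the first tick completes immediately
        send_incomplete_2fa_notifications(pool.clone()).await;
    }
}
```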

// This function currently is just a dummy and the actual part is not implemented yet.
// This also prevents 404 errors.
//
// See the following Bitwarden PR's regarding this feature.
// https://github.com/bitwarden/clients/pull/2843
// https://github.com/bitwarden/clients/pull/2839
// https://github.com/bitwarden/server/pull/2016
//
// The HTML part is hidden via the CSS patches done via the bw_web_build repo
#[get("/two-factor/get-device-verification-settings")]
fn get_device_verification_settings(_headers: Headers, _conn: DbConn) -> Json<Value> {
    Json(json!({
        "isDeviceVerificationSectionEnabled":false,
        "unknownDeviceVerificationEnabled":false,
        "object":"deviceVerificationSettings"
    }))
}

@@ -1,352 +0,0 @@
use once_cell::sync::Lazy;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use u2f::{
    messages::{RegisterResponse, SignResponse, U2fSignRequest},
    protocol::{Challenge, U2f},
    register::Registration,
};

use crate::{
    api::{
        core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, NumberOrString,
        PasswordData,
    },
    auth::Headers,
    db::{
        models::{TwoFactor, TwoFactorType},
        DbConn,
    },
    error::Error,
    CONFIG,
};

const U2F_VERSION: &str = "U2F_V2";

static APP_ID: Lazy<String> = Lazy::new(|| format!("{}/app-id.json", &CONFIG.domain()));
static U2F: Lazy<U2f> = Lazy::new(|| U2f::new(APP_ID.clone()));

pub fn routes() -> Vec<Route> {
    routes![generate_u2f, generate_u2f_challenge, activate_u2f, activate_u2f_put, delete_u2f,]
}

#[post("/two-factor/get-u2f", data = "<data>")]
fn generate_u2f(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
    if !CONFIG.domain_set() {
        err!("`DOMAIN` environment variable is not set. U2F disabled")
    }
    let data: PasswordData = data.into_inner().data;

    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let (enabled, keys) = get_u2f_registrations(&headers.user.uuid, &conn)?;
    let keys_json: Vec<Value> = keys.iter().map(U2FRegistration::to_json).collect();

    Ok(Json(json!({
        "Enabled": enabled,
        "Keys": keys_json,
        "Object": "twoFactorU2f"
    })))
}

#[post("/two-factor/get-u2f-challenge", data = "<data>")]
fn generate_u2f_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
    let data: PasswordData = data.into_inner().data;

    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let _type = TwoFactorType::U2fRegisterChallenge;
    let challenge = _create_u2f_challenge(&headers.user.uuid, _type, &conn).challenge;

    Ok(Json(json!({
        "UserId": headers.user.uuid,
        "AppId": APP_ID.to_string(),
        "Challenge": challenge,
        "Version": U2F_VERSION,
    })))
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EnableU2FData {
    Id: NumberOrString,
    // 1..5
    Name: String,
    MasterPasswordHash: String,
    DeviceResponse: String,
}

// This struct is referenced from the U2F lib
// because it doesn't implement Deserialize
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[serde(remote = "Registration")]
struct RegistrationDef {
    key_handle: Vec<u8>,
    pub_key: Vec<u8>,
    attestation_cert: Option<Vec<u8>>,
    device_name: Option<String>,
}

#[derive(Serialize, Deserialize)]
pub struct U2FRegistration {
    pub id: i32,
    pub name: String,
    #[serde(with = "RegistrationDef")]
    pub reg: Registration,
    pub counter: u32,
    compromised: bool,
    pub migrated: Option<bool>,
}

impl U2FRegistration {
    fn to_json(&self) -> Value {
        json!({
            "Id": self.id,
            "Name": self.name,
            "Compromised": self.compromised,
        })
    }
}

// This struct is copied from the U2F lib
// to add an optional error code
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct RegisterResponseCopy {
    pub registration_data: String,
    pub version: String,
    pub client_data: String,

    pub error_code: Option<NumberOrString>,
}

impl From<RegisterResponseCopy> for RegisterResponse {
    fn from(r: RegisterResponseCopy) -> RegisterResponse {
        RegisterResponse {
            registration_data: r.registration_data,
            version: r.version,
            client_data: r.client_data,
        }
    }
}

#[post("/two-factor/u2f", data = "<data>")]
fn activate_u2f(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
    let data: EnableU2FData = data.into_inner().data;
    let mut user = headers.user;

    if !user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let tf_type = TwoFactorType::U2fRegisterChallenge as i32;
    let tf_challenge = match TwoFactor::find_by_user_and_type(&user.uuid, tf_type, &conn) {
        Some(c) => c,
        None => err!("Can't recover challenge"),
    };

    let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
    tf_challenge.delete(&conn)?;

    let response: RegisterResponseCopy = serde_json::from_str(&data.DeviceResponse)?;

    let error_code = response.error_code.clone().map_or("0".into(), NumberOrString::into_string);

    if error_code != "0" {
        err!("Error registering U2F token")
    }

    let registration = U2F.register_response(challenge, response.into())?;
    let full_registration = U2FRegistration {
        id: data.Id.into_i32()?,
        name: data.Name,
        reg: registration,
        compromised: false,
        counter: 0,
        migrated: None,
    };

    let mut regs = get_u2f_registrations(&user.uuid, &conn)?.1;

    // TODO: Check that there is no repeat Id
    regs.push(full_registration);
    save_u2f_registrations(&user.uuid, &regs, &conn)?;

    _generate_recover_code(&mut user, &conn);

    let keys_json: Vec<Value> = regs.iter().map(U2FRegistration::to_json).collect();
    Ok(Json(json!({
        "Enabled": true,
        "Keys": keys_json,
        "Object": "twoFactorU2f"
    })))
}

#[put("/two-factor/u2f", data = "<data>")]
fn activate_u2f_put(data: JsonUpcase<EnableU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_u2f(data, headers, conn)
}

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct DeleteU2FData {
    Id: NumberOrString,
    MasterPasswordHash: String,
}

#[delete("/two-factor/u2f", data = "<data>")]
fn delete_u2f(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
    let data: DeleteU2FData = data.into_inner().data;

    let id = data.Id.into_i32()?;

    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let type_ = TwoFactorType::U2f as i32;
    let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, type_, &conn) {
        Some(tf) => tf,
        None => err!("U2F data not found!"),
    };

    let mut data: Vec<U2FRegistration> = match serde_json::from_str(&tf.data) {
        Ok(d) => d,
        Err(_) => err!("Error parsing U2F data"),
    };

    data.retain(|r| r.id != id);

    let new_data_str = serde_json::to_string(&data)?;

    tf.data = new_data_str;
    tf.save(&conn)?;

    let keys_json: Vec<Value> = data.iter().map(U2FRegistration::to_json).collect();

    Ok(Json(json!({
        "Enabled": true,
        "Keys": keys_json,
        "Object": "twoFactorU2f"
    })))
}

fn _create_u2f_challenge(user_uuid: &str, type_: TwoFactorType, conn: &DbConn) -> Challenge {
    let challenge = U2F.generate_challenge().unwrap();

    TwoFactor::new(user_uuid.into(), type_, serde_json::to_string(&challenge).unwrap())
        .save(conn)
        .expect("Error saving challenge");

    challenge
}

fn save_u2f_registrations(user_uuid: &str, regs: &[U2FRegistration], conn: &DbConn) -> EmptyResult {
    TwoFactor::new(user_uuid.into(), TwoFactorType::U2f, serde_json::to_string(regs)?).save(conn)
}

fn get_u2f_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<U2FRegistration>), Error> {
    let type_ = TwoFactorType::U2f as i32;
    let (enabled, regs) = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
        Some(tf) => (tf.enabled, tf.data),
        None => return Ok((false, Vec::new())), // If no data, return empty list
    };

    let data = match serde_json::from_str(&regs) {
        Ok(d) => d,
        Err(_) => {
            // If error, try old format
            let mut old_regs = _old_parse_registrations(&regs);

            if old_regs.len() != 1 {
                err!("The old U2F format only allows one device")
            }

            // Convert to new format
            let new_regs = vec![U2FRegistration {
                id: 1,
                name: "Unnamed U2F key".into(),
                reg: old_regs.remove(0),
                compromised: false,
                counter: 0,
                migrated: None,
            }];

            // Save new format
            save_u2f_registrations(user_uuid, &new_regs, conn)?;

            new_regs
        }
    };

    Ok((enabled, data))
}

fn _old_parse_registrations(registations: &str) -> Vec<Registration> {
    #[derive(Deserialize)]
    struct Helper(#[serde(with = "RegistrationDef")] Registration);

    let regs: Vec<Value> = serde_json::from_str(registations).expect("Can't parse Registration data");

    regs.into_iter().map(|r| serde_json::from_value(r).unwrap()).map(|Helper(r)| r).collect()
}

pub fn generate_u2f_login(user_uuid: &str, conn: &DbConn) -> ApiResult<U2fSignRequest> {
    let challenge = _create_u2f_challenge(user_uuid, TwoFactorType::U2fLoginChallenge, conn);

    let registrations: Vec<_> = get_u2f_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.reg).collect();

    if registrations.is_empty() {
        err!("No U2F devices registered")
    }

    Ok(U2F.sign_request(challenge, registrations))
}

pub fn validate_u2f_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
    let challenge_type = TwoFactorType::U2fLoginChallenge as i32;
    let tf_challenge = TwoFactor::find_by_user_and_type(user_uuid, challenge_type, conn);

    let challenge = match tf_challenge {
        Some(tf_challenge) => {
            let challenge: Challenge = serde_json::from_str(&tf_challenge.data)?;
            tf_challenge.delete(conn)?;
            challenge
        }
        None => err!("Can't recover login challenge"),
    };
    let response: SignResponse = serde_json::from_str(response)?;
    let mut registrations = get_u2f_registrations(user_uuid, conn)?.1;
    if registrations.is_empty() {
        err!("No U2F devices registered")
    }

    for reg in &mut registrations {
        let response = U2F.sign_response(challenge.clone(), reg.reg.clone(), response.clone(), reg.counter);
        match response {
            Ok(new_counter) => {
                reg.counter = new_counter;
                save_u2f_registrations(user_uuid, &registrations, conn)?;

                return Ok(());
            }
            Err(u2f::u2ferror::U2fError::CounterTooLow) => {
                reg.compromised = true;
                save_u2f_registrations(user_uuid, &registrations, conn)?;

                err!("This device might be compromised!");
            }
            Err(e) => {
                warn!("E {:#}", e);
                // break;
            }
        }
    }
    err!("error verifying response")
}
@@ -1,15 +1,17 @@
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use url::Url;
use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn};

use crate::{
    api::{
        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
        core::{log_user_event, two_factor::_generate_recover_code},
        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
    },
    auth::Headers,
    auth::{ClientIp, Headers},
    db::{
        models::{TwoFactor, TwoFactorType},
        models::{EventType, TwoFactor, TwoFactorType},
        DbConn,
    },
    error::Error,
@@ -20,9 +22,31 @@ pub fn routes() -> Vec<Route> {
    routes![get_webauthn, generate_webauthn_challenge, activate_webauthn, activate_webauthn_put, delete_webauthn,]
}

// Some old u2f structs still needed for migrating from u2f to WebAuthn
// Both `struct Registration` and `struct U2FRegistration` can be removed if we remove the u2f to WebAuthn migration
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Registration {
    pub key_handle: Vec<u8>,
    pub pub_key: Vec<u8>,
    pub attestation_cert: Option<Vec<u8>>,
    pub device_name: Option<String>,
}

#[derive(Serialize, Deserialize)]
pub struct U2FRegistration {
    pub id: i32,
    pub name: String,
    #[serde(with = "Registration")]
    pub reg: Registration,
    pub counter: u32,
    compromised: bool,
    pub migrated: Option<bool>,
}

struct WebauthnConfig {
    url: String,
    origin: String,
    origin: Url,
    rpid: String,
}

@@ -31,13 +55,9 @@ impl WebauthnConfig {
        let domain = CONFIG.domain();
        let domain_origin = CONFIG.domain_origin();
        Webauthn::new(Self {
            rpid: reqwest::Url::parse(&domain)
                .map(|u| u.domain().map(str::to_owned))
                .ok()
                .flatten()
                .unwrap_or_default(),
            rpid: Url::parse(&domain).map(|u| u.domain().map(str::to_owned)).ok().flatten().unwrap_or_default(),
            url: domain,
            origin: domain_origin,
            origin: Url::parse(&domain_origin).unwrap(),
        })
    }
}
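
The `origin` field moving from `String` to `url::Url` matches the `webauthn-rs` trait change in the next hunk, where `get_origin` now returns `&Url`. The relying-party ID is still derived from the configured domain; a standalone illustration of that derivation with the `url` crate:

```rust
use url::Url;

/// Derive the WebAuthn RP ID (the registrable domain) from a configured URL,
/// falling back to an empty string exactly like the code above.
fn rp_id_from_domain(domain: &str) -> String {
    Url::parse(domain).map(|u| u.domain().map(str::to_owned)).ok().flatten().unwrap_or_default()
}

fn main() {
    assert_eq!(rp_id_from_domain("https://vault.example.com/path"), "vault.example.com");
    assert_eq!(rp_id_from_domain("not a url"), "");
}
```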

@@ -47,7 +67,7 @@ impl webauthn_rs::WebauthnConfig for WebauthnConfig {
        &self.url
    }

    fn get_origin(&self) -> &str {
    fn get_origin(&self) -> &Url {
        &self.origin
    }

@@ -83,7 +103,7 @@ impl WebauthnRegistration {
}

#[post("/two-factor/get-webauthn", data = "<data>")]
fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    if !CONFIG.domain_set() {
        err!("`DOMAIN` environment variable is not set. Webauthn disabled")
    }
@@ -92,7 +112,7 @@ fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn)
        err!("Invalid password");
    }

    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &conn)?;
    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &mut conn).await?;
    let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

    Ok(Json(json!({
@@ -103,12 +123,13 @@ fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn)
}

#[post("/two-factor/get-webauthn-challenge", data = "<data>")]
fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let registrations = get_webauthn_registrations(&headers.user.uuid, &conn)?
    let registrations = get_webauthn_registrations(&headers.user.uuid, &mut conn)
        .await?
        .1
        .into_iter()
        .map(|r| r.credential.cred_id) // We return the credentialIds to the clients to avoid double registering
@@ -124,7 +145,7 @@ fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers,
    )?;

    let type_ = TwoFactorType::WebauthnRegisterChallenge;
    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&conn)?;
    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;

    let mut challenge_value = serde_json::to_value(challenge.public_key)?;
    challenge_value["status"] = "ok".into();
@@ -221,7 +242,12 @@ impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
}

#[post("/two-factor/webauthn", data = "<data>")]
fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_webauthn(
    data: JsonUpcase<EnableWebauthnData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    let data: EnableWebauthnData = data.into_inner().data;
    let mut user = headers.user;

@@ -231,10 +257,10 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con

    // Retrieve and delete the saved challenge state
    let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
    let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn) {
    let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
        Some(tf) => {
            let state: RegistrationState = serde_json::from_str(&tf.data)?;
            tf.delete(&conn)?;
            tf.delete(&mut conn).await?;
            state
        }
        None => err!("Can't recover challenge"),
@@ -244,7 +270,7 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con
    let (credential, _data) =
        WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;

    let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn)?.1;
    let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1;
    // TODO: Check for repeated ID's
    registrations.push(WebauthnRegistration {
        id: data.Id.into_i32()?,
@@ -255,8 +281,12 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con
    });

    // Save the registrations and return them
    TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?).save(&conn)?;
    _generate_recover_code(&mut user, &conn);
    TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
        .save(&mut conn)
        .await?;
    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

    let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
    Ok(Json(json!({
@@ -267,8 +297,13 @@ fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, con
}

#[put("/two-factor/webauthn", data = "<data>")]
fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_webauthn(data, headers, conn)
async fn activate_webauthn_put(
    data: JsonUpcase<EnableWebauthnData>,
    headers: Headers,
    conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    activate_webauthn(data, headers, conn, ip).await
}

#[derive(Deserialize, Debug)]
@@ -279,16 +314,17 @@ struct DeleteU2FData {
}

#[delete("/two-factor/webauthn", data = "<data>")]
fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    let id = data.data.Id.into_i32()?;
    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
        err!("Invalid password");
    }

    let mut tf = match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &conn) {
        Some(tf) => tf,
        None => err!("Webauthn data not found!"),
    };
    let mut tf =
        match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await {
            Some(tf) => tf,
            None => err!("Webauthn data not found!"),
        };

    let mut data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?;

@@ -299,12 +335,13 @@ fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbCo

    let removed_item = data.remove(item_pos);
    tf.data = serde_json::to_string(&data)?;
    tf.save(&conn)?;
    tf.save(&mut conn).await?;
    drop(tf);

    // If entry is migrated from u2f, delete the u2f entry as well
    if let Some(mut u2f) = TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &conn) {
        use crate::api::core::two_factor::u2f::U2FRegistration;
    if let Some(mut u2f) =
        TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &mut conn).await
    {
        let mut data: Vec<U2FRegistration> = match serde_json::from_str(&u2f.data) {
            Ok(d) => d,
            Err(_) => err!("Error parsing U2F data"),
@@ -314,7 +351,7 @@ fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbCo
        let new_data_str = serde_json::to_string(&data)?;

        u2f.data = new_data_str;
        u2f.save(&conn)?;
        u2f.save(&mut conn).await?;
    }

    let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();
@@ -326,18 +363,21 @@ fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, conn: DbCo
    })))
}

pub fn get_webauthn_registrations(user_uuid: &str, conn: &DbConn) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
pub async fn get_webauthn_registrations(
    user_uuid: &str,
    conn: &mut DbConn,
) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
    let type_ = TwoFactorType::Webauthn as i32;
    match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
    match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
        Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)),
        None => Ok((false, Vec::new())), // If no data, return empty list
    }
}

pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {
pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> JsonResult {
    // Load saved credentials
    let creds: Vec<Credential> =
        get_webauthn_registrations(user_uuid, conn)?.1.into_iter().map(|r| r.credential).collect();
        get_webauthn_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.credential).collect();

    if creds.is_empty() {
        err!("No Webauthn devices registered")
@@ -349,27 +389,33 @@ pub fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {

    // Save the challenge state for later validation
    TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
        .save(conn)?;
        .save(conn)
        .await?;

    // Return challenge to the clients
    Ok(Json(serde_json::to_value(response.public_key)?))
}

pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -> EmptyResult {
pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut DbConn) -> EmptyResult {
    let type_ = TwoFactorType::WebauthnLoginChallenge as i32;
    let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn) {
    let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
        Some(tf) => {
            let state: AuthenticationState = serde_json::from_str(&tf.data)?;
            tf.delete(conn)?;
            tf.delete(conn).await?;
            state
        }
        None => err!("Can't recover login challenge"),
        None => err!(
            "Can't recover login challenge",
            ErrorEvent {
                event: EventType::UserFailedLogIn2fa
            }
        ),
    };

    let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
    let rsp: PublicKeyCredential = rsp.data.into();

    let mut registrations = get_webauthn_registrations(user_uuid, conn)?.1;
    let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;

    // If the credential we received is migrated from U2F, enable the U2F compatibility
    //let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
@@ -380,10 +426,16 @@ pub fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &DbConn) -
            reg.credential.counter = auth_data.counter;

            TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
                .save(conn)?;
                .save(conn)
                .await?;
            return Ok(());
        }
    }

    err!("Credential not present")
    err!(
        "Credential not present",
        ErrorEvent {
            event: EventType::UserFailedLogIn2fa
        }
    )
}
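
Persisting `reg.credential.counter` after every successful assertion is what makes the clone detection seen in the deleted U2F code (`CounterTooLow`) work for WebAuthn as well: an authenticator whose reported counter does not advance past the stored one is suspect. A minimal illustration of that invariant; the real check lives inside `webauthn-rs`:

```rust
/// A signature counter must strictly increase between logins; anything else
/// hints at a cloned authenticator (always-zero counters mean "unsupported").
fn counter_is_fresh(stored: u32, reported: u32) -> bool {
    (stored == 0 && reported == 0) || reported > stored
}

fn main() {
    assert!(counter_is_fresh(41, 42));  // normal advance
    assert!(!counter_is_fresh(42, 41)); // replay or clone: reject
}
```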

@@ -1,13 +1,16 @@
use rocket::serde::json::Json;
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use yubico::{config::Config, verify};

use crate::{
    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
    auth::Headers,
    api::{
        core::{log_user_event, two_factor::_generate_recover_code},
        EmptyResult, JsonResult, JsonUpcase, PasswordData,
    },
    auth::{ClientIp, Headers},
    db::{
        models::{TwoFactor, TwoFactorType},
        models::{EventType, TwoFactor, TwoFactorType},
        DbConn,
    },
    error::{Error, MapResult},
@@ -64,21 +67,23 @@ fn get_yubico_credentials() -> Result<(String, String), Error> {
    }
}

fn verify_yubikey_otp(otp: String) -> EmptyResult {
async fn verify_yubikey_otp(otp: String) -> EmptyResult {
    let (yubico_id, yubico_secret) = get_yubico_credentials()?;

    let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);

    match CONFIG.yubico_server() {
        Some(server) => verify(otp, config.set_api_hosts(vec![server])),
        None => verify(otp, config),
        Some(server) => {
            tokio::task::spawn_blocking(move || verify(otp, config.set_api_hosts(vec![server]))).await.unwrap()
        }
        None => tokio::task::spawn_blocking(move || verify(otp, config)).await.unwrap(),
    }
    .map_res("Failed to verify OTP")
    .and(Ok(()))
}
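
The yubico crate's `verify` performs a blocking HTTP round-trip, which would stall Rocket's async workers; `tokio::task::spawn_blocking` moves it onto tokio's blocking thread pool instead. The general pattern, as a standalone sketch (`expensive_lookup` stands in for the blocking `yubico::verify` call):

```rust
/// Run a blocking operation without starving the async executor.
async fn verify_on_blocking_pool(input: String) -> Result<String, tokio::task::JoinError> {
    tokio::task::spawn_blocking(move || expensive_lookup(&input)).await
}

fn expensive_lookup(input: &str) -> String {
    // Imagine a synchronous network call here.
    std::thread::sleep(std::time::Duration::from_millis(50));
    format!("verified:{input}")
}
```

The `.unwrap()` on the join handle in the diff only panics if the blocking task itself panicked, which is why it is considered acceptable there.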

#[post("/two-factor/get-yubikey", data = "<data>")]
fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
    // Make sure the credentials are set
    get_yubico_credentials()?;

@@ -92,7 +97,7 @@ fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbCo
    let user_uuid = &user.uuid;
    let yubikey_type = TwoFactorType::YubiKey as i32;

    let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &conn);
    let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &mut conn).await;

    if let Some(r) = r {
        let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
@@ -113,7 +118,12 @@ fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbCo
}

#[post("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_yubikey(
    data: JsonUpcase<EnableYubikeyData>,
    headers: Headers,
    mut conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    let data: EnableYubikeyData = data.into_inner().data;
    let mut user = headers.user;

@@ -122,10 +132,11 @@ fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn:
    }

    // Check if we already have some data
    let mut yubikey_data = match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn) {
        Some(data) => data,
        None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, String::new()),
    };
    let mut yubikey_data =
        match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &mut conn).await {
            Some(data) => data,
            None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, String::new()),
        };

    let yubikeys = parse_yubikeys(&data);

@@ -143,10 +154,10 @@ fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn:
            continue;
        }

        verify_yubikey_otp(yubikey.to_owned()).map_res("Invalid Yubikey OTP provided")?;
        verify_yubikey_otp(yubikey.to_owned()).await.map_res("Invalid Yubikey OTP provided")?;
    }

    let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (&x[..12]).to_owned()).collect();
    let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect();

    let yubikey_metadata = YubikeyMetadata {
        Keys: yubikey_ids,
@@ -154,9 +165,11 @@ fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn:
    };

    yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();
    yubikey_data.save(&conn)?;
    yubikey_data.save(&mut conn).await?;

    _generate_recover_code(&mut user, &conn);
    _generate_recover_code(&mut user, &mut conn).await;

    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;

    let mut result = jsonify_yubikeys(yubikey_metadata.Keys);

@@ -168,11 +181,16 @@ fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn:
}

#[put("/two-factor/yubikey", data = "<data>")]
fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
    activate_yubikey(data, headers, conn)
async fn activate_yubikey_put(
    data: JsonUpcase<EnableYubikeyData>,
    headers: Headers,
    conn: DbConn,
    ip: ClientIp,
) -> JsonResult {
    activate_yubikey(data, headers, conn, ip).await
}
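
The `(x[..12]).to_owned()` slice works because a Yubikey OTP is a fixed 44-character modhex string whose first 12 characters are the key's stable public ID; only that prefix is stored, and `validate_yubikey_login` below rejects any response that is not exactly 44 characters. As a tiny sketch:

```rust
/// Extract the stable public ID from a 44-character Yubikey OTP.
fn yubikey_public_id(otp: &str) -> Option<&str> {
    (otp.len() == 44 && otp.is_ascii()).then(|| &otp[..12])
}
```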

pub fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult {
pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult {
    if response.len() != 44 {
        err!("Invalid Yubikey OTP length");
    }
@@ -184,7 +202,7 @@ pub fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResu
        err!("Given Yubikey is not registered");
    }

    let result = verify_yubikey_otp(response.to_owned());
    let result = verify_yubikey_otp(response.to_owned()).await;

    match result {
        Ok(_answer) => Ok(()),

src/api/icons.rs (639 changed lines)
@@ -1,16 +1,25 @@
use std::{
    collections::HashMap,
    fs::{create_dir_all, remove_file, symlink_metadata, File},
    io::prelude::*,
    net::{IpAddr, ToSocketAddrs},
    sync::{Arc, RwLock},
    net::IpAddr,
    sync::Arc,
    time::{Duration, SystemTime},
};

use bytes::{Bytes, BytesMut};
use futures::{stream::StreamExt, TryFutureExt};
use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::{blocking::Client, blocking::Response, header};
use rocket::{http::ContentType, response::Content, Route};
use reqwest::{
    header::{self, HeaderMap, HeaderValue},
    Client, Response,
};
use rocket::{http::ContentType, response::Redirect, Route};
use tokio::{
    fs::{create_dir_all, remove_file, symlink_metadata, File},
    io::{AsyncReadExt, AsyncWriteExt},
    net::lookup_host,
};

use html5gum::{Emitter, EndTag, HtmlString, InfallibleTokenizer, Readable, StartTag, StringReader, Tokenizer};

use crate::{
    error::Error,
@@ -19,54 +28,97 @@ use crate::{
};

pub fn routes() -> Vec<Route> {
    routes![icon]
    match CONFIG.icon_service().as_str() {
        "internal" => routes![icon_internal],
        _ => routes![icon_external],
    }
}

static CLIENT: Lazy<Client> = Lazy::new(|| {
    // Generate the default headers
    let mut default_headers = header::HeaderMap::new();
    default_headers
        .insert(header::USER_AGENT, header::HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)"));
    default_headers
        .insert(header::ACCEPT, header::HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1"));
    default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en,*;q=0.1"));
    default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache"));
    default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache"));
    let mut default_headers = HeaderMap::new();
    default_headers.insert(header::USER_AGENT, HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)"));
    default_headers.insert(header::ACCEPT, HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1"));
    default_headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en,*;q=0.1"));
    default_headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("no-cache"));
    default_headers.insert(header::PRAGMA, HeaderValue::from_static("no-cache"));

    // Generate the cookie store
    let cookie_store = Arc::new(Jar::default());

    // Reuse the client between requests
    get_reqwest_client_builder()
        .cookie_provider(Arc::new(Jar::default()))
    let client = get_reqwest_client_builder()
        .cookie_provider(Arc::clone(&cookie_store))
        .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
        .default_headers(default_headers)
        .build()
        .expect("Failed to build icon client")
        .default_headers(default_headers.clone());

    match client.build() {
        Ok(client) => client,
        Err(e) => {
            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
            get_reqwest_client_builder()
                .cookie_provider(cookie_store)
                .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
                .default_headers(default_headers)
                .trust_dns(false)
                .build()
                .expect("Failed to build client")
        }
    }
});

// Build Regex only once since this takes a lot of time.
static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
static ICON_REL_BLACKLIST: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)mask-icon").unwrap());
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

// Special HashMap which holds the user defined Regex to speedup matching the regex.
static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));
static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);

async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
    if !is_valid_domain(domain) {
        warn!("Invalid domain: {}", domain);
        return None;
    }

    if is_domain_blacklisted(domain).await {
        return None;
    }

    let url = template.replace("{}", domain);
    match CONFIG.icon_redirect_code() {
        301 => Some(Redirect::moved(url)),  // legacy permanent redirect
        302 => Some(Redirect::found(url)),  // legacy temporary redirect
        307 => Some(Redirect::temporary(url)),
        308 => Some(Redirect::permanent(url)),
        _ => {
            error!("Unexpected redirect code {}", CONFIG.icon_redirect_code());
            None
        }
    }
}
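
The external mode simply substitutes the requested domain into a configured URL template and answers with an HTTP redirect, so the server never fetches the icon itself. A minimal check of that substitution (the service URL here is a made-up placeholder, not the project's default):

```rust
/// `{}` in the template is replaced by the requested domain, as above.
fn external_icon_url(template: &str, domain: &str) -> String {
    template.replace("{}", domain)
}

fn main() {
    let url = external_icon_url("https://icons.example.test/{}/icon.png", "github.com");
    assert_eq!(url, "https://icons.example.test/github.com/icon.png");
}
```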

#[get("/<domain>/icon.png")]
fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
async fn icon_external(domain: String) -> Option<Redirect> {
    icon_redirect(&domain, &CONFIG._icon_service_url()).await
}

#[get("/<domain>/icon.png")]
async fn icon_internal(domain: String) -> Cached<(ContentType, Vec<u8>)> {
    const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");

    if !is_valid_domain(&domain) {
        warn!("Invalid domain: {}", domain);
        return Cached::ttl(
            Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
            (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
            CONFIG.icon_cache_negttl(),
            true,
        );
    }

    match get_icon(&domain) {
    match get_icon(&domain).await {
        Some((icon, icon_type)) => {
            Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl())
            Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
        }
        _ => Cached::ttl(Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl()),
        _ => Cached::ttl((ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl(), true),
    }
}

@@ -206,68 +258,59 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
fn is_domain_blacklisted(domain: &str) -> bool {
|
||||
let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips()
|
||||
&& (domain, 0)
|
||||
.to_socket_addrs()
|
||||
.map(|x| {
|
||||
for ip_port in x {
|
||||
if !is_global(ip_port.ip()) {
|
||||
warn!("IP {} for domain '{}' is not a global IP!", ip_port.ip(), domain);
|
||||
return true;
|
||||
}
|
||||
use cached::proc_macro::cached;
|
||||
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
|
||||
async fn is_domain_blacklisted(domain: &str) -> bool {
|
||||
// First check the blacklist regex if there is a match.
|
||||
// This prevents the blocked domain(s) from being leaked via a DNS lookup.
|
||||
if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
|
||||
// Use the pre-generated Regex stored in a Lazy HashMap if there is one, else generate it.
|
||||
let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
|
||||
regex.is_match(domain)
|
||||
} else {
|
||||
// Clear the current list if the previous key doesn't exist.
// This prevents the HashMap from growing after someone changes the regex via the admin interface.
|
||||
if ICON_BLACKLIST_REGEX.len() >= 1 {
|
||||
ICON_BLACKLIST_REGEX.clear();
|
||||
}
|
||||
|
||||
// Generate the regex and store it in the Lazy static HashMap.
|
||||
let blacklist_regex = Regex::new(&blacklist).unwrap();
|
||||
let is_match = blacklist_regex.is_match(domain);
|
||||
ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
|
||||
|
||||
is_match
|
||||
};
|
||||
|
||||
if is_match {
|
||||
debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if CONFIG.icon_blacklist_non_global_ips() {
|
||||
if let Ok(s) = lookup_host((domain, 0)).await {
|
||||
for addr in s {
|
||||
if !is_global(addr.ip()) {
|
||||
debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
|
||||
return true;
|
||||
}
|
||||
false
|
||||
})
|
||||
.unwrap_or(false);
|
||||
|
||||
// Skip the regex check if the previous one is true already
|
||||
if !is_blacklisted {
|
||||
if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
|
||||
let mut regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
|
||||
|
||||
// Use the pre-generated Regex stored in a Lazy HashMap if there is one, else generate it.
|
||||
let regex = if let Some(regex) = regex_hashmap.get(&blacklist) {
|
||||
regex
|
||||
} else {
|
||||
drop(regex_hashmap);
|
||||
|
||||
let mut regex_hashmap_write = ICON_BLACKLIST_REGEX.write().unwrap();
|
||||
// Clear the current list if the previous key doesn't exist.
// This prevents the HashMap from growing after someone changes the regex via the admin interface.
|
||||
if regex_hashmap_write.len() >= 1 {
|
||||
regex_hashmap_write.clear();
|
||||
}
|
||||
|
||||
// Generate the regex and store it in the Lazy static HashMap.
|
||||
let blacklist_regex = Regex::new(&blacklist).unwrap();
|
||||
regex_hashmap_write.insert(blacklist.to_string(), blacklist_regex);
|
||||
drop(regex_hashmap_write);
|
||||
|
||||
regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
|
||||
regex_hashmap.get(&blacklist).unwrap()
|
||||
};
|
||||
|
||||
// Use the pre-generate Regex stored in a Lazy HashMap.
|
||||
if regex.is_match(domain) {
|
||||
warn!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
|
||||
is_blacklisted = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
is_blacklisted
|
||||
false
|
||||
}
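The `#[cached]` attribute above memoizes the whole check per domain for 60 seconds, so repeated icon requests skip both the regex match and the DNS lookup. A minimal sketch of the same pattern, assuming the `cached` crate with its proc-macro and async features; the function body here is a hypothetical stand-in:

```rust
use cached::proc_macro::cached;

// Cache up to 16 results for 60 seconds, keyed by the domain string,
// mirroring the attribute used on is_domain_blacklisted above. The
// body only runs on a cache miss; hits return the stored bool.
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
async fn is_probably_blocked(domain: &str) -> bool {
    // Hypothetical expensive work standing in for the regex match and
    // DNS lookups performed by the real function.
    domain.ends_with(".invalid")
}
```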
|
||||
|
||||
fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
|
||||
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
|
||||
let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
|
||||
|
||||
// Check for expiration of negatively cached copy
|
||||
if icon_is_negcached(&path) {
|
||||
if icon_is_negcached(&path).await {
|
||||
return None;
|
||||
}
|
||||
|
||||
if let Some(icon) = get_cached_icon(&path) {
|
||||
if let Some(icon) = get_cached_icon(&path).await {
|
||||
let icon_type = match get_icon_type(&icon) {
|
||||
Some(x) => x,
|
||||
_ => "x-icon",
|
||||
@@ -280,31 +323,31 @@ fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
|
||||
}
|
||||
|
||||
// Get the icon, or None in case of error
|
||||
match download_icon(domain) {
|
||||
match download_icon(domain).await {
|
||||
Ok((icon, icon_type)) => {
|
||||
save_icon(&path, &icon);
|
||||
Some((icon, icon_type.unwrap_or("x-icon").to_string()))
|
||||
save_icon(&path, &icon).await;
|
||||
Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error downloading icon: {:?}", e);
|
||||
warn!("Unable to download icon: {:?}", e);
|
||||
let miss_indicator = path + ".miss";
|
||||
save_icon(&miss_indicator, &[]);
|
||||
save_icon(&miss_indicator, &[]).await;
|
||||
None
|
||||
}
|
||||
}
|
||||
}
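Download failures are negatively cached by writing an empty sibling marker file, which `icon_is_negcached` below checks before any retry. A small illustration of the marker naming convention (paths here are hypothetical):

```rust
fn main() {
    // For a cached icon at "<cache>/<domain>.png", a failed download
    // leaves an empty "<cache>/<domain>.png.miss" marker behind.
    let path = format!("{}/{}.png", "/tmp/icon_cache", "example.com");
    let miss_indicator = path + ".miss";
    assert_eq!(miss_indicator, "/tmp/icon_cache/example.com.png.miss");
}
```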
|
||||
|
||||
fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
|
||||
async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
|
||||
// Check for expiration of successfully cached copy
|
||||
if icon_is_expired(path) {
|
||||
if icon_is_expired(path).await {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Try to read the cached icon, and return it if it exists
|
||||
if let Ok(mut f) = File::open(path) {
|
||||
if let Ok(mut f) = File::open(path).await {
|
||||
let mut buffer = Vec::new();
|
||||
|
||||
if f.read_to_end(&mut buffer).is_ok() {
|
||||
if f.read_to_end(&mut buffer).await.is_ok() {
|
||||
return Some(buffer);
|
||||
}
|
||||
}
|
||||
@@ -312,22 +355,22 @@ fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
|
||||
None
|
||||
}
|
||||
|
||||
fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
|
||||
let meta = symlink_metadata(path)?;
|
||||
async fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
|
||||
let meta = symlink_metadata(path).await?;
|
||||
let modified = meta.modified()?;
|
||||
let age = SystemTime::now().duration_since(modified)?;
|
||||
|
||||
Ok(ttl > 0 && ttl <= age.as_secs())
|
||||
}
|
||||
|
||||
fn icon_is_negcached(path: &str) -> bool {
|
||||
async fn icon_is_negcached(path: &str) -> bool {
|
||||
let miss_indicator = path.to_owned() + ".miss";
|
||||
let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl());
|
||||
let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl()).await;
|
||||
|
||||
match expired {
|
||||
// No longer negatively cached, drop the marker
|
||||
Ok(true) => {
|
||||
if let Err(e) = remove_file(&miss_indicator) {
|
||||
if let Err(e) = remove_file(&miss_indicator).await {
|
||||
error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e);
|
||||
}
|
||||
false
|
||||
@@ -339,8 +382,8 @@ fn icon_is_negcached(path: &str) -> bool {
|
||||
}
|
||||
}
|
||||
|
||||
fn icon_is_expired(path: &str) -> bool {
|
||||
let expired = file_is_expired(path, CONFIG.icon_cache_ttl());
|
||||
async fn icon_is_expired(path: &str) -> bool {
|
||||
let expired = file_is_expired(path, CONFIG.icon_cache_ttl()).await;
|
||||
expired.unwrap_or(true)
|
||||
}
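A sketch of the expiry rule implemented by `file_is_expired` above, expressed as a plain function over an already-fetched modification time; a TTL of 0 disables expiry entirely:

```rust
use std::time::{Duration, SystemTime};

// A cache entry is stale once its mtime is at least `ttl_secs` old.
fn is_stale(modified: SystemTime, ttl_secs: u64) -> bool {
    match SystemTime::now().duration_since(modified) {
        Ok(age) => ttl_secs > 0 && age >= Duration::from_secs(ttl_secs),
        // An mtime in the future yields an error; treat it as fresh.
        Err(_) => false,
    }
}
```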
|
||||
|
||||
@@ -358,91 +401,62 @@ impl Icon {
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterates over the HTML document to find <base href="http://domain.tld">
|
||||
/// When found it will stop the iteration, and the found base href is written back via `base_href`.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `node` - A Parsed HTML document via html5ever::parse_document()
|
||||
/// * `base_href` - a mutable url::Url which will be overwritten when a base href tag has been found.
|
||||
///
|
||||
fn get_base_href(node: &std::rc::Rc<markup5ever_rcdom::Node>, base_href: &mut url::Url) -> bool {
|
||||
if let markup5ever_rcdom::NodeData::Element {
|
||||
name,
|
||||
attrs,
|
||||
..
|
||||
} = &node.data
|
||||
{
|
||||
if name.local.as_ref() == "base" {
|
||||
let attrs = attrs.borrow();
|
||||
for attr in attrs.iter() {
|
||||
let attr_name = attr.name.local.as_ref();
|
||||
let attr_value = attr.value.as_ref();
|
||||
fn get_favicons_node(
|
||||
dom: InfallibleTokenizer<StringReader<'_>, FaviconEmitter>,
|
||||
icons: &mut Vec<Icon>,
|
||||
url: &url::Url,
|
||||
) {
|
||||
const TAG_LINK: &[u8] = b"link";
|
||||
const TAG_BASE: &[u8] = b"base";
|
||||
const TAG_HEAD: &[u8] = b"head";
|
||||
const ATTR_REL: &[u8] = b"rel";
|
||||
const ATTR_HREF: &[u8] = b"href";
|
||||
const ATTR_SIZES: &[u8] = b"sizes";
|
||||
|
||||
if attr_name == "href" {
|
||||
debug!("Found base href: {}", attr_value);
|
||||
*base_href = match base_href.join(attr_value) {
|
||||
Ok(href) => href,
|
||||
_ => base_href.clone(),
|
||||
};
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Might want to limit the recursion depth?
|
||||
for child in node.children.borrow().iter() {
|
||||
// Check if we got a true back and stop the iter.
|
||||
// This means we found a <base> tag and can stop processing the html.
|
||||
if get_base_href(child, base_href) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn get_favicons_node(node: &std::rc::Rc<markup5ever_rcdom::Node>, icons: &mut Vec<Icon>, url: &url::Url) {
|
||||
if let markup5ever_rcdom::NodeData::Element {
|
||||
name,
|
||||
attrs,
|
||||
..
|
||||
} = &node.data
|
||||
{
|
||||
if name.local.as_ref() == "link" {
|
||||
let mut has_rel = false;
|
||||
let mut href = None;
|
||||
let mut sizes = None;
|
||||
|
||||
let attrs = attrs.borrow();
|
||||
for attr in attrs.iter() {
|
||||
let attr_name = attr.name.local.as_ref();
|
||||
let attr_value = attr.value.as_ref();
|
||||
|
||||
if attr_name == "rel" && ICON_REL_REGEX.is_match(attr_value) && !ICON_REL_BLACKLIST.is_match(attr_value)
|
||||
let mut base_url = url.clone();
|
||||
let mut icon_tags: Vec<StartTag> = Vec::new();
|
||||
for token in dom {
|
||||
match token {
|
||||
FaviconToken::StartTag(tag) => {
|
||||
if *tag.name == TAG_LINK
|
||||
&& tag.attributes.contains_key(ATTR_REL)
|
||||
&& tag.attributes.contains_key(ATTR_HREF)
|
||||
{
|
||||
has_rel = true;
|
||||
} else if attr_name == "href" {
|
||||
href = Some(attr_value);
|
||||
} else if attr_name == "sizes" {
|
||||
sizes = Some(attr_value);
|
||||
let rel_value = std::str::from_utf8(tag.attributes.get(ATTR_REL).unwrap())
|
||||
.unwrap_or_default()
|
||||
.to_ascii_lowercase();
|
||||
if rel_value.contains("icon") && !rel_value.contains("mask-icon") {
|
||||
icon_tags.push(tag);
|
||||
}
|
||||
} else if *tag.name == TAG_BASE && tag.attributes.contains_key(ATTR_HREF) {
|
||||
let href = std::str::from_utf8(tag.attributes.get(ATTR_HREF).unwrap()).unwrap_or_default();
|
||||
debug!("Found base href: {href}");
|
||||
base_url = match base_url.join(href) {
|
||||
Ok(inner_url) => inner_url,
|
||||
_ => url.clone(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
if has_rel {
|
||||
if let Some(inner_href) = href {
|
||||
if let Ok(full_href) = url.join(inner_href).map(String::from) {
|
||||
let priority = get_icon_priority(&full_href, sizes);
|
||||
icons.push(Icon::new(priority, full_href));
|
||||
}
|
||||
FaviconToken::EndTag(tag) => {
|
||||
if *tag.name == TAG_HEAD {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Might want to limit the recursion depth?
|
||||
for child in node.children.borrow().iter() {
|
||||
get_favicons_node(child, icons, url);
|
||||
for icon_tag in icon_tags {
|
||||
if let Some(icon_href) = icon_tag.attributes.get(ATTR_HREF) {
|
||||
if let Ok(full_href) = base_url.join(std::str::from_utf8(icon_href).unwrap_or_default()) {
|
||||
let sizes = if let Some(v) = icon_tag.attributes.get(ATTR_SIZES) {
|
||||
std::str::from_utf8(v).unwrap_or_default()
|
||||
} else {
|
||||
""
|
||||
};
|
||||
let priority = get_icon_priority(full_href.as_str(), sizes);
|
||||
icons.push(Icon::new(priority, full_href.to_string()));
|
||||
}
|
||||
};
|
||||
}
|
||||
}
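The base-href handling above leans on `url::Url::join` semantics: relative hrefs resolve against the page URL, or against any `<base href>` seen in `<head>`. A small illustration, assuming the `url` crate:

```rust
use url::Url;

fn main() {
    let page = Url::parse("https://example.com/app/index.html").unwrap();

    // A relative href resolves against the page's directory.
    assert_eq!(page.join("favicon.ico").unwrap().as_str(), "https://example.com/app/favicon.ico");

    // A root-relative href resets to the host root, which is what the
    // fallback "/favicon.ico" entries rely on.
    assert_eq!(page.join("/favicon.ico").unwrap().as_str(), "https://example.com/favicon.ico");

    // A <base href="..."> simply replaces the URL that later joins use.
    let base = page.join("https://cdn.example.net/assets/").unwrap();
    assert_eq!(base.join("icon.png").unwrap().as_str(), "https://cdn.example.net/assets/icon.png");
}
```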
|
||||
|
||||
@@ -460,16 +474,16 @@ struct IconUrlResult {
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let icon_result = get_icon_url("github.com")?;
|
||||
/// let icon_result = get_icon_url("vaultwarden.discourse.group")?;
|
||||
/// let icon_result = get_icon_url("github.com").await?;
|
||||
/// let icon_result = get_icon_url("vaultwarden.discourse.group").await?;
|
||||
/// ```
|
||||
fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
|
||||
async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
|
||||
// Default URL with secure and insecure schemes
|
||||
let ssldomain = format!("https://{}", domain);
|
||||
let httpdomain = format!("http://{}", domain);
|
||||
let ssldomain = format!("https://{domain}");
|
||||
let httpdomain = format!("http://{domain}");
|
||||
|
||||
// First check the domain as given during the request for both HTTPS and HTTP.
|
||||
let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)) {
|
||||
let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
|
||||
Ok(c) => Ok(c),
|
||||
Err(e) => {
|
||||
let mut sub_resp = Err(e);
|
||||
@@ -484,32 +498,31 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
|
||||
base = domain_parts.next_back().unwrap()
|
||||
);
|
||||
if is_valid_domain(&base_domain) {
|
||||
let sslbase = format!("https://{}", base_domain);
|
||||
let httpbase = format!("http://{}", base_domain);
|
||||
debug!("[get_icon_url]: Trying without subdomains '{}'", base_domain);
|
||||
let sslbase = format!("https://{base_domain}");
|
||||
let httpbase = format!("http://{base_domain}");
|
||||
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
|
||||
|
||||
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase));
|
||||
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
|
||||
}
|
||||
|
||||
// When the domain is not an IP and has fewer than 2 dots, try to prepend www. to it.
|
||||
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
|
||||
let www_domain = format!("www.{}", domain);
|
||||
let www_domain = format!("www.{domain}");
|
||||
if is_valid_domain(&www_domain) {
|
||||
let sslwww = format!("https://{}", www_domain);
|
||||
let httpwww = format!("http://{}", www_domain);
|
||||
debug!("[get_icon_url]: Trying with www. prefix '{}'", www_domain);
|
||||
let sslwww = format!("https://{www_domain}");
|
||||
let httpwww = format!("http://{www_domain}");
|
||||
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
|
||||
|
||||
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww));
|
||||
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
|
||||
}
|
||||
}
|
||||
|
||||
sub_resp
|
||||
}
|
||||
};
|
||||
|
||||
// Create the iconlist
|
||||
let mut iconlist: Vec<Icon> = Vec::new();
|
||||
let mut referer = String::from("");
|
||||
let mut referer = String::new();
|
||||
|
||||
if let Ok(content) = resp {
|
||||
// Extract the URL from the response in case redirects occurred (like @ gitlab.com)
|
||||
@@ -517,26 +530,23 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
|
||||
|
||||
// Set the referer to be used on the final request, some sites check this.
|
||||
// Mostly used to prevent direct linking and for other security reasons.
|
||||
referer = url.as_str().to_string();
|
||||
referer = url.to_string();
|
||||
|
||||
// Add the default favicon.ico to the list with the domain the content responded from.
|
||||
// Add the fallback favicon.ico and apple-touch-icon.png to the list with the domain the content responded from.
|
||||
iconlist.push(Icon::new(35, String::from(url.join("/favicon.ico").unwrap())));
|
||||
iconlist.push(Icon::new(40, String::from(url.join("/apple-touch-icon.png").unwrap())));
|
||||
|
||||
// 384KB should be more than enough for the HTML, since we only really need the HTML <head> section.
|
||||
let mut limited_reader = content.take(384 * 1024);
|
||||
let limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.to_vec();
|
||||
|
||||
use html5ever::tendril::TendrilSink;
|
||||
let dom = html5ever::parse_document(markup5ever_rcdom::RcDom::default(), Default::default())
|
||||
.from_utf8()
|
||||
.read_from(&mut limited_reader)?;
|
||||
|
||||
let mut base_url: url::Url = url;
|
||||
get_base_href(&dom.document, &mut base_url);
|
||||
get_favicons_node(&dom.document, &mut iconlist, &base_url);
|
||||
let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default()).infallible();
|
||||
get_favicons_node(dom, &mut iconlist, &url);
|
||||
} else {
|
||||
// Add the default favicon.ico to the list with just the given domain
|
||||
iconlist.push(Icon::new(35, format!("{}/favicon.ico", ssldomain)));
|
||||
iconlist.push(Icon::new(35, format!("{}/favicon.ico", httpdomain)));
|
||||
iconlist.push(Icon::new(35, format!("{ssldomain}/favicon.ico")));
|
||||
iconlist.push(Icon::new(40, format!("{ssldomain}/apple-touch-icon.png")));
|
||||
iconlist.push(Icon::new(35, format!("{httpdomain}/favicon.ico")));
|
||||
iconlist.push(Icon::new(40, format!("{httpdomain}/apple-touch-icon.png")));
|
||||
}
|
||||
|
||||
// Sort the iconlist by priority
|
||||
@@ -549,13 +559,13 @@ fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
|
||||
})
|
||||
}
|
||||
|
||||
fn get_page(url: &str) -> Result<Response, Error> {
|
||||
get_page_with_referer(url, "")
|
||||
async fn get_page(url: &str) -> Result<Response, Error> {
|
||||
get_page_with_referer(url, "").await
|
||||
}
|
||||
|
||||
fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
|
||||
if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()) {
|
||||
err!("Favicon resolves to a blacklisted domain or IP!", url);
|
||||
async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
|
||||
if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
|
||||
warn!("Favicon '{}' resolves to a blacklisted domain or IP!", url);
|
||||
}
|
||||
|
||||
let mut client = CLIENT.get(url);
|
||||
@@ -563,7 +573,7 @@ fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
|
||||
client = client.header("Referer", referer)
|
||||
}
|
||||
|
||||
match client.send() {
|
||||
match client.send().await {
|
||||
Ok(c) => c.error_for_status().map_err(Into::into),
|
||||
Err(e) => err_silent!(format!("{}", e)),
|
||||
}
|
||||
@@ -581,7 +591,7 @@ fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
|
||||
/// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32");
|
||||
/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", "");
|
||||
/// ```
|
||||
fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 {
|
||||
fn get_icon_priority(href: &str, sizes: &str) -> u8 {
|
||||
// Check if there is a dimension set
|
||||
let (width, height) = parse_sizes(sizes);
|
||||
|
||||
@@ -629,11 +639,11 @@ fn get_icon_priority(href: &str, sizes: Option<&str>) -> u8 {
|
||||
/// let (width, height) = parse_sizes("x128x128"); // (128, 128)
|
||||
/// let (width, height) = parse_sizes("32"); // (0, 0)
|
||||
/// ```
|
||||
fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
|
||||
fn parse_sizes(sizes: &str) -> (u16, u16) {
|
||||
let mut width: u16 = 0;
|
||||
let mut height: u16 = 0;
|
||||
|
||||
if let Some(sizes) = sizes {
|
||||
if !sizes.is_empty() {
|
||||
match ICON_SIZE_REGEX.captures(sizes.trim()) {
|
||||
None => {}
|
||||
Some(dimensions) => {
|
||||
@@ -648,14 +658,14 @@ fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
|
||||
(width, height)
|
||||
}
|
||||
|
||||
fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
|
||||
if is_domain_blacklisted(domain) {
|
||||
async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
|
||||
if is_domain_blacklisted(domain).await {
|
||||
err_silent!("Domain is blacklisted", domain)
|
||||
}
|
||||
|
||||
let icon_result = get_icon_url(domain)?;
|
||||
let icon_result = get_icon_url(domain).await?;
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
let mut buffer = Bytes::new();
|
||||
let mut icon_type: Option<&str> = None;
|
||||
|
||||
use data_url::DataUrl;
|
||||
@@ -664,8 +674,12 @@ fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
|
||||
if icon.href.starts_with("data:image") {
|
||||
let datauri = DataUrl::process(&icon.href).unwrap();
|
||||
// Check if we are able to decode the data uri
|
||||
match datauri.decode_to_vec() {
|
||||
Ok((body, _fragment)) => {
|
||||
let mut body = BytesMut::new();
|
||||
match datauri.decode::<_, ()>(|bytes| {
|
||||
body.extend_from_slice(bytes);
|
||||
Ok(())
|
||||
}) {
|
||||
Ok(_) => {
|
||||
// Also check that the size is at least 67 bytes, which seems to be the smallest PNG that can be created
|
||||
if body.len() >= 67 {
|
||||
// Check if the icon type is allowed, else try an icon from the list.
|
||||
@@ -675,16 +689,17 @@ fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
|
||||
continue;
|
||||
}
|
||||
info!("Extracted icon from data:image uri for {}", domain);
|
||||
buffer = body;
|
||||
buffer = body.freeze();
|
||||
break;
|
||||
}
|
||||
}
|
||||
_ => debug!("Extracted icon from data:image uri is invalid"),
|
||||
};
|
||||
} else {
|
||||
match get_page_with_referer(&icon.href, &icon_result.referer) {
|
||||
Ok(mut res) => {
|
||||
res.copy_to(&mut buffer)?;
|
||||
match get_page_with_referer(&icon.href, &icon_result.referer).await {
|
||||
Ok(res) => {
|
||||
buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
|
||||
|
||||
// Check if the icon type is allowed, else try an icon from the list.
|
||||
icon_type = get_icon_type(&buffer);
|
||||
if icon_type.is_none() {
|
||||
@@ -707,16 +722,16 @@ fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
|
||||
Ok((buffer, icon_type))
|
||||
}
|
||||
|
||||
fn save_icon(path: &str, icon: &[u8]) {
|
||||
match File::create(path) {
|
||||
async fn save_icon(path: &str, icon: &[u8]) {
|
||||
match File::create(path).await {
|
||||
Ok(mut f) => {
|
||||
f.write_all(icon).expect("Error writing icon file");
|
||||
f.write_all(icon).await.expect("Error writing icon file");
|
||||
}
|
||||
Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
|
||||
create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");
|
||||
create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder");
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Icon save error: {:?}", e);
|
||||
warn!("Unable to save icon: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -733,13 +748,30 @@ fn get_icon_type(bytes: &[u8]) -> Option<&'static str> {
|
||||
}
|
||||
}
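`get_icon_type` sniffs the image type from leading magic bytes rather than trusting the Content-Type header; its match arms are elided in this hunk. A hedged sketch of that kind of check (the exact byte patterns and type names the real function uses may differ):

```rust
// Identify common favicon formats by their leading magic bytes.
fn sniff_image_type(bytes: &[u8]) -> Option<&'static str> {
    match bytes {
        [0x89, b'P', b'N', b'G', ..] => Some("png"),
        [0xFF, 0xD8, 0xFF, ..] => Some("jpeg"),
        [b'G', b'I', b'F', b'8', ..] => Some("gif"),
        // Windows ICO: reserved 0x0000, then type 0x0001 (little-endian).
        [0x00, 0x00, 0x01, 0x00, ..] => Some("x-icon"),
        _ => None,
    }
}
```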
|
||||
|
||||
/// Minimize the number of bytes to be parsed from a reqwest result.
/// This prevents very long parsing times and excessive memory usage.
|
||||
async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result<Bytes, reqwest::Error> {
|
||||
let mut stream = res.bytes_stream().take(max_size);
|
||||
let mut buf = BytesMut::new();
|
||||
let mut size = 0;
|
||||
while let Some(chunk) = stream.next().await {
|
||||
let chunk = &chunk?;
|
||||
size += chunk.len();
|
||||
buf.extend(chunk);
|
||||
if size >= max_size {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(buf.freeze())
|
||||
}
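A hypothetical call site for the helper above, mirroring the 384 KiB budget used for HTML in `get_icon_url`. Note that `.take(max_size)` caps the number of stream chunks, while the explicit `size` counter is what actually enforces the byte budget:

```rust
// Fetch a page but buffer at most 384 KiB of the body.
async fn fetch_head_only(url: &str) -> Result<bytes::Bytes, reqwest::Error> {
    let res = reqwest::get(url).await?;
    stream_to_bytes_limit(res, 384 * 1024).await
}
```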
|
||||
|
||||
/// This is an implementation of the default Cookie Jar from Reqwest and reqwest_cookie_store built by pfernie.
/// The default cookie jar used by Reqwest keeps all the cookies based upon the Max-Age or Expires, which could be a long time.
/// That could be used for tracking; to prevent this we force the lifespan of the cookies to always be max two minutes.
/// A Cookie Jar is needed because some sites force a redirect with cookies to verify if a request uses cookies or not.
use cookie_store::CookieStore;
|
||||
#[derive(Default)]
|
||||
pub struct Jar(RwLock<CookieStore>);
|
||||
pub struct Jar(std::sync::RwLock<CookieStore>);
|
||||
|
||||
impl reqwest::cookie::CookieStore for Jar {
|
||||
fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &header::HeaderValue>, url: &url::Url) {
|
||||
@@ -762,8 +794,6 @@ impl reqwest::cookie::CookieStore for Jar {
|
||||
}
|
||||
|
||||
fn cookies(&self, url: &url::Url) -> Option<header::HeaderValue> {
|
||||
use bytes::Bytes;
|
||||
|
||||
let cookie_store = self.0.read().unwrap();
|
||||
let s = cookie_store
|
||||
.get_request_values(url)
|
||||
@@ -778,3 +808,158 @@ impl reqwest::cookie::CookieStore for Jar {
|
||||
header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok()
|
||||
}
|
||||
}
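The `set_cookies` body is elided in this hunk; the behavior described above is to clamp each incoming cookie's lifetime before storing it. A sketch of that clamping step, assuming the `cookie` crate's API (the surrounding parse-and-store code is omitted):

```rust
use cookie::{time::Duration, Cookie};

// Whatever Max-Age/Expires the server asked for, store the cookie
// with a lifespan of at most two minutes.
fn clamp_lifetime(mut c: Cookie<'static>) -> Cookie<'static> {
    c.set_max_age(Duration::minutes(2));
    c
}
```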
|
||||
|
||||
/// Custom FaviconEmitter for the html5gum parser.
/// The FaviconEmitter uses an almost 1:1 copy of the DefaultEmitter with some small changes.
/// This prevents emitting tokens such as comments, doctypes, and the strings between tags.
/// Therefore parsing the HTML content is faster.
use std::collections::{BTreeSet, VecDeque};
|
||||
|
||||
#[derive(Debug)]
|
||||
enum FaviconToken {
|
||||
StartTag(StartTag),
|
||||
EndTag(EndTag),
|
||||
}
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
struct FaviconEmitter {
|
||||
current_token: Option<FaviconToken>,
|
||||
last_start_tag: HtmlString,
|
||||
current_attribute: Option<(HtmlString, HtmlString)>,
|
||||
seen_attributes: BTreeSet<HtmlString>,
|
||||
emitted_tokens: VecDeque<FaviconToken>,
|
||||
}
|
||||
|
||||
impl FaviconEmitter {
|
||||
fn emit_token(&mut self, token: FaviconToken) {
|
||||
self.emitted_tokens.push_front(token);
|
||||
}
|
||||
|
||||
fn flush_current_attribute(&mut self) {
|
||||
if let Some((k, v)) = self.current_attribute.take() {
|
||||
match self.current_token {
|
||||
Some(FaviconToken::StartTag(ref mut tag)) => {
|
||||
tag.attributes.entry(k).and_modify(|_| {}).or_insert(v);
|
||||
}
|
||||
Some(FaviconToken::EndTag(_)) => {
|
||||
self.seen_attributes.insert(k);
|
||||
}
|
||||
_ => {
|
||||
debug_assert!(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Emitter for FaviconEmitter {
|
||||
type Token = FaviconToken;
|
||||
|
||||
fn set_last_start_tag(&mut self, last_start_tag: Option<&[u8]>) {
|
||||
self.last_start_tag.clear();
|
||||
self.last_start_tag.extend(last_start_tag.unwrap_or_default());
|
||||
}
|
||||
|
||||
fn pop_token(&mut self) -> Option<Self::Token> {
|
||||
self.emitted_tokens.pop_back()
|
||||
}
|
||||
|
||||
fn init_start_tag(&mut self) {
|
||||
self.current_token = Some(FaviconToken::StartTag(StartTag::default()));
|
||||
}
|
||||
|
||||
fn init_end_tag(&mut self) {
|
||||
self.current_token = Some(FaviconToken::EndTag(EndTag::default()));
|
||||
self.seen_attributes.clear();
|
||||
}
|
||||
|
||||
fn emit_current_tag(&mut self) -> Option<html5gum::State> {
|
||||
self.flush_current_attribute();
|
||||
let mut token = self.current_token.take().unwrap();
|
||||
let mut emit = false;
|
||||
match token {
|
||||
FaviconToken::EndTag(ref mut tag) => {
|
||||
// Always clean seen attributes
|
||||
self.seen_attributes.clear();
|
||||
|
||||
// Only trigger an emit for the </head> tag.
|
||||
// This is matched, and will break the for-loop.
|
||||
if *tag.name == b"head" {
|
||||
emit = true;
|
||||
}
|
||||
}
|
||||
FaviconToken::StartTag(ref mut tag) => {
|
||||
// Only trigger an emit for <link> and <base> tags.
|
||||
// These are the only tags we want to parse.
|
||||
if *tag.name == b"link" || *tag.name == b"base" {
|
||||
self.set_last_start_tag(Some(&tag.name));
|
||||
emit = true;
|
||||
} else {
|
||||
self.set_last_start_tag(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Only emit the tags we want to parse.
|
||||
if emit {
|
||||
self.emit_token(token);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn push_tag_name(&mut self, s: &[u8]) {
|
||||
match self.current_token {
|
||||
Some(
|
||||
FaviconToken::StartTag(StartTag {
|
||||
ref mut name,
|
||||
..
|
||||
})
|
||||
| FaviconToken::EndTag(EndTag {
|
||||
ref mut name,
|
||||
..
|
||||
}),
|
||||
) => {
|
||||
name.extend(s);
|
||||
}
|
||||
_ => debug_assert!(false),
|
||||
}
|
||||
}
|
||||
|
||||
fn init_attribute(&mut self) {
|
||||
self.flush_current_attribute();
|
||||
self.current_attribute = Some(Default::default());
|
||||
}
|
||||
|
||||
fn push_attribute_name(&mut self, s: &[u8]) {
|
||||
self.current_attribute.as_mut().unwrap().0.extend(s);
|
||||
}
|
||||
|
||||
fn push_attribute_value(&mut self, s: &[u8]) {
|
||||
self.current_attribute.as_mut().unwrap().1.extend(s);
|
||||
}
|
||||
|
||||
fn current_is_appropriate_end_tag_token(&mut self) -> bool {
|
||||
match self.current_token {
|
||||
Some(FaviconToken::EndTag(ref tag)) => !self.last_start_tag.is_empty() && self.last_start_tag == tag.name,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
// We do not want or need these parts of the HTML document.
// They are skipped and ignored during tokenization and iteration.
|
||||
fn emit_current_comment(&mut self) {}
|
||||
fn emit_current_doctype(&mut self) {}
|
||||
fn emit_eof(&mut self) {}
|
||||
fn emit_error(&mut self, _: html5gum::Error) {}
|
||||
fn emit_string(&mut self, _: &[u8]) {}
|
||||
fn init_comment(&mut self) {}
|
||||
fn init_doctype(&mut self) {}
|
||||
fn push_comment(&mut self, _: &[u8]) {}
|
||||
fn push_doctype_name(&mut self, _: &[u8]) {}
|
||||
fn push_doctype_public_identifier(&mut self, _: &[u8]) {}
|
||||
fn push_doctype_system_identifier(&mut self, _: &[u8]) {}
|
||||
fn set_doctype_public_identifier(&mut self, _: &[u8]) {}
|
||||
fn set_doctype_system_identifier(&mut self, _: &[u8]) {}
|
||||
fn set_force_quirks(&mut self) {}
|
||||
fn set_self_closing(&mut self) {}
|
||||
}
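For reference, a minimal driver for this emitter, wired up the same way as in `get_icon_url` above (assumes `FaviconEmitter` and `FaviconToken` are in scope):

```rust
use html5gum::Tokenizer;

fn main() {
    // Only <link>/<base> start tags and the </head> end tag ever come
    // out of the emitter; everything else is dropped during tokenizing.
    let html = r#"<head><base href="/app/"><link rel="icon" href="fav.ico"></head>"#;
    for token in Tokenizer::new_with_emitter(html, FaviconEmitter::default()).infallible() {
        println!("{token:?}");
    }
}
```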
|
||||
|
||||
@@ -1,35 +1,39 @@
|
||||
use chrono::Local;
|
||||
use chrono::Utc;
|
||||
use num_traits::FromPrimitive;
|
||||
use rocket::serde::json::Json;
|
||||
use rocket::{
|
||||
request::{Form, FormItems, FromForm},
|
||||
form::{Form, FromForm},
|
||||
Route,
|
||||
};
|
||||
use rocket_contrib::json::Json;
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::{
|
||||
api::{
|
||||
core::accounts::{PreloginData, RegisterData, _prelogin, _register},
|
||||
core::log_user_event,
|
||||
core::two_factor::{duo, email, email::EmailTokenData, yubikey},
|
||||
ApiResult, EmptyResult, JsonResult,
|
||||
ApiResult, EmptyResult, JsonResult, JsonUpcase,
|
||||
},
|
||||
auth::ClientIp,
|
||||
auth::{ClientHeaders, ClientIp},
|
||||
db::{models::*, DbConn},
|
||||
error::MapResult,
|
||||
mail, util, CONFIG,
|
||||
};
|
||||
|
||||
pub fn routes() -> Vec<Route> {
|
||||
routes![login]
|
||||
routes![login, prelogin, identity_register]
|
||||
}
|
||||
|
||||
#[post("/connect/token", data = "<data>")]
|
||||
fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
|
||||
async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn, ip: ClientIp) -> JsonResult {
|
||||
let data: ConnectData = data.into_inner();
|
||||
|
||||
match data.grant_type.as_ref() {
|
||||
let mut user_uuid: Option<String> = None;
|
||||
|
||||
let login_result = match data.grant_type.as_ref() {
|
||||
"refresh_token" => {
|
||||
_check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
|
||||
_refresh_login(data, conn)
|
||||
_refresh_login(data, &mut conn).await
|
||||
}
|
||||
"password" => {
|
||||
_check_is_some(&data.client_id, "client_id cannot be blank")?;
|
||||
@@ -41,26 +45,57 @@ fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
|
||||
_check_is_some(&data.device_name, "device_name cannot be blank")?;
|
||||
_check_is_some(&data.device_type, "device_type cannot be blank")?;
|
||||
|
||||
_password_login(data, conn, &ip)
|
||||
_password_login(data, &mut user_uuid, &mut conn, &ip).await
|
||||
}
|
||||
"client_credentials" => {
|
||||
_check_is_some(&data.client_id, "client_id cannot be blank")?;
|
||||
_check_is_some(&data.client_secret, "client_secret cannot be blank")?;
|
||||
_check_is_some(&data.scope, "scope cannot be blank")?;
|
||||
|
||||
_api_key_login(data, &mut user_uuid, &mut conn, &ip).await
|
||||
}
|
||||
t => err!("Invalid type", t),
|
||||
};
|
||||
|
||||
if let Some(user_uuid) = user_uuid {
|
||||
match &login_result {
|
||||
Ok(_) => {
|
||||
log_user_event(
|
||||
EventType::UserLoggedIn as i32,
|
||||
&user_uuid,
|
||||
client_header.device_type,
|
||||
&ip.ip,
|
||||
&mut conn,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
Err(e) => {
|
||||
if let Some(ev) = e.get_event() {
|
||||
log_user_event(ev.event as i32, &user_uuid, client_header.device_type, &ip.ip, &mut conn).await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
login_result
|
||||
}
|
||||
|
||||
fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
|
||||
async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
|
||||
// Extract token
|
||||
let token = data.refresh_token.unwrap();
|
||||
|
||||
// Get device by refresh token
|
||||
let mut device = Device::find_by_refresh_token(&token, &conn).map_res("Invalid refresh token")?;
|
||||
let mut device = Device::find_by_refresh_token(&token, conn).await.map_res("Invalid refresh token")?;
|
||||
|
||||
// COMMON
|
||||
let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
|
||||
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn);
|
||||
let scope = "api offline_access";
|
||||
let scope_vec = vec!["api".into(), "offline_access".into()];
|
||||
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
|
||||
// Common
|
||||
let user = User::find_by_uuid(&device.user_uuid, conn).await.unwrap();
|
||||
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
|
||||
device.save(conn).await?;
|
||||
|
||||
device.save(&conn)?;
|
||||
Ok(Json(json!({
|
||||
"access_token": access_token,
|
||||
"expires_in": expires_in,
|
||||
@@ -72,40 +107,63 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
|
||||
"Kdf": user.client_kdf_type,
|
||||
"KdfIterations": user.client_kdf_iter,
|
||||
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
|
||||
"scope": "api offline_access",
|
||||
"scope": scope,
|
||||
"unofficialServer": true,
|
||||
})))
|
||||
}
|
||||
|
||||
fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
|
||||
async fn _password_login(
|
||||
data: ConnectData,
|
||||
user_uuid: &mut Option<String>,
|
||||
conn: &mut DbConn,
|
||||
ip: &ClientIp,
|
||||
) -> JsonResult {
|
||||
// Validate scope
|
||||
let scope = data.scope.as_ref().unwrap();
|
||||
if scope != "api offline_access" {
|
||||
err!("Scope not supported")
|
||||
}
|
||||
let scope_vec = vec!["api".into(), "offline_access".into()];
|
||||
|
||||
// Ratelimit the login
|
||||
crate::ratelimit::check_limit_login(&ip.ip)?;
|
||||
|
||||
// Get the user
|
||||
let username = data.username.as_ref().unwrap();
|
||||
let user = match User::find_by_mail(username, &conn) {
|
||||
let username = data.username.as_ref().unwrap().trim();
|
||||
let user = match User::find_by_mail(username, conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
|
||||
};
|
||||
|
||||
// Set the user_uuid here so it can be passed back and used for event logging.
|
||||
*user_uuid = Some(user.uuid.clone());
|
||||
|
||||
// Check password
|
||||
let password = data.password.as_ref().unwrap();
|
||||
if !user.check_valid_password(password) {
|
||||
err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username))
|
||||
err!(
|
||||
"Username or password is incorrect. Try again",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn,
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
// Check if the user is disabled
|
||||
if !user.enabled {
|
||||
err!("This user has been disabled", format!("IP: {}. Username: {}.", ip.ip, username))
|
||||
err!(
|
||||
"This user has been disabled",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
let now = Local::now();
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
|
||||
let now = now.naive_utc();
|
||||
if user.last_verifying_at.is_none()
|
||||
|| now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds()
|
||||
> CONFIG.signups_verify_resend_time() as i64
|
||||
@@ -118,39 +176,49 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
|
||||
user.last_verifying_at = Some(now);
|
||||
user.login_verify_count += 1;
|
||||
|
||||
if let Err(e) = user.save(&conn) {
|
||||
if let Err(e) = user.save(conn).await {
|
||||
error!("Error updating user: {:#?}", e);
|
||||
}
|
||||
|
||||
if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
|
||||
if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
|
||||
error!("Error auto-sending email verification email: {:#?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We still want the login to fail until they actually verified the email address
|
||||
err!("Please verify your email before trying again.", format!("IP: {}. Username: {}.", ip.ip, username))
|
||||
err!(
|
||||
"Please verify your email before trying again.",
|
||||
format!("IP: {}. Username: {}.", ip.ip, username),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
let (mut device, new_device) = get_device(&data, &conn, &user);
|
||||
let (mut device, new_device) = get_device(&data, conn, &user).await;
|
||||
|
||||
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, &conn)?;
|
||||
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?;
|
||||
|
||||
if CONFIG.mail_enabled() && new_device {
|
||||
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
|
||||
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
|
||||
error!("Error sending new device email: {:#?}", e);
|
||||
|
||||
if CONFIG.require_device_email() {
|
||||
err!("Could not send login notification email. Please contact your administrator.")
|
||||
err!(
|
||||
"Could not send login notification email. Please contact your administrator.",
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Common
|
||||
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn);
|
||||
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
|
||||
device.save(&conn)?;
|
||||
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
|
||||
device.save(conn).await?;
|
||||
|
||||
let mut result = json!({
|
||||
"access_token": access_token,
|
||||
@@ -164,7 +232,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
|
||||
"Kdf": user.client_kdf_type,
|
||||
"KdfIterations": user.client_kdf_iter,
|
||||
"ResetMasterPassword": false,// TODO: Same as above
|
||||
"scope": "api offline_access",
|
||||
"scope": scope,
|
||||
"unofficialServer": true,
|
||||
});
|
||||
|
||||
@@ -176,26 +244,113 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
|
||||
Ok(Json(result))
|
||||
}
|
||||
|
||||
async fn _api_key_login(
|
||||
data: ConnectData,
|
||||
user_uuid: &mut Option<String>,
|
||||
conn: &mut DbConn,
|
||||
ip: &ClientIp,
|
||||
) -> JsonResult {
|
||||
// Validate scope
|
||||
let scope = data.scope.as_ref().unwrap();
|
||||
if scope != "api" {
|
||||
err!("Scope not supported")
|
||||
}
|
||||
let scope_vec = vec!["api".into()];
|
||||
|
||||
// Ratelimit the login
|
||||
crate::ratelimit::check_limit_login(&ip.ip)?;
|
||||
|
||||
// Get the user via the client_id
|
||||
let client_id = data.client_id.as_ref().unwrap();
|
||||
let client_user_uuid = match client_id.strip_prefix("user.") {
|
||||
Some(uuid) => uuid,
|
||||
None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
|
||||
};
|
||||
let user = match User::find_by_uuid(client_user_uuid, conn).await {
|
||||
Some(user) => user,
|
||||
None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
|
||||
};
|
||||
|
||||
// Set the user_uuid here so it can be passed back and used for event logging.
|
||||
*user_uuid = Some(user.uuid.clone());
|
||||
|
||||
// Check if the user is disabled
|
||||
if !user.enabled {
|
||||
err!(
|
||||
"This user has been disabled (API key login)",
|
||||
format!("IP: {}. Username: {}.", ip.ip, user.email),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
// Check API key. Note that API key logins bypass 2FA.
|
||||
let client_secret = data.client_secret.as_ref().unwrap();
|
||||
if !user.check_valid_api_key(client_secret) {
|
||||
err!(
|
||||
"Incorrect client_secret",
|
||||
format!("IP: {}. Username: {}.", ip.ip, user.email),
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
let (mut device, new_device) = get_device(&data, conn, &user).await;
|
||||
|
||||
if CONFIG.mail_enabled() && new_device {
|
||||
let now = Utc::now().naive_utc();
|
||||
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
|
||||
error!("Error sending new device email: {:#?}", e);
|
||||
|
||||
if CONFIG.require_device_email() {
|
||||
err!(
|
||||
"Could not send login notification email. Please contact your administrator.",
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Common
|
||||
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
|
||||
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
|
||||
device.save(conn).await?;
|
||||
|
||||
info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
|
||||
|
||||
// Note: No refresh_token is returned. The CLI just repeats the
|
||||
// client_credentials login flow when the existing token expires.
|
||||
Ok(Json(json!({
|
||||
"access_token": access_token,
|
||||
"expires_in": expires_in,
|
||||
"token_type": "Bearer",
|
||||
"Key": user.akey,
|
||||
"PrivateKey": user.private_key,
|
||||
|
||||
"Kdf": user.client_kdf_type,
|
||||
"KdfIterations": user.client_kdf_iter,
|
||||
"ResetMasterPassword": false, // TODO: Same as above
|
||||
"scope": scope,
|
||||
"unofficialServer": true,
|
||||
})))
|
||||
}
|
||||
|
||||
/// Retrieves an existing device or creates a new device from ConnectData and the User
|
||||
fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) {
|
||||
async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Device, bool) {
|
||||
// On iOS, device_type sends "iOS", on others it sends a number
|
||||
let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
|
||||
// When unknown or unable to parse, return 14, which is 'Unknown Browser'
|
||||
let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(14);
|
||||
let device_id = data.device_identifier.clone().expect("No device id provided");
|
||||
let device_name = data.device_name.clone().expect("No device name provided");
|
||||
|
||||
let mut new_device = false;
|
||||
// Find device or create new
|
||||
let device = match Device::find_by_uuid(&device_id, conn) {
|
||||
Some(device) => {
|
||||
// Check if owned device, and recreate if not
|
||||
if device.user_uuid != user.uuid {
|
||||
info!("Device exists but is owned by another user. The old device will be discarded");
|
||||
new_device = true;
|
||||
Device::new(device_id, user.uuid.clone(), device_name, device_type)
|
||||
} else {
|
||||
device
|
||||
}
|
||||
}
|
||||
let device = match Device::find_by_uuid_and_user(&device_id, &user.uuid, conn).await {
|
||||
Some(device) => device,
|
||||
None => {
|
||||
new_device = true;
|
||||
Device::new(device_id, user.uuid.clone(), device_name, device_type)
|
||||
@@ -205,26 +360,28 @@ fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool)
|
||||
(device, new_device)
|
||||
}
|
||||
|
||||
fn twofactor_auth(
|
||||
async fn twofactor_auth(
|
||||
user_uuid: &str,
|
||||
data: &ConnectData,
|
||||
device: &mut Device,
|
||||
ip: &ClientIp,
|
||||
conn: &DbConn,
|
||||
conn: &mut DbConn,
|
||||
) -> ApiResult<Option<String>> {
|
||||
let twofactors = TwoFactor::find_by_user(user_uuid, conn);
|
||||
let twofactors = TwoFactor::find_by_user(user_uuid, conn).await;
|
||||
|
||||
// No twofactor token if twofactor is disabled
|
||||
if twofactors.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?;
|
||||
|
||||
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
|
||||
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
|
||||
|
||||
let twofactor_code = match data.two_factor_token {
|
||||
Some(ref code) => code,
|
||||
None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA token not provided"),
|
||||
None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, "2FA token not provided"),
|
||||
};
|
||||
|
||||
let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);
|
||||
@@ -237,16 +394,17 @@ fn twofactor_auth(
|
||||
|
||||
match TwoFactorType::from_i32(selected_id) {
|
||||
Some(TwoFactorType::Authenticator) => {
|
||||
_tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn)?
|
||||
_tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn).await?
|
||||
}
|
||||
Some(TwoFactorType::U2f) => _tf::u2f::validate_u2f_login(user_uuid, twofactor_code, conn)?,
|
||||
Some(TwoFactorType::Webauthn) => _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn)?,
|
||||
Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?)?,
|
||||
Some(TwoFactorType::Webauthn) => {
|
||||
_tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await?
|
||||
}
|
||||
Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
|
||||
Some(TwoFactorType::Duo) => {
|
||||
_tf::duo::validate_duo_login(data.username.as_ref().unwrap(), twofactor_code, conn)?
|
||||
_tf::duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
|
||||
}
|
||||
Some(TwoFactorType::Email) => {
|
||||
_tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn)?
|
||||
_tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn).await?
|
||||
}
|
||||
|
||||
Some(TwoFactorType::Remember) => {
|
||||
@@ -255,13 +413,23 @@ fn twofactor_auth(
|
||||
remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time
|
||||
}
|
||||
_ => {
|
||||
err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn)?, "2FA Remember token not provided")
|
||||
err_json!(
|
||||
_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?,
|
||||
"2FA Remember token not provided"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => err!("Invalid two factor provider"),
|
||||
_ => err!(
|
||||
"Invalid two factor provider",
|
||||
ErrorEvent {
|
||||
event: EventType::UserFailedLogIn2fa
|
||||
}
|
||||
),
|
||||
}
|
||||
|
||||
TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?;
|
||||
|
||||
if !CONFIG.disable_2fa_remember() && remember == 1 {
|
||||
Ok(Some(device.refresh_twofactor_remember()))
|
||||
} else {
|
||||
@@ -274,7 +442,7 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
|
||||
tf.map(|t| t.data).map_res("Two factor doesn't exist")
|
||||
}
|
||||
|
||||
fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> ApiResult<Value> {
|
||||
async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn) -> ApiResult<Value> {
|
||||
use crate::api::core::two_factor;
|
||||
|
||||
let mut result = json!({
|
||||
@@ -290,38 +458,18 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
|
||||
match TwoFactorType::from_i32(*provider) {
|
||||
Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
|
||||
|
||||
Some(TwoFactorType::U2f) if CONFIG.domain_set() => {
|
||||
let request = two_factor::u2f::generate_u2f_login(user_uuid, conn)?;
|
||||
let mut challenge_list = Vec::new();
|
||||
|
||||
for key in request.registered_keys {
|
||||
challenge_list.push(json!({
|
||||
"appId": request.app_id,
|
||||
"challenge": request.challenge,
|
||||
"version": key.version,
|
||||
"keyHandle": key.key_handle,
|
||||
}));
|
||||
}
|
||||
|
||||
let challenge_list_str = serde_json::to_string(&challenge_list).unwrap();
|
||||
|
||||
result["TwoFactorProviders2"][provider.to_string()] = json!({
|
||||
"Challenges": challenge_list_str,
|
||||
});
|
||||
}
|
||||
|
||||
Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
|
||||
let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn)?;
|
||||
let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?;
|
||||
result["TwoFactorProviders2"][provider.to_string()] = request.0;
|
||||
}
|
||||
|
||||
Some(TwoFactorType::Duo) => {
|
||||
let email = match User::find_by_uuid(user_uuid, conn) {
|
||||
let email = match User::find_by_uuid(user_uuid, conn).await {
|
||||
Some(u) => u.email,
|
||||
None => err!("User does not exist"),
|
||||
};
|
||||
|
||||
let (signature, host) = duo::generate_duo_signature(&email, conn)?;
|
||||
let (signature, host) = duo::generate_duo_signature(&email, conn).await?;
|
||||
|
||||
result["TwoFactorProviders2"][provider.to_string()] = json!({
|
||||
"Host": host,
|
||||
@@ -330,7 +478,7 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
|
||||
}
|
||||
|
||||
Some(tf_type @ TwoFactorType::YubiKey) => {
|
||||
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) {
|
||||
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
|
||||
Some(tf) => tf,
|
||||
None => err!("No YubiKey devices registered"),
|
||||
};
|
||||
@@ -345,14 +493,14 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
|
||||
Some(tf_type @ TwoFactorType::Email) => {
|
||||
use crate::api::core::two_factor as _tf;
|
||||
|
||||
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn) {
|
||||
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
|
||||
Some(tf) => tf,
|
||||
None => err!("No twofactor email registered"),
|
||||
};
|
||||
|
||||
// Send email immediately if email is the only 2FA option
|
||||
if providers.len() == 1 {
|
||||
_tf::email::send_token(user_uuid, conn)?
|
||||
_tf::email::send_token(user_uuid, conn).await?
|
||||
}
|
||||
|
||||
let email_data = EmailTokenData::from_json(&twofactor.data)?;
|
||||
@@ -368,62 +516,68 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
// https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
|
||||
#[derive(Debug, Clone, Default)]
|
||||
#[allow(non_snake_case)]
|
||||
struct ConnectData {
|
||||
grant_type: String, // refresh_token, password
|
||||
|
||||
// Needed for grant_type="refresh_token"
|
||||
refresh_token: Option<String>,
|
||||
|
||||
// Needed for grant_type="password"
|
||||
    client_id: Option<String>, // web, cli, desktop, browser, mobile
    password: Option<String>,
    scope: Option<String>,
    username: Option<String>,

    device_identifier: Option<String>,
    device_name: Option<String>,
    device_type: Option<String>,
    device_push_token: Option<String>, // Unused; mobile device push not yet supported.

    // Needed for two-factor auth
    two_factor_provider: Option<i32>,
    two_factor_token: Option<String>,
    two_factor_remember: Option<i32>,
}

impl<'f> FromForm<'f> for ConnectData {
    type Error = String;

    fn from_form(items: &mut FormItems<'f>, _strict: bool) -> Result<Self, Self::Error> {
        let mut form = Self::default();
        for item in items {
            let (key, value) = item.key_value_decoded();
            let mut normalized_key = key.to_lowercase();
            normalized_key.retain(|c| c != '_'); // Remove '_'

            match normalized_key.as_ref() {
                "granttype" => form.grant_type = value,
                "refreshtoken" => form.refresh_token = Some(value),
                "clientid" => form.client_id = Some(value),
                "password" => form.password = Some(value),
                "scope" => form.scope = Some(value),
                "username" => form.username = Some(value),
                "deviceidentifier" => form.device_identifier = Some(value),
                "devicename" => form.device_name = Some(value),
                "devicetype" => form.device_type = Some(value),
                "devicepushtoken" => form.device_push_token = Some(value),
                "twofactorprovider" => form.two_factor_provider = value.parse().ok(),
                "twofactortoken" => form.two_factor_token = Some(value),
                "twofactorremember" => form.two_factor_remember = value.parse().ok(),
                key => warn!("Detected unexpected parameter during login: {}", key),
            }
        }

        Ok(form)
    }
}

#[post("/accounts/prelogin", data = "<data>")]
async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
    _prelogin(data, conn).await
}

#[post("/accounts/register", data = "<data>")]
async fn identity_register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
    _register(data, conn).await
}

// https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
// https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
#[derive(Debug, Clone, Default, FromForm)]
#[allow(non_snake_case)]
struct ConnectData {
    #[field(name = uncased("grant_type"))]
    #[field(name = uncased("granttype"))]
    grant_type: String, // refresh_token, password, client_credentials (API key)

    // Needed for grant_type="refresh_token"
    #[field(name = uncased("refresh_token"))]
    #[field(name = uncased("refreshtoken"))]
    refresh_token: Option<String>,

    // Needed for grant_type = "password" | "client_credentials"
    #[field(name = uncased("client_id"))]
    #[field(name = uncased("clientid"))]
    client_id: Option<String>, // web, cli, desktop, browser, mobile
    #[field(name = uncased("client_secret"))]
    #[field(name = uncased("clientsecret"))]
    client_secret: Option<String>,
    #[field(name = uncased("password"))]
    password: Option<String>,
    #[field(name = uncased("scope"))]
    scope: Option<String>,
    #[field(name = uncased("username"))]
    username: Option<String>,

    #[field(name = uncased("device_identifier"))]
    #[field(name = uncased("deviceidentifier"))]
    device_identifier: Option<String>,
    #[field(name = uncased("device_name"))]
    #[field(name = uncased("devicename"))]
    device_name: Option<String>,
    #[field(name = uncased("device_type"))]
    #[field(name = uncased("devicetype"))]
    device_type: Option<String>,
    #[allow(unused)]
    #[field(name = uncased("device_push_token"))]
    #[field(name = uncased("devicepushtoken"))]
    _device_push_token: Option<String>, // Unused; mobile device push not yet supported.

    // Needed for two-factor auth
    #[field(name = uncased("two_factor_provider"))]
    #[field(name = uncased("twofactorprovider"))]
    two_factor_provider: Option<i32>,
    #[field(name = uncased("two_factor_token"))]
    #[field(name = uncased("twofactortoken"))]
    two_factor_token: Option<String>,
    #[field(name = uncased("two_factor_remember"))]
    #[field(name = uncased("twofactorremember"))]
    two_factor_remember: Option<i32>,
}

fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {
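The switch from the hand-written `FromForm` parser above to the derived form relies on Rocket 0.5's `uncased` field-name aliases, which accept several spellings of the same parameter without the manual key normalization loop. A minimal sketch of the pattern, assuming Rocket 0.5 (struct, route, and field names here are illustrative, not taken from the diff):

use rocket::{form::Form, post, FromForm};

#[derive(FromForm)]
struct TokenForm {
    // Both "grant_type" and "granttype" bind here, case-insensitively,
    // replacing the old to_lowercase()/retain('_') normalization.
    #[field(name = uncased("grant_type"))]
    #[field(name = uncased("granttype"))]
    grant_type: String,
}

// Hypothetical endpoint: `grant_type=password` and `GrantType=password` both parse.
#[post("/token", data = "<data>")]
fn token(data: Form<TokenForm>) -> String {
    data.grant_type.clone()
}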
@@ -5,20 +5,26 @@ mod identity;
mod notifications;
mod web;

use rocket_contrib::json::Json;
use rocket::serde::json::Json;
use serde_json::Value;

pub use crate::api::{
    admin::catchers as admin_catchers,
    admin::routes as admin_routes,
    core::catchers as core_catchers,
    core::purge_sends,
    core::purge_trashed_ciphers,
    core::routes as core_routes,
    core::two_factor::send_incomplete_2fa_notifications,
    core::{emergency_notification_reminder_job, emergency_request_timeout_job},
    core::{event_cleanup_job, events_routes as core_events_routes},
    icons::routes as icons_routes,
    identity::routes as identity_routes,
    notifications::routes as notifications_routes,
    notifications::{start_notification_server, Notify, UpdateType},
    web::catchers as web_catchers,
    web::routes as web_routes,
    web::static_files,
};
use crate::util;

@@ -29,6 +35,7 @@ pub type EmptyResult = ApiResult<()>;

type JsonUpcase<T> = Json<util::UpCase<T>>;
type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
type JsonVec<T> = Json<Vec<T>>;

// Common structs representing JSON data received
#[derive(Deserialize)]
@@ -1,19 +1,41 @@
use std::sync::atomic::{AtomicBool, Ordering};
use std::{
    net::SocketAddr,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
    time::Duration,
};

use rocket::Route;
use rocket_contrib::json::Json;
use chrono::NaiveDateTime;
use futures::{SinkExt, StreamExt};
use rmpv::Value;
use rocket::{serde::json::Json, Route};
use serde_json::Value as JsonValue;
use tokio::{
    net::{TcpListener, TcpStream},
    sync::mpsc::Sender,
};
use tokio_tungstenite::{
    accept_hdr_async,
    tungstenite::{handshake, Message},
};

use crate::{api::EmptyResult, auth::Headers, db::DbConn, Error, CONFIG};
use crate::{
    api::EmptyResult,
    auth::Headers,
    db::models::{Cipher, Folder, Send, User},
    Error, CONFIG,
};

pub fn routes() -> Vec<Route> {
    routes![negotiate, websockets_err]
}

static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);

#[get("/hub")]
fn websockets_err() -> EmptyResult {
    static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);

    if CONFIG.websocket_enabled()
        && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
    {
@@ -30,11 +52,11 @@ fn websockets_err() -> EmptyResult {
}

#[post("/hub/negotiate")]
fn negotiate(_headers: Headers, _conn: DbConn) -> Json<JsonValue> {
fn negotiate(_headers: Headers) -> Json<JsonValue> {
    use crate::crypto;
    use data_encoding::BASE64URL;

    let conn_id = BASE64URL.encode(&crypto::get_random(vec![0u8; 16]));
    let conn_id = crypto::encode_random_bytes::<16>(BASE64URL);
    let mut available_transports: Vec<JsonValue> = Vec::new();

    if CONFIG.websocket_enabled() {
@@ -55,19 +77,6 @@ fn negotiate(_headers: Headers, _conn: DbConn) -> Json<JsonValue> {
//
// Websockets server
//
use std::io;
use std::sync::Arc;
use std::thread;

use ws::{self, util::Token, Factory, Handler, Handshake, Message, Sender};

use chashmap::CHashMap;
use chrono::NaiveDateTime;
use serde_json::from_str;

use crate::db::models::{Cipher, Folder, Send, User};

use rmpv::Value;

fn serialize(val: Value) -> Vec<u8> {
    use rmpv::encode::write_value;
@@ -118,192 +127,49 @@ fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
    }
}

// Server WebSocket handler
pub struct WsHandler {
    out: Sender,
    user_uuid: Option<String>,
    users: WebSocketUsers,
}

const RECORD_SEPARATOR: u8 = 0x1e;
const INITIAL_RESPONSE: [u8; 3] = [0x7b, 0x7d, RECORD_SEPARATOR]; // {, }, <RS>

#[derive(Deserialize)]
struct InitialMessage {
    protocol: String,
#[derive(Deserialize, Copy, Clone, Eq, PartialEq)]
struct InitialMessage<'a> {
    protocol: &'a str,
    version: i32,
}

const PING_MS: u64 = 15_000;
const PING: Token = Token(1);

const ACCESS_TOKEN_KEY: &str = "access_token=";

impl WsHandler {
    fn err(&self, msg: &'static str) -> ws::Result<()> {
        self.out.close(ws::CloseCode::Invalid)?;

        // We need to specifically return an IO error so ws closes the connection
        let io_error = io::Error::from(io::ErrorKind::InvalidData);
        Err(ws::Error::new(ws::ErrorKind::Io(io_error), msg))
    }

    fn get_request_token(&self, hs: Handshake) -> Option<String> {
        use std::str::from_utf8;

        // Verify we have a token header
        if let Some(header_value) = hs.request.header("Authorization") {
            if let Ok(converted) = from_utf8(header_value) {
                if let Some(token_part) = converted.split("Bearer ").nth(1) {
                    return Some(token_part.into());
                }
            }
        };

        // Otherwise verify the query parameter value
        let path = hs.request.resource();
        if let Some(params) = path.split('?').nth(1) {
            let params_iter = params.split('&').take(1);
            for val in params_iter {
                if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
                    return Some(stripped.into());
                }
            }
        };

        None
    }
}

impl Handler for WsHandler {
    fn on_open(&mut self, hs: Handshake) -> ws::Result<()> {
        // Path == "/notifications/hub?id=<id>==&access_token=<access_token>"
        //
        // We don't use `id`, and as of around 2020-03-25, the official clients
        // no longer seem to pass `id` (only `access_token`).

        // Get user token from header or query parameter
        let access_token = match self.get_request_token(hs) {
            Some(token) => token,
            _ => return self.err("Missing access token"),
        };

        // Validate the user
        use crate::auth;
        let claims = match auth::decode_login(access_token.as_str()) {
            Ok(claims) => claims,
            Err(_) => return self.err("Invalid access token provided"),
        };

        // Assign the user to the handler
        let user_uuid = claims.sub;
        self.user_uuid = Some(user_uuid.clone());

        // Add the current Sender to the user list
        let handler_insert = self.out.clone();
        let handler_update = self.out.clone();

        self.users.map.upsert(user_uuid, || vec![handler_insert], |ref mut v| v.push(handler_update));

        // Schedule a ping to keep the connection alive
        self.out.timeout(PING_MS, PING)
    }

    fn on_message(&mut self, msg: Message) -> ws::Result<()> {
        if let Message::Text(text) = msg.clone() {
            let json = &text[..text.len() - 1]; // Remove last char

            if let Ok(InitialMessage {
                protocol,
                version,
            }) = from_str::<InitialMessage>(json)
            {
                if &protocol == "messagepack" && version == 1 {
                    return self.out.send(&INITIAL_RESPONSE[..]); // Respond to initial message
                }
            }
        }

        // If it's not the initial message, just echo the message
        self.out.send(msg)
    }

    fn on_timeout(&mut self, event: Token) -> ws::Result<()> {
        if event == PING {
            // send ping
            self.out.send(create_ping())?;

            // reschedule the timeout
            self.out.timeout(PING_MS, PING)
        } else {
            Ok(())
        }
    }
}

struct WsFactory {
    pub users: WebSocketUsers,
}

impl WsFactory {
    pub fn init() -> Self {
        WsFactory {
            users: WebSocketUsers {
                map: Arc::new(CHashMap::new()),
            },
        }
    }
}

impl Factory for WsFactory {
    type Handler = WsHandler;

    fn connection_made(&mut self, out: Sender) -> Self::Handler {
        WsHandler {
            out,
            user_uuid: None,
            users: self.users.clone(),
        }
    }

    fn connection_lost(&mut self, handler: Self::Handler) {
        // Remove handler
        if let Some(user_uuid) = &handler.user_uuid {
            if let Some(mut user_conn) = self.users.map.get_mut(user_uuid) {
                if let Some(pos) = user_conn.iter().position(|x| x == &handler.out) {
                    user_conn.remove(pos);
                }
            }
        }
    }
}
static INITIAL_MESSAGE: InitialMessage<'static> = InitialMessage {
    protocol: "messagepack",
    version: 1,
};

// We attach the UUID to the sender so we can differentiate them when we need to remove them from the Vec
type UserSenders = (uuid::Uuid, Sender<Message>);
#[derive(Clone)]
pub struct WebSocketUsers {
    map: Arc<CHashMap<String, Vec<Sender>>>,
    map: Arc<dashmap::DashMap<String, Vec<UserSenders>>>,
}

impl WebSocketUsers {
    fn send_update(&self, user_uuid: &str, data: &[u8]) -> ws::Result<()> {
        if let Some(user) = self.map.get(user_uuid) {
            for sender in user.iter() {
                sender.send(data)?;
    async fn send_update(&self, user_uuid: &str, data: &[u8]) {
        if let Some(user) = self.map.get(user_uuid).map(|v| v.clone()) {
            for (_, sender) in user.iter() {
                if sender.send(Message::binary(data)).await.is_err() {
                    // TODO: Delete from map here too?
                }
            }
        }
        Ok(())
    }

    // NOTE: The last modified date needs to be updated before calling these methods
    pub fn send_user_update(&self, ut: UpdateType, user: &User) {
    pub async fn send_user_update(&self, ut: UpdateType, user: &User) {
        let data = create_update(
            vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
            ut,
        );

        self.send_update(&user.uuid, &data).ok();
        self.send_update(&user.uuid, &data).await;
    }

    pub fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
    pub async fn send_folder_update(&self, ut: UpdateType, folder: &Folder) {
        let data = create_update(
            vec![
                ("Id".into(), folder.uuid.clone().into()),
@@ -313,10 +179,10 @@ impl WebSocketUsers {
            ut,
        );

        self.send_update(&folder.user_uuid, &data).ok();
        self.send_update(&folder.user_uuid, &data).await;
    }

    pub fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &[String]) {
    pub async fn send_cipher_update(&self, ut: UpdateType, cipher: &Cipher, user_uuids: &[String]) {
        let user_uuid = convert_option(cipher.user_uuid.clone());
        let org_uuid = convert_option(cipher.organization_uuid.clone());

@@ -332,11 +198,11 @@ impl WebSocketUsers {
        );

        for uuid in user_uuids {
            self.send_update(uuid, &data).ok();
            self.send_update(uuid, &data).await;
        }
    }

    pub fn send_send_update(&self, ut: UpdateType, send: &Send, user_uuids: &[String]) {
    pub async fn send_send_update(&self, ut: UpdateType, send: &Send, user_uuids: &[String]) {
        let user_uuid = convert_option(send.user_uuid.clone());

        let data = create_update(
@@ -349,7 +215,7 @@ impl WebSocketUsers {
        );

        for uuid in user_uuids {
            self.send_update(uuid, &data).ok();
            self.send_update(uuid, &data).await;
        }
    }
}
@@ -392,7 +258,7 @@ fn create_ping() -> Vec<u8> {
}

#[allow(dead_code)]
#[derive(PartialEq)]
#[derive(Eq, PartialEq)]
pub enum UpdateType {
    CipherUpdate = 0,
    CipherCreate = 1,
@@ -416,28 +282,145 @@ pub enum UpdateType {
    None = 100,
}

use rocket::State;
pub type Notify<'a> = State<'a, WebSocketUsers>;
pub type Notify<'a> = &'a rocket::State<WebSocketUsers>;

pub fn start_notification_server() -> WebSocketUsers {
    let factory = WsFactory::init();
    let users = factory.users.clone();
    let users = WebSocketUsers {
        map: Arc::new(dashmap::DashMap::new()),
    };

    if CONFIG.websocket_enabled() {
        thread::spawn(move || {
            let mut settings = ws::Settings::default();
            settings.max_connections = 500;
            settings.queue_size = 2;
            settings.panic_on_internal = false;
        let users2 = users.clone();
        tokio::spawn(async move {
            let addr = (CONFIG.websocket_address(), CONFIG.websocket_port());
            info!("Starting WebSockets server on {}:{}", addr.0, addr.1);
            let listener = TcpListener::bind(addr).await.expect("Can't listen on websocket port");

            ws::Builder::new()
                .with_settings(settings)
                .build(factory)
                .unwrap()
                .listen((CONFIG.websocket_address().as_str(), CONFIG.websocket_port()))
                .unwrap();
            let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>();
            CONFIG.set_ws_shutdown_handle(shutdown_tx);

            loop {
                tokio::select! {
                    Ok((stream, addr)) = listener.accept() => {
                        tokio::spawn(handle_connection(stream, users2.clone(), addr));
                    }

                    _ = &mut shutdown_rx => {
                        break;
                    }
                }
            }

            info!("Shutting down WebSockets server!")
        });
    }

    users
}

async fn handle_connection(stream: TcpStream, users: WebSocketUsers, addr: SocketAddr) -> Result<(), Error> {
    let mut user_uuid: Option<String> = None;

    info!("Accepting WS connection from {addr}");

    // Accept connection, do initial handshake, validate auth token and get the user ID
    use handshake::server::{Request, Response};
    let mut stream = accept_hdr_async(stream, |req: &Request, res: Response| {
        if let Some(token) = get_request_token(req) {
            if let Ok(claims) = crate::auth::decode_login(&token) {
                user_uuid = Some(claims.sub);
                return Ok(res);
            }
        }
        Err(Response::builder().status(401).body(None).unwrap())
    })
    .await?;

    let user_uuid = user_uuid.expect("User UUID should be set after the handshake");

    // Add a channel to send messages to this client to the map
    let entry_uuid = uuid::Uuid::new_v4();
    let (tx, mut rx) = tokio::sync::mpsc::channel(100);
    users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx));

    let mut interval = tokio::time::interval(Duration::from_secs(15));
    loop {
        tokio::select! {
            res = stream.next() => {
                match res {
                    Some(Ok(message)) => {
                        // Respond to any pings
                        if let Message::Ping(ping) = message {
                            if stream.send(Message::Pong(ping)).await.is_err() {
                                break;
                            }
                            continue;
                        } else if let Message::Pong(_) = message {
                            /* Ignored */
                            continue;
                        }

                        // We should receive an initial message with the protocol and version, and we will reply to it
                        if let Message::Text(ref message) = message {
                            let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);

                            if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
                                stream.send(Message::binary(INITIAL_RESPONSE)).await?;
                                continue;
                            }
                        }

                        // Just echo anything else the client sends
                        if stream.send(message).await.is_err() {
                            break;
                        }
                    }
                    _ => break,
                }
            }

            res = rx.recv() => {
                match res {
                    Some(res) => {
                        if stream.send(res).await.is_err() {
                            break;
                        }
                    },
                    None => break,
                }
            }

            _ = interval.tick() => {
                if stream.send(Message::Ping(create_ping())).await.is_err() {
                    break;
                }
            }
        }
    }

    info!("Closing WS connection from {addr}");

    // Delete from map
    users.map.entry(user_uuid).or_default().retain(|(uuid, _)| uuid != &entry_uuid);
    Ok(())
}

fn get_request_token(req: &handshake::server::Request) -> Option<String> {
    const ACCESS_TOKEN_KEY: &str = "access_token=";

    if let Some(Ok(auth)) = req.headers().get("Authorization").map(|a| a.to_str()) {
        if let Some(token_part) = auth.strip_prefix("Bearer ") {
            return Some(token_part.to_owned());
        }
    }

    if let Some(params) = req.uri().query() {
        let params_iter = params.split('&').take(1);
        for val in params_iter {
            if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
                return Some(stripped.to_owned());
            }
        }
    }
    None
}
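The rewrite above replaces the `ws` crate's handler/factory callbacks with a task-per-connection model: each accepted socket registers an mpsc sender in a `DashMap`, updates are fanned out over a cloned sender list, and an entry-specific UUID lets a closing connection remove only itself. A rough standalone sketch of that registry pattern, assuming the `dashmap`, `tokio`, and `uuid` crates (names and payload types are illustrative):

use std::sync::Arc;

use dashmap::DashMap;
use tokio::sync::mpsc::{channel, Sender};
use uuid::Uuid;

// Each user id maps to the senders of all their open sockets; the Uuid tags
// one connection so only that entry is dropped on disconnect.
type Registry = Arc<DashMap<String, Vec<(Uuid, Sender<Vec<u8>>)>>>;

async fn demo(registry: Registry) {
    // On connect: register a channel for this user's new socket.
    let (tx, mut rx) = channel::<Vec<u8>>(100);
    let entry_id = Uuid::new_v4();
    registry.entry("user-uuid".into()).or_default().push((entry_id, tx));

    // On update: clone the sender list out of the shard first, so the map
    // lock is not held across the await points below.
    if let Some(senders) = registry.get("user-uuid").map(|v| v.clone()) {
        for (_, sender) in senders {
            let _ = sender.send(b"payload".to_vec()).await;
        }
    }

    // The connection task would forward `rx` messages into its websocket.
    let _ = rx.recv().await;

    // On disconnect: remove only this connection's entry.
    registry.entry("user-uuid".into()).or_default().retain(|(id, _)| id != &entry_id);
}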
125
src/api/web.rs
@@ -1,10 +1,10 @@
use std::path::{Path, PathBuf};

use rocket::{http::ContentType, response::content::Content, response::NamedFile, Route};
use rocket_contrib::json::Json;
use rocket::{fs::NamedFile, http::ContentType, response::content::RawHtml as Html, serde::json::Json, Catcher, Route};
use serde_json::Value;

use crate::{
    api::{core::now, ApiResult},
    error::Error,
    util::{Cached, SafeString},
    CONFIG,
@@ -20,80 +20,95 @@ pub fn routes() -> Vec<Route> {
    }
}

pub fn catchers() -> Vec<Catcher> {
    if CONFIG.web_vault_enabled() {
        catchers![not_found]
    } else {
        catchers![]
    }
}

#[catch(404)]
fn not_found() -> ApiResult<Html<String>> {
    // Return the page
    let json = json!({
        "urlpath": CONFIG.domain_path()
    });
    let text = CONFIG.render_template("404", &json)?;
    Ok(Html(text))
}

#[get("/")]
fn web_index() -> Cached<Option<NamedFile>> {
    Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).ok())
async fn web_index() -> Cached<Option<NamedFile>> {
    Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).await.ok(), false)
}

#[get("/app-id.json")]
fn app_id() -> Cached<Content<Json<Value>>> {
fn app_id() -> Cached<(ContentType, Json<Value>)> {
    let content_type = ContentType::new("application", "fido.trusted-apps+json");

    Cached::long(Content(
        content_type,
        Json(json!({
            "trustedFacets": [
                {
                    "version": { "major": 1, "minor": 0 },
                    "ids": [
                        // Per <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-appid-and-facets-v2.0-id-20180227.html#determining-the-facetid-of-a-calling-application>:
                        //
                        // "In the Web case, the FacetID MUST be the Web Origin [RFC6454]
                        // of the web page triggering the FIDO operation, written as
                        // a URI with an empty path. Default ports are omitted and any
                        // path component is ignored."
                        //
                        // This leaves it unclear as to whether the path must be empty,
                        // or whether it can be non-empty and will be ignored. To be on
                        // the safe side, use a proper web origin (with empty path).
                        &CONFIG.domain_origin(),
                        "ios:bundle-id:com.8bit.bitwarden",
                        "android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI" ]
                }]
        })),
    ))
    Cached::long(
        (
            content_type,
            Json(json!({
                "trustedFacets": [
                    {
                        "version": { "major": 1, "minor": 0 },
                        "ids": [
                            // Per <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-appid-and-facets-v2.0-id-20180227.html#determining-the-facetid-of-a-calling-application>:
                            //
                            // "In the Web case, the FacetID MUST be the Web Origin [RFC6454]
                            // of the web page triggering the FIDO operation, written as
                            // a URI with an empty path. Default ports are omitted and any
                            // path component is ignored."
                            //
                            // This leaves it unclear as to whether the path must be empty,
                            // or whether it can be non-empty and will be ignored. To be on
                            // the safe side, use a proper web origin (with empty path).
                            &CONFIG.domain_origin(),
                            "ios:bundle-id:com.8bit.bitwarden",
                            "android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI" ]
                    }]
            })),
        ),
        true,
    )
}

#[get("/<p..>", rank = 10)] // Only match this if the other routes don't match
fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
    Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).ok())
async fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
    Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true)
}

#[get("/attachments/<uuid>/<file_id>")]
fn attachments(uuid: SafeString, file_id: SafeString) -> Option<NamedFile> {
    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).ok()
async fn attachments(uuid: SafeString, file_id: SafeString) -> Option<NamedFile> {
    NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).await.ok()
}

// We use DbConn here to let the alive healthcheck also verify the database connection.
use crate::db::DbConn;
#[get("/alive")]
fn alive(_conn: DbConn) -> Json<String> {
    use crate::util::format_date;
    use chrono::Utc;

    Json(format_date(&Utc::now().naive_utc()))
    now()
}

#[get("/bwrs_static/<filename>")]
fn static_files(filename: String) -> Result<Content<&'static [u8]>, Error> {
#[get("/vw_static/<filename>")]
pub fn static_files(filename: String) -> Result<(ContentType, &'static [u8]), Error> {
    match filename.as_ref() {
        "mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
        "logo-gray.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
        "error-x.svg" => Ok(Content(ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
        "hibp.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/hibp.png"))),
        "vaultwarden-icon.png" => {
            Ok(Content(ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png")))
        }

        "bootstrap.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
        "bootstrap-native.js" => {
            Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js")))
        }
        "identicon.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))),
        "datatables.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
        "datatables.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
        "jquery-3.6.0.slim.js" => {
            Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.0.slim.js")))
        "404.png" => Ok((ContentType::PNG, include_bytes!("../static/images/404.png"))),
        "mail-github.png" => Ok((ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),
        "logo-gray.png" => Ok((ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))),
        "error-x.svg" => Ok((ContentType::SVG, include_bytes!("../static/images/error-x.svg"))),
        "hibp.png" => Ok((ContentType::PNG, include_bytes!("../static/images/hibp.png"))),
        "vaultwarden-icon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png"))),
        "vaultwarden-favicon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-favicon.png"))),
        "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
        "bootstrap-native.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
        "jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))),
        "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
        "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
        "jquery-3.6.2.slim.js" => {
            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.6.2.slim.js")))
        }
        _ => err!(format!("Static file not found: {}", filename)),
    }
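Rocket 0.5 drops the `Content` wrapper used in the old routes; a `(ContentType, R)` tuple is itself a responder, which is what the new `app_id` and `static_files` signatures lean on. A minimal sketch, assuming Rocket 0.5 (route and asset names are illustrative):

use rocket::{get, http::ContentType};

// The tuple responder sets the MIME type explicitly for embedded bytes,
// replacing Rocket 0.4's Content(ContentType::PNG, ...).
#[get("/logo.png")]
fn logo() -> (ContentType, &'static [u8]) {
    (ContentType::PNG, include_bytes!("logo.png"))
}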
367
src/auth.rs
@@ -1,19 +1,14 @@
//
// JWT Handling
//
use chrono::{Duration, Utc};
use num_traits::FromPrimitive;
use once_cell::sync::Lazy;

use jsonwebtoken::{self, Algorithm, DecodingKey, EncodingKey, Header};
use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
use serde::de::DeserializeOwned;
use serde::ser::Serialize;

use crate::{
    error::{Error, MapResult},
    util::read_file,
    CONFIG,
};
use crate::{error::Error, CONFIG};

const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

@@ -30,13 +25,13 @@ static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.
static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.domain_origin()));

static PRIVATE_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
    read_file(&CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key.\n{}", e))
    std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key.\n{}", e))
});
static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| {
    EncodingKey::from_rsa_pem(&PRIVATE_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{}", e))
});
static PUBLIC_RSA_KEY_VEC: Lazy<Vec<u8>> = Lazy::new(|| {
    read_file(&CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key.\n{}", e))
    std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key.\n{}", e))
});
static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
    DecodingKey::from_rsa_pem(&PUBLIC_RSA_KEY_VEC).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{}", e))
@@ -55,18 +50,22 @@ pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
}

fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Error> {
    let validation = jsonwebtoken::Validation {
        leeway: 30, // 30 seconds
        validate_exp: true,
        validate_nbf: true,
        aud: None,
        iss: Some(issuer),
        sub: None,
        algorithms: vec![JWT_ALGORITHM],
    };
    let mut validation = jsonwebtoken::Validation::new(JWT_ALGORITHM);
    validation.leeway = 30; // 30 seconds
    validation.validate_exp = true;
    validation.validate_nbf = true;
    validation.set_issuer(&[issuer]);

    let token = token.replace(char::is_whitespace, "");
    jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation).map(|d| d.claims).map_res("Error decoding JWT")
    match jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation) {
        Ok(d) => Ok(d.claims),
        Err(err) => match *err.kind() {
            ErrorKind::InvalidToken => err!("Token is invalid"),
            ErrorKind::InvalidIssuer => err!("Issuer is invalid"),
            ErrorKind::ExpiredSignature => err!("Token has expired"),
            _ => err!("Error decoding JWT"),
        },
    }
}

pub fn decode_login(token: &str) -> Result<LoginJwtClaims, Error> {
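The struct-literal `Validation` from jsonwebtoken 7 becomes a mutable builder value in version 8, and error handling moves to matching on `ErrorKind`, as the new `decode_jwt` above shows. A standalone sketch of the same flow, assuming jsonwebtoken 8 (the claims shape, issuer, and error messages are illustrative):

use jsonwebtoken::{decode, errors::ErrorKind, Algorithm, DecodingKey, Validation};
use serde::Deserialize;

#[derive(Deserialize)]
struct Claims {
    sub: String,
    exp: i64,
}

fn check(token: &str, key: &DecodingKey) -> Result<Claims, String> {
    // jsonwebtoken 8: build Validation for an algorithm, then adjust fields.
    let mut validation = Validation::new(Algorithm::RS256);
    validation.leeway = 30; // 30 seconds
    validation.set_issuer(&["https://example.com"]);

    match decode::<Claims>(token, key, &validation) {
        Ok(data) => Ok(data.claims),
        Err(err) => match err.kind() {
            ErrorKind::ExpiredSignature => Err("Token has expired".into()),
            ErrorKind::InvalidIssuer => Err("Issuer is invalid".into()),
            _ => Err("Error decoding JWT".into()),
        },
    }
}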
@@ -153,9 +152,10 @@ pub fn generate_invite_claims(
    invited_by_email: Option<String>,
) -> InviteJwtClaims {
    let time_now = Utc::now().naive_utc();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    InviteJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
        iss: JWT_INVITE_ISSUER.to_string(),
        sub: uuid,
        email,
@@ -165,7 +165,6 @@ pub fn generate_invite_claims(
    }
}

// var token = _dataProtector.Protect($"EmergencyAccessInvite {emergencyAccess.Id} {emergencyAccess.Email} {nowMillis}");
#[derive(Debug, Serialize, Deserialize)]
pub struct EmergencyAccessInviteJwtClaims {
    // Not before
@@ -178,22 +177,23 @@ pub struct EmergencyAccessInviteJwtClaims {
    pub sub: String,

    pub email: String,
    pub emer_id: Option<String>,
    pub grantor_name: Option<String>,
    pub grantor_email: Option<String>,
    pub emer_id: String,
    pub grantor_name: String,
    pub grantor_email: String,
}

pub fn generate_emergency_access_invite_claims(
    uuid: String,
    email: String,
    emer_id: Option<String>,
    grantor_name: Option<String>,
    grantor_email: Option<String>,
    emer_id: String,
    grantor_name: String,
    grantor_email: String,
) -> EmergencyAccessInviteJwtClaims {
    let time_now = Utc::now().naive_utc();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    EmergencyAccessInviteJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
        iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
        sub: uuid,
        email,
@@ -217,9 +217,10 @@ pub struct BasicJwtClaims {

pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
    let time_now = Utc::now().naive_utc();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    BasicJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
        iss: JWT_DELETE_ISSUER.to_string(),
        sub: uuid,
    }
@@ -227,9 +228,10 @@ pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {

pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
    let time_now = Utc::now().naive_utc();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    BasicJwtClaims {
        nbf: time_now.timestamp(),
        exp: (time_now + Duration::days(5)).timestamp(),
        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
        iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
        sub: uuid,
    }
@@ -258,10 +260,13 @@ pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
//
// Bearer token authentication
//
use rocket::request::{FromRequest, Outcome, Request};
use rocket::{
    outcome::try_outcome,
    request::{FromRequest, Outcome, Request},
};

use crate::db::{
    models::{CollectionUser, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
    models::{Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
    DbConn,
};

@@ -269,10 +274,11 @@ pub struct Host {
    pub host: String,
}

impl<'a, 'r> FromRequest<'a, 'r> for Host {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for Host {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = request.headers();

        // Get host
@@ -309,23 +315,42 @@ impl<'a, 'r> FromRequest<'a, 'r> for Host {
    }
}

pub struct ClientHeaders {
    pub host: String,
    pub device_type: i32,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for ClientHeaders {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let host = try_outcome!(Host::from_request(request).await).host;
        // When unknown or unable to parse, return 14, which is 'Unknown Browser'
        let device_type: i32 =
            request.headers().get_one("device-type").map(|d| d.parse().unwrap_or(14)).unwrap_or_else(|| 14);

        Outcome::Success(ClientHeaders {
            host,
            device_type,
        })
    }
}

pub struct Headers {
    pub host: String,
    pub device: Device,
    pub user: User,
}

impl<'a, 'r> FromRequest<'a, 'r> for Headers {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for Headers {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = request.headers();

        let host = match Host::from_request(request) {
            Outcome::Forward(_) => return Outcome::Forward(()),
            Outcome::Failure(f) => return Outcome::Failure(f),
            Outcome::Success(host) => host.host,
        };
        let host = try_outcome!(Host::from_request(request).await).host;

        // Get access_token
        let access_token: &str = match headers.get_one("Authorization") {
@@ -345,17 +370,17 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
        let device_uuid = claims.device;
        let user_uuid = claims.sub;

        let conn = match request.guard::<DbConn>() {
        let mut conn = match DbConn::from_request(request).await {
            Outcome::Success(conn) => conn,
            _ => err_handler!("Error getting DB"),
        };

        let device = match Device::find_by_uuid(&device_uuid, &conn) {
        let device = match Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &mut conn).await {
            Some(device) => device,
            None => err_handler!("Invalid device id"),
        };

        let user = match User::find_by_uuid(&user_uuid, &conn) {
        let user = match User::find_by_uuid(&user_uuid, &mut conn).await {
            Some(user) => user,
            None => err_handler!("Device has no user associated"),
        };
@@ -364,7 +389,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
        if let Some(stamp_exception) =
            user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
        {
            let current_route = match request.route().and_then(|r| r.name) {
            let current_route = match request.route().and_then(|r| r.name.as_deref()) {
                Some(name) => name,
                _ => err_handler!("Error getting current route for stamp exception"),
            };
@@ -377,7 +402,7 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
            // This prevents checking this stamp exception for new requests.
            let mut user = user;
            user.reset_stamp_exception();
            if let Err(e) = user.save(&conn) {
            if let Err(e) = user.save(&mut conn).await {
                error!("Error updating user: {:#?}", e);
            }
            err_handler!("Stamp exception is expired")
@@ -411,14 +436,14 @@ pub struct OrgHeaders {
// org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
fn get_org_id(request: &Request) -> Option<String> {
    if let Some(Ok(org_id)) = request.get_param::<String>(1) {
fn get_org_id(request: &Request<'_>) -> Option<String> {
    if let Some(Ok(org_id)) = request.param::<String>(1) {
        if uuid::Uuid::parse_str(&org_id).is_ok() {
            return Some(org_id);
        }
    }

    if let Some(Ok(org_id)) = request.get_query_value::<String>("organizationId") {
    if let Some(Ok(org_id)) = request.query_value::<String>("organizationId") {
        if uuid::Uuid::parse_str(&org_id).is_ok() {
            return Some(org_id);
        }
@@ -427,52 +452,48 @@ fn get_org_id(request: &Request) -> Option<String> {
    None
}

impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<Headers>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                match get_org_id(request) {
                    Some(org_id) => {
                        let conn = match request.guard::<DbConn>() {
                            Outcome::Success(conn) => conn,
                            _ => err_handler!("Error getting DB"),
                        };

                        let user = headers.user;
                        let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &conn) {
                            Some(user) => {
                                if user.status == UserOrgStatus::Confirmed as i32 {
                                    user
                                } else {
                                    err_handler!("The current user isn't confirmed member of the organization")
                                }
                            }
                            None => err_handler!("The current user isn't member of the organization"),
                        };

                        Outcome::Success(Self {
                            host: headers.host,
                            device: headers.device,
                            user,
                            org_user_type: {
                                if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
                                    org_usr_type
                                } else {
                                    // This should only happen if the DB is corrupted
                                    err_handler!("Unknown user type in the database")
                                }
                            },
                            org_user,
                            org_id,
                        })
                    }
                    _ => err_handler!("Error getting the organization id"),
                }
            }
        }
    }
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgHeaders {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(Headers::from_request(request).await);
        match get_org_id(request) {
            Some(org_id) => {
                let mut conn = match DbConn::from_request(request).await {
                    Outcome::Success(conn) => conn,
                    _ => err_handler!("Error getting DB"),
                };

                let user = headers.user;
                let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await {
                    Some(user) => {
                        if user.status == UserOrgStatus::Confirmed as i32 {
                            user
                        } else {
                            err_handler!("The current user isn't confirmed member of the organization")
                        }
                    }
                    None => err_handler!("The current user isn't member of the organization"),
                };

                Outcome::Success(Self {
                    host: headers.host,
                    device: headers.device,
                    user,
                    org_user_type: {
                        if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) {
                            org_usr_type
                        } else {
                            // This should only happen if the DB is corrupted
                            err_handler!("Unknown user type in the database")
                        }
                    },
                    org_user,
                    org_id,
                })
            }
            _ => err_handler!("Error getting the organization id"),
        }
    }
}
@@ -482,27 +503,26 @@ pub struct AdminHeaders {
    pub device: Device,
    pub user: User,
    pub org_user_type: UserOrgType,
    pub client_version: Option<String>,
}

impl<'a, 'r> FromRequest<'a, 'r> for AdminHeaders {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<OrgHeaders>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                if headers.org_user_type >= UserOrgType::Admin {
                    Outcome::Success(Self {
                        host: headers.host,
                        device: headers.device,
                        user: headers.user,
                        org_user_type: headers.org_user_type,
                    })
                } else {
                    err_handler!("You need to be Admin or Owner to call this endpoint")
                }
            }
        }
    }
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for AdminHeaders {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(OrgHeaders::from_request(request).await);
        let client_version = request.headers().get_one("Bitwarden-Client-Version").map(String::from);
        if headers.org_user_type >= UserOrgType::Admin {
            Outcome::Success(Self {
                host: headers.host,
                device: headers.device,
                user: headers.user,
                org_user_type: headers.org_user_type,
                client_version,
            })
        } else {
            err_handler!("You need to be Admin or Owner to call this endpoint")
        }
    }
}
@@ -520,14 +540,14 @@ impl From<AdminHeaders> for Headers {
// col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"),
// but there could be cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
fn get_col_id(request: &Request) -> Option<String> {
    if let Some(Ok(col_id)) = request.get_param::<String>(3) {
fn get_col_id(request: &Request<'_>) -> Option<String> {
    if let Some(Ok(col_id)) = request.param::<String>(3) {
        if uuid::Uuid::parse_str(&col_id).is_ok() {
            return Some(col_id);
        }
    }

    if let Some(Ok(col_id)) = request.get_query_value::<String>("collectionId") {
    if let Some(Ok(col_id)) = request.query_value::<String>("collectionId") {
        if uuid::Uuid::parse_str(&col_id).is_ok() {
            return Some(col_id);
        }
@@ -546,46 +566,42 @@ pub struct ManagerHeaders {
    pub org_user_type: UserOrgType,
}

impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeaders {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<OrgHeaders>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                if headers.org_user_type >= UserOrgType::Manager {
                    match get_col_id(request) {
                        Some(col_id) => {
                            let conn = match request.guard::<DbConn>() {
                                Outcome::Success(conn) => conn,
                                _ => err_handler!("Error getting DB"),
                            };

                            if !headers.org_user.has_full_access() {
                                match CollectionUser::find_by_collection_and_user(
                                    &col_id,
                                    &headers.org_user.user_uuid,
                                    &conn,
                                ) {
                                    Some(_) => (),
                                    None => err_handler!("The current user isn't a manager for this collection"),
                                }
                            }
                        }
                        _ => err_handler!("Error getting the collection id"),
                    }

                    Outcome::Success(Self {
                        host: headers.host,
                        device: headers.device,
                        user: headers.user,
                        org_user_type: headers.org_user_type,
                    })
                } else {
                    err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
                }
            }
        }
    }
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for ManagerHeaders {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(OrgHeaders::from_request(request).await);
        if headers.org_user_type >= UserOrgType::Manager {
            match get_col_id(request) {
                Some(col_id) => {
                    let mut conn = match DbConn::from_request(request).await {
                        Outcome::Success(conn) => conn,
                        _ => err_handler!("Error getting DB"),
                    };

                    if !headers.org_user.has_full_access()
                        && !Collection::has_access_by_collection_and_user_uuid(
                            &col_id,
                            &headers.org_user.user_uuid,
                            &mut conn,
                        )
                        .await
                    {
                        err_handler!("The current user isn't a manager for this collection")
                    }
                }
                _ => err_handler!("Error getting the collection id"),
            }

            Outcome::Success(Self {
                host: headers.host,
                device: headers.device,
                user: headers.user,
                org_user_type: headers.org_user_type,
            })
        } else {
            err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
        }
    }
}
@@ -609,25 +625,21 @@ pub struct ManagerHeadersLoose {
    pub org_user_type: UserOrgType,
}

impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeadersLoose {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<OrgHeaders>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                if headers.org_user_type >= UserOrgType::Manager {
                    Outcome::Success(Self {
                        host: headers.host,
                        device: headers.device,
                        user: headers.user,
                        org_user_type: headers.org_user_type,
                    })
                } else {
                    err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
                }
            }
        }
    }
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for ManagerHeadersLoose {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(OrgHeaders::from_request(request).await);
        if headers.org_user_type >= UserOrgType::Manager {
            Outcome::Success(Self {
                host: headers.host,
                device: headers.device,
                user: headers.user,
                org_user_type: headers.org_user_type,
            })
        } else {
            err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
        }
    }
}
@@ -648,24 +660,20 @@ pub struct OwnerHeaders {
    pub user: User,
}

impl<'a, 'r> FromRequest<'a, 'r> for OwnerHeaders {
    type Error = &'static str;

    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
        match request.guard::<OrgHeaders>() {
            Outcome::Forward(_) => Outcome::Forward(()),
            Outcome::Failure(f) => Outcome::Failure(f),
            Outcome::Success(headers) => {
                if headers.org_user_type == UserOrgType::Owner {
                    Outcome::Success(Self {
                        host: headers.host,
                        device: headers.device,
                        user: headers.user,
                    })
                } else {
                    err_handler!("You need to be Owner to call this endpoint")
                }
            }
        }
    }
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for OwnerHeaders {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let headers = try_outcome!(OrgHeaders::from_request(request).await);
        if headers.org_user_type == UserOrgType::Owner {
            Outcome::Success(Self {
                host: headers.host,
                device: headers.device,
                user: headers.user,
            })
        } else {
            err_handler!("You need to be Owner to call this endpoint")
        }
    }
}
@@ -679,10 +687,11 @@ pub struct ClientIp {
    pub ip: IpAddr,
}

impl<'a, 'r> FromRequest<'a, 'r> for ClientIp {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for ClientIp {
    type Error = ();

    fn from_request(req: &'a Request<'r>) -> Outcome<Self, Self::Error> {
    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let ip = if CONFIG._ip_header_enabled() {
            req.headers().get_one(&CONFIG.ip_header()).and_then(|ip| {
                match ip.find(',') {
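All the guards above share one Rocket 0.5 shape: an `#[rocket::async_trait]` impl that delegates to an inner guard with `try_outcome!`, which early-returns `Forward`/`Failure` instead of the old three-arm `match request.guard::<T>()`. A minimal sketch of that delegation, using the rc-era `Outcome::Failure` variant this diff targets (guard and header names are illustrative):

use rocket::{
    http::Status,
    outcome::try_outcome,
    request::{FromRequest, Outcome, Request},
};

struct Authenticated {
    user_id: String,
}

struct AdminOnly {
    user_id: String,
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for Authenticated {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        match request.headers().get_one("x-user") {
            Some(user) => Outcome::Success(Authenticated { user_id: user.to_string() }),
            None => Outcome::Failure((Status::Unauthorized, "missing user")),
        }
    }
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for AdminOnly {
    type Error = &'static str;

    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        // try_outcome! forwards the inner guard's Forward/Failure outcomes,
        // so only the success path is handled here.
        let auth = try_outcome!(Authenticated::from_request(request).await);
        if request.headers().get_one("x-admin").is_some() {
            Outcome::Success(AdminOnly { user_id: auth.user_id })
        } else {
            Outcome::Failure((Status::Forbidden, "admin required"))
        }
    }
}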
460
src/config.rs
@@ -1,8 +1,8 @@
|
||||
use std::process::exit;
|
||||
use std::sync::RwLock;
|
||||
|
||||
use job_scheduler_ng::Schedule;
|
||||
use once_cell::sync::Lazy;
|
||||
use regex::Regex;
|
||||
use reqwest::Url;
|
||||
|
||||
use crate::{
|
||||
@@ -23,21 +23,6 @@ pub static CONFIG: Lazy<Config> = Lazy::new(|| {
|
||||
})
|
||||
});
|
||||
|
||||
static PRIVACY_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"[\w]").unwrap());
|
||||
const PRIVACY_CONFIG: &[&str] = &[
|
||||
"allowed_iframe_ancestors",
|
||||
"database_url",
|
||||
"domain_origin",
|
||||
"domain_path",
|
||||
"domain",
|
||||
"helo_name",
|
||||
"org_creation_users",
|
||||
"signups_domains_whitelist",
|
||||
"smtp_from",
|
||||
"smtp_host",
|
||||
"smtp_username",
|
||||
];
|
||||
|
||||
pub type Pass = String;
|
||||
|
||||
macro_rules! make_config {
|
||||
@@ -52,6 +37,9 @@ macro_rules! make_config {
|
||||
pub struct Config { inner: RwLock<Inner> }
|
||||
|
||||
struct Inner {
|
||||
rocket_shutdown_handle: Option<rocket::Shutdown>,
|
||||
ws_shutdown_handle: Option<tokio::sync::oneshot::Sender<()>>,
|
||||
|
||||
templates: Handlebars<'static>,
|
||||
config: ConfigItems,
|
||||
|
||||
@@ -61,7 +49,7 @@ macro_rules! make_config {
|
||||
_overrides: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
|
||||
#[derive(Clone, Default, Deserialize, Serialize)]
|
||||
pub struct ConfigBuilder {
|
||||
$($(
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
@@ -72,13 +60,13 @@ macro_rules! make_config {
|
||||
impl ConfigBuilder {
|
||||
#[allow(clippy::field_reassign_with_default)]
|
||||
fn from_env() -> Self {
|
||||
match dotenv::from_path(".env") {
|
||||
match dotenvy::from_path(get_env("ENV_FILE").unwrap_or_else(|| String::from(".env"))) {
|
||||
Ok(_) => (),
|
||||
Err(e) => match e {
|
||||
dotenv::Error::LineParse(msg, pos) => {
|
||||
dotenvy::Error::LineParse(msg, pos) => {
|
||||
panic!("Error loading the .env file:\nNear {:?} on position {}\nPlease fix and restart!\n", msg, pos);
|
||||
},
|
||||
dotenv::Error::Io(ioerr) => match ioerr.kind() {
|
||||
dotenvy::Error::Io(ioerr) => match ioerr.kind() {
|
||||
std::io::ErrorKind::NotFound => {
|
||||
println!("[INFO] No .env file found.\n");
|
||||
},
|
||||
@@ -97,15 +85,14 @@ macro_rules! make_config {
|
||||
|
||||
let mut builder = ConfigBuilder::default();
|
||||
$($(
|
||||
builder.$name = make_config! { @getenv &stringify!($name).to_uppercase(), $ty };
|
||||
builder.$name = make_config! { @getenv paste::paste!(stringify!([<$name:upper>])), $ty };
|
||||
)+)+
|
||||
|
||||
builder
|
||||
}
|
||||
|
||||
fn from_file(path: &str) -> Result<Self, Error> {
|
||||
use crate::util::read_file_string;
|
||||
let config_str = read_file_string(path)?;
|
||||
let config_str = std::fs::read_to_string(path)?;
|
||||
serde_json::from_str(&config_str).map_err(Into::into)
|
||||
}
|
||||
|
||||
@@ -118,7 +105,7 @@ macro_rules! make_config {
|
||||
builder.$name = v.clone();
|
||||
|
||||
if self.$name.is_some() {
|
||||
overrides.push(stringify!($name).to_uppercase());
|
||||
overrides.push(paste::paste!(stringify!([<$name:upper>])).into());
|
||||
}
|
||||
}
|
||||
)+)+
|
||||
@@ -133,19 +120,6 @@ macro_rules! make_config {
|
||||
builder
|
||||
}
|
||||
|
||||
/// Returns a new builder with all the elements from self,
|
||||
/// except those that are equal in both sides
|
||||
fn _remove(&self, other: &Self) -> Self {
|
||||
let mut builder = ConfigBuilder::default();
|
||||
$($(
|
||||
if &self.$name != &other.$name {
|
||||
builder.$name = self.$name.clone();
|
||||
}
|
||||
|
||||
)+)+
|
||||
builder
|
||||
}
|
||||
|
||||
fn build(&self) -> ConfigItems {
|
||||
let mut config = ConfigItems::default();
|
||||
let _domain_set = self.domain.is_some();
|
||||
@@ -161,12 +135,13 @@ macro_rules! make_config {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ConfigItems { $($(pub $name: make_config!{@type $ty, $none_action}, )+)+ }
|
||||
#[derive(Clone, Default)]
|
||||
struct ConfigItems { $($( $name: make_config!{@type $ty, $none_action}, )+)+ }
|
||||
|
||||
#[allow(unused)]
|
||||
impl Config {
|
||||
$($(
|
||||
$(#[doc = $doc])+
|
||||
pub fn $name(&self) -> make_config!{@type $ty, $none_action} {
|
||||
self.inner.read().unwrap().config.$name.clone()
|
||||
}
|
||||
@@ -189,38 +164,100 @@ macro_rules! make_config {
|
||||
|
||||
fn _get_doc(doc: &str) -> serde_json::Value {
|
||||
let mut split = doc.split("|>").map(str::trim);
|
||||
json!({
|
||||
"name": split.next(),
|
||||
"description": split.next()
|
||||
|
||||
// We do not use the json!() macro here since that causes a lot of macro recursion.
|
||||
// This slows down compile time and it also causes issues with rust-analyzer
|
||||
serde_json::Value::Object({
|
||||
let mut doc_json = serde_json::Map::new();
|
||||
doc_json.insert("name".into(), serde_json::to_value(split.next()).unwrap());
|
||||
doc_json.insert("description".into(), serde_json::to_value(split.next()).unwrap());
|
||||
doc_json
|
||||
})
|
||||
}

json!([ $({
"group": stringify!($group),
"grouptoggle": stringify!($($group_enabled)?),
"groupdoc": make_config!{ @show $($groupdoc)? },
"elements": [
$( {
"editable": $editable,
"name": stringify!($name),
"value": cfg.$name,
"default": def.$name,
"type": _get_form_type(stringify!($ty)),
"doc": _get_doc(concat!($($doc),+)),
"overridden": overriden.contains(&stringify!($name).to_uppercase()),
}, )+
]}, )+ ])
// We do not use the json!() macro here since that causes a lot of macro recursion.
// This slows down compile time and it also causes issues with rust-analyzer
serde_json::Value::Array(<[_]>::into_vec(Box::new([
$(
serde_json::Value::Object({
let mut group = serde_json::Map::new();
group.insert("group".into(), (stringify!($group)).into());
group.insert("grouptoggle".into(), (stringify!($($group_enabled)?)).into());
group.insert("groupdoc".into(), (make_config!{ @show $($groupdoc)? }).into());

group.insert("elements".into(), serde_json::Value::Array(<[_]>::into_vec(Box::new([
$(
serde_json::Value::Object({
let mut element = serde_json::Map::new();
element.insert("editable".into(), ($editable).into());
element.insert("name".into(), (stringify!($name)).into());
element.insert("value".into(), serde_json::to_value(cfg.$name).unwrap());
element.insert("default".into(), serde_json::to_value(def.$name).unwrap());
element.insert("type".into(), (_get_form_type(stringify!($ty))).into());
element.insert("doc".into(), (_get_doc(concat!($($doc),+))).into());
element.insert("overridden".into(), (overriden.contains(&paste::paste!(stringify!([<$name:upper>])).into())).into());
element
}),
)+
]))));
group
}),
)+
])))
}

pub fn get_support_json(&self) -> serde_json::Value {
// Define which config keys need to be masked.
// Pass types will always be masked, so there is no need to put them in the list.
// Besides Pass, only String types will be masked via _privacy_mask.
const PRIVACY_CONFIG: &[&str] = &[
"allowed_iframe_ancestors",
"database_url",
"domain_origin",
"domain_path",
"domain",
"helo_name",
"org_creation_users",
"signups_domains_whitelist",
"smtp_from",
"smtp_host",
"smtp_username",
];

let cfg = {
let inner = &self.inner.read().unwrap();
inner.config.clone()
};

json!({ $($(
stringify!($name): make_config!{ @supportstr $name, cfg.$name, $ty, $none_action },
)+)+ })
/// We map over the string and mask every character except a leading URL scheme separator (`://`) and commas.
/// This is the fastest way (within microseconds) compared to using a regex (which takes milliseconds)
fn _privacy_mask(value: &str) -> String {
let mut n: u16 = 0;
let mut colon_match = false;
value
.chars()
.map(|c| {
n += 1;
match c {
':' if n <= 11 => {
colon_match = true;
c
}
'/' if n <= 13 && colon_match => c,
',' => c,
_ => '*',
}
})
.collect::<String>()
}
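
To make the masking concrete, here is a self-contained copy of the logic above applied to a hypothetical database URL: only a scheme separator within the first few characters and any commas survive, everything else becomes `*`.

fn privacy_mask(value: &str) -> String {
    let mut n: u16 = 0;
    let mut colon_match = false;
    value
        .chars()
        .map(|c| {
            n += 1;
            match c {
                // A ':' this early is treated as part of a URL scheme...
                ':' if n <= 11 => {
                    colon_match = true;
                    c
                }
                // ...so the following '//' may pass through as well.
                '/' if n <= 13 && colon_match => c,
                ',' => c,
                _ => '*',
            }
        })
        .collect::<String>()
}

fn main() {
    // The scheme separator survives; user, password, host and path are masked.
    assert_eq!(privacy_mask("mysql://user:pw@host/db"), "*****://***************");
}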

serde_json::Value::Object({
let mut json = serde_json::Map::new();
$($(
json.insert(stringify!($name).into(), make_config!{ @supportstr $name, cfg.$name, $ty, $none_action });
)+)+;
json
})
}

pub fn get_overrides(&self) -> Vec<String> {
@@ -228,29 +265,30 @@ macro_rules! make_config {
let inner = &self.inner.read().unwrap();
inner._overrides.clone()
};

overrides
}
}
};

// Support string print
( @supportstr $name:ident, $value:expr, Pass, option ) => { $value.as_ref().map(|_| String::from("***")) }; // Optional pass, we map to an Option<String> with "***"
( @supportstr $name:ident, $value:expr, Pass, $none_action:ident ) => { String::from("***") }; // Required pass, we return "***"
( @supportstr $name:ident, $value:expr, $ty:ty, option ) => { // Optional other value, we return as is or convert to string to apply the privacy config
( @supportstr $name:ident, $value:expr, Pass, option ) => { serde_json::to_value($value.as_ref().map(|_| String::from("***"))).unwrap() }; // Optional pass, we map to an Option<String> with "***"
( @supportstr $name:ident, $value:expr, Pass, $none_action:ident ) => { "***".into() }; // Required pass, we return "***"
( @supportstr $name:ident, $value:expr, String, option ) => { // Optional other value, we return as is or convert to string to apply the privacy config
if PRIVACY_CONFIG.contains(&stringify!($name)) {
json!($value.as_ref().map(|x| PRIVACY_REGEX.replace_all(&x.to_string(), "${1}*").to_string()))
serde_json::to_value($value.as_ref().map(|x| _privacy_mask(x) )).unwrap()
} else {
json!($value)
serde_json::to_value($value).unwrap()
}
};
( @supportstr $name:ident, $value:expr, $ty:ty, $none_action:ident ) => { // Required other value, we return as is or convert to string to apply the privacy config
( @supportstr $name:ident, $value:expr, String, $none_action:ident ) => { // Required other value, we return as is or convert to string to apply the privacy config
if PRIVACY_CONFIG.contains(&stringify!($name)) {
json!(PRIVACY_REGEX.replace_all(&$value.to_string(), "${1}*").to_string())
} else {
json!($value)
}
_privacy_mask(&$value).into()
} else {
($value).into()
}
};
( @supportstr $name:ident, $value:expr, $ty:ty, option ) => { serde_json::to_value($value).unwrap() }; // Optional other value, we return as is or convert to string to apply the privacy config
( @supportstr $name:ident, $value:expr, $ty:ty, $none_action:ident ) => { ($value).into() }; // Required other value, we return as is or convert to string to apply the privacy config

// Group or empty string
( @show ) => { "" };
@@ -300,14 +338,14 @@ make_config! {
data_folder: String, false, def, "data".to_string();
/// Database URL
database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3");
/// Database connection pool size
database_max_conns: u32, false, def, 10;
/// Icon cache folder
icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache");
/// Attachments folder
attachments_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "attachments");
/// Sends folder
sends_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "sends");
/// Temp folder |> Used for storing temporary file uploads
tmp_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "tmp");
/// Templates folder
templates_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "templates");
/// Session JWT key
@@ -333,12 +371,18 @@ make_config! {
/// Trash purge schedule |> Cron schedule of the job that checks for trashed items to delete permanently.
/// Defaults to daily. Set blank to disable this job.
trash_purge_schedule: String, false, def, "0 5 0 * * *".to_string();
/// Incomplete 2FA login schedule |> Cron schedule of the job that checks for incomplete 2FA logins.
/// Defaults to once every minute. Set blank to disable this job.
incomplete_2fa_schedule: String, false, def, "30 * * * * *".to_string();
/// Emergency notification reminder schedule |> Cron schedule of the job that sends expiration reminders to emergency access grantors.
/// Defaults to hourly. Set blank to disable this job.
emergency_notification_reminder_schedule: String, false, def, "0 5 * * * *".to_string();
/// Defaults to hourly. (3 minutes after the hour) Set blank to disable this job.
emergency_notification_reminder_schedule: String, false, def, "0 3 * * * *".to_string();
/// Emergency request timeout schedule |> Cron schedule of the job that grants emergency access requests that have met the required wait time.
/// Defaults to hourly. Set blank to disable this job.
emergency_request_timeout_schedule: String, false, def, "0 5 * * * *".to_string();
/// Defaults to hourly. (7 minutes after the hour) Set blank to disable this job.
emergency_request_timeout_schedule: String, false, def, "0 7 * * * *".to_string();
/// Event cleanup schedule |> Cron schedule of the job that cleans old events from the event table.
/// Defaults to daily. Set blank to disable this job.
event_cleanup_schedule: String, false, def, "0 10 0 * * *".to_string();
},

/// General settings
@@ -372,9 +416,17 @@ make_config! {
/// sure to inform all users of any changes to this setting.
trash_auto_delete_days: i64, true, option;

/// Disable icon downloads |> Set to true to disable icon downloading, this would still serve icons from
/// $ICON_CACHE_FOLDER, but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
/// otherwise it will delete them and they won't be downloaded again.
/// Incomplete 2FA time limit |> Number of minutes to wait before a 2FA-enabled login is
/// considered incomplete, resulting in an email notification. An incomplete 2FA login is one
/// where the correct master password was provided but the required 2FA step was not completed,
/// which potentially indicates a master password compromise. Set to 0 to disable this check.
/// This setting applies globally to all users.
incomplete_2fa_time_limit: i64, true, def, 3;

/// Disable icon downloads |> Set to true to disable icon downloading in the internal icon service.
/// This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
/// network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
/// will be deleted eventually, but won't be downloaded again.
disable_icon_download: bool, true, def, false;
/// Allow new signups |> Controls whether new users can register. Users can be invited by the vaultwarden admin even if this is disabled
signups_allowed: bool, true, def, true;
@@ -385,17 +437,24 @@ make_config! {
/// If signups require email verification, limit how many emails are automatically sent when login is attempted (0 means no limit)
signups_verify_resend_limit: u32, true, def, 6;
/// Email domain whitelist |> Allow signups only from this list of comma-separated domains, even when signups are otherwise disabled
signups_domains_whitelist: String, true, def, "".to_string();
signups_domains_whitelist: String, true, def, String::new();
/// Enable event logging |> Enables event logging for organizations.
org_events_enabled: bool, false, def, false;
/// Org creation users |> Allow org creation only by this list of comma-separated user emails.
/// Blank or 'all' means all users can create orgs; 'none' means no users can create orgs.
org_creation_users: String, true, def, "".to_string();
org_creation_users: String, true, def, String::new();
/// Allow invitations |> Controls whether users can be invited by organization admins, even when signups are otherwise disabled
invitations_allowed: bool, true, def, true;
/// Invitation token expiration time (in hours) |> The number of hours after which an organization invite token, emergency access invite token,
/// email verification token and deletion request token will expire (must be at least 1)
invitation_expiration_hours: u32, false, def, 120;
/// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
emergency_access_allowed: bool, true, def, true;
/// Password iterations |> Number of server-side password hashing iterations.
/// The changes only apply when a user changes their password. Not recommended to lower the value
password_iterations: i32, true, def, 100_000;
/// Allow password hints |> Controls whether users can set password hints. This setting applies globally to all users.
password_hints_allowed: bool, true, def, true;
/// Show password hint |> Controls whether a password hint should be shown directly in the web page
/// if SMTP service is not configured. Not recommended for publicly-accessible instances as this
/// provides unauthenticated access to potentially sensitive data.
@@ -406,6 +465,9 @@ make_config! {

/// Invitation organization name |> Name shown in the invitation emails that don't come from a specific organization
invitation_org_name: String, true, def, "Vaultwarden".to_string();

/// Events days retain |> Number of days to retain events stored in the database. If unset, events are kept indefinitely.
events_days_retain: i64, false, option;
},

/// Advanced settings
@@ -415,6 +477,23 @@ make_config! {
ip_header: String, true, def, "X-Real-IP".to_string();
/// Internal IP header property, used to avoid recomputing each time
_ip_header_enabled: bool, false, gen, |c| &c.ip_header.trim().to_lowercase() != "none";
/// Icon service |> The predefined icon services are: internal, bitwarden, duckduckgo, google.
/// To specify a custom icon service, set a URL template with exactly one instance of `{}`,
/// which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
/// `internal` refers to Vaultwarden's built-in icon fetching implementation. If an external
/// service is set, an icon request to Vaultwarden will return an HTTP redirect to the
/// corresponding icon at the external service.
icon_service: String, false, def, "internal".to_string();
/// _icon_service_url
_icon_service_url: String, false, gen, |c| generate_icon_service_url(&c.icon_service);
/// _icon_service_csp
_icon_service_csp: String, false, gen, |c| generate_icon_service_csp(&c.icon_service, &c._icon_service_url);
/// Icon redirect code |> The HTTP status code to use for redirects to an external icon service.
/// The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
/// Temporary redirects are useful while testing different icon services, but once a service
/// has been decided on, consider using permanent redirects for cacheability. The legacy codes
/// are currently better supported by the Bitwarden clients.
icon_redirect_code: u32, true, def, 302;
/// Positive icon cache expiry |> Number of seconds to consider that an already cached icon is fresh. After this period, the icon will be redownloaded
icon_cache_ttl: u64, true, def, 2_592_000;
/// Negative icon cache expiry |> Number of seconds before trying to download an icon that failed again.
@@ -461,11 +540,33 @@ make_config! {
/// Max database connection retries |> Number of times to retry the database connection during startup, with 1 second between each retry, set to 0 to retry indefinitely
db_connection_retries: u32, false, def, 15;

/// Timeout when acquiring a database connection
database_timeout: u64, false, def, 30;

/// Database connection pool size
database_max_conns: u32, false, def, 10;

/// Database connection init |> SQL statements to run when creating a new database connection, mainly useful for connection-scoped pragmas. If empty, a database-specific default is used.
database_conn_init: String, false, def, String::new();

/// Bypass admin page security (Know the risks!) |> Disables the Admin Token for the admin page so you may use your own auth in-front
disable_admin_token: bool, true, def, false;
disable_admin_token: bool, false, def, false;

/// Allowed iframe ancestors (Know the risks!) |> Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
allowed_iframe_ancestors: String, true, def, String::new();

/// Seconds between login requests |> Number of seconds, on average, between login and 2FA requests from the same IP address before rate limiting kicks in
login_ratelimit_seconds: u64, false, def, 60;
/// Max burst size for login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `login_ratelimit_seconds`. Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2
login_ratelimit_max_burst: u32, false, def, 10;

/// Seconds between admin login requests |> Number of seconds, on average, between admin requests from the same IP address before rate limiting kicks in
admin_ratelimit_seconds: u64, false, def, 300;
/// Max burst size for admin login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `admin_ratelimit_seconds`
admin_ratelimit_max_burst: u32, false, def, 3;

/// Enable groups (BETA!) (Know the risks!) |> Enables groups support for organizations (Currently contains known issues!).
org_groups_enabled: bool, false, def, false;
},

/// Yubikey settings
@@ -500,12 +601,14 @@ make_config! {
_enable_smtp: bool, true, def, true;
/// Host
smtp_host: String, true, option;
/// Enable Secure SMTP |> (Explicit) - Enabling this by default would use STARTTLS (Standard ports 587 or 25)
smtp_ssl: bool, true, def, true;
/// Force TLS |> (Implicit) - Enabling this would force the use of an SSL/TLS connection, instead of upgrading an insecure one with STARTTLS (Standard port 465)
smtp_explicit_tls: bool, true, def, false;
/// DEPRECATED smtp_ssl |> DEPRECATED - Please use SMTP_SECURITY
smtp_ssl: bool, false, option;
/// DEPRECATED smtp_explicit_tls |> DEPRECATED - Please use SMTP_SECURITY
smtp_explicit_tls: bool, false, option;
/// Secure SMTP |> ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off" for no encryption
smtp_security: String, true, auto, |c| smtp_convert_deprecated_ssl_options(c.smtp_ssl, c.smtp_explicit_tls); // TODO: After deprecation make it `def, "starttls".to_string()`
/// Port
smtp_port: u16, true, auto, |c| if c.smtp_explicit_tls {465} else if c.smtp_ssl {587} else {25};
smtp_port: u16, true, auto, |c| if c.smtp_security == *"force_tls" {465} else if c.smtp_security == *"starttls" {587} else {25};
/// From Address
smtp_from: String, true, def, String::new();
/// From Name
@@ -520,6 +623,10 @@ make_config! {
smtp_timeout: u64, true, def, 15;
/// Server name sent during HELO |> By default this value is based on the machine's hostname, but it might need to be changed in case it trips some anti-spam filters
helo_name: String, true, option;
/// Embed images as email attachments.
smtp_embed_images: bool, true, def, true;
/// _smtp_img_src
_smtp_img_src: String, false, gen, |c| generate_smtp_img_src(c.smtp_embed_images, &c.domain);
/// Enable SMTP debugging (Know the risks!) |> DANGEROUS: Enabling this will output very detailed SMTP messages. This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
smtp_debug: bool, false, def, false;
/// Accept Invalid Certs (Know the risks!) |> DANGEROUS: Allow invalid certificates. This option introduces significant vulnerabilities to man-in-the-middle attacks!
@@ -532,8 +639,8 @@ make_config! {
email_2fa: _enable_email_2fa {
/// Enabled |> Disabling will prevent users from setting up new email 2FA and using existing email 2FA configured
_enable_email_2fa: bool, true, auto, |c| c._enable_smtp && c.smtp_host.is_some();
/// Email token size |> Number of digits in an email token (min: 6, max: 19). Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting.
email_token_size: u32, true, def, 6;
/// Email token size |> Number of digits in an email 2FA token (min: 6, max: 255). Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting.
email_token_size: u8, true, def, 6;
/// Token expiration time |> Maximum time in seconds a token is valid. The time the user has to open the email client and copy the token.
email_expiration_time: u64, true, def, 600;
/// Maximum attempts |> Maximum attempts before an email token is reset and a new email will need to be sent
@@ -543,7 +650,15 @@ make_config! {

fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
// Validate connection URL is valid and DB feature is enabled
DbConnType::from_url(&cfg.database_url)?;
let url = &cfg.database_url;
if DbConnType::from_url(url)? == DbConnType::sqlite && url.contains('/') {
let path = std::path::Path::new(&url);
if let Some(parent) = path.parent() {
if !parent.is_dir() {
err!(format!("SQLite database directory `{}` does not exist or is not a directory", parent.display()));
}
}
}

let limit = 256;
if cfg.database_max_conns < 1 || cfg.database_max_conns > limit {
@@ -588,6 +703,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
}

if cfg._enable_smtp {
match cfg.smtp_security.as_str() {
"off" | "starttls" | "force_tls" => (),
_ => err!(
"`SMTP_SECURITY` is invalid. It needs to be one of the following options: starttls, force_tls or off"
),
}

if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
}
@@ -607,21 +729,72 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
if cfg._enable_email_2fa && cfg.email_token_size < 6 {
err!("`EMAIL_TOKEN_SIZE` has a minimum size of 6")
}

if cfg._enable_email_2fa && cfg.email_token_size > 19 {
err!("`EMAIL_TOKEN_SIZE` has a maximum size of 19")
}
}

// Check if the icon blacklist regex is valid
if let Some(ref r) = cfg.icon_blacklist_regex {
let validate_regex = Regex::new(r);
let validate_regex = regex::Regex::new(r);
match validate_regex {
Ok(_) => (),
Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {:#?}", e)),
}
}

// Check if the icon service is valid
let icon_service = cfg.icon_service.as_str();
match icon_service {
"internal" | "bitwarden" | "duckduckgo" | "google" => (),
_ => {
if !icon_service.starts_with("http") {
err!(format!("Icon service URL `{}` must start with \"http\"", icon_service))
}
match icon_service.matches("{}").count() {
1 => (), // nominal
0 => err!(format!("Icon service URL `{}` has no placeholder \"{{}}\"", icon_service)),
_ => err!(format!("Icon service URL `{}` has more than one placeholder \"{{}}\"", icon_service)),
}
}
}

// Check if the icon redirect code is valid
match cfg.icon_redirect_code {
301 | 302 | 307 | 308 => (),
_ => err!("Only HTTP 301/302 and 307/308 redirects are supported"),
}

if cfg.invitation_expiration_hours < 1 {
err!("`INVITATION_EXPIRATION_HOURS` has a minimum duration of 1 hour")
}

// Validate schedule crontab format
if !cfg.send_purge_schedule.is_empty() && cfg.send_purge_schedule.parse::<Schedule>().is_err() {
err!("`SEND_PURGE_SCHEDULE` is not a valid cron expression")
}

if !cfg.trash_purge_schedule.is_empty() && cfg.trash_purge_schedule.parse::<Schedule>().is_err() {
err!("`TRASH_PURGE_SCHEDULE` is not a valid cron expression")
}

if !cfg.incomplete_2fa_schedule.is_empty() && cfg.incomplete_2fa_schedule.parse::<Schedule>().is_err() {
err!("`INCOMPLETE_2FA_SCHEDULE` is not a valid cron expression")
}

if !cfg.emergency_notification_reminder_schedule.is_empty()
&& cfg.emergency_notification_reminder_schedule.parse::<Schedule>().is_err()
{
err!("`EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE` is not a valid cron expression")
}

if !cfg.emergency_request_timeout_schedule.is_empty()
&& cfg.emergency_request_timeout_schedule.parse::<Schedule>().is_err()
{
err!("`EMERGENCY_REQUEST_TIMEOUT_SCHEDULE` is not a valid cron expression")
}

if !cfg.event_cleanup_schedule.is_empty() && cfg.event_cleanup_schedule.parse::<Schedule>().is_err() {
err!("`EVENT_CLEANUP_SCHEDULE` is not a valid cron expression")
}

Ok(())
}
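
For reference, the `Schedule` parses above come from the `cron` crate (6/7-field expressions with a leading seconds column). A hedged standalone check, assuming that crate as a dependency:

use cron::Schedule;

fn main() {
    // Field order: sec min hour day-of-month month day-of-week — daily at 00:05:00.
    assert!("0 5 0 * * *".parse::<Schedule>().is_ok());
    // A malformed expression is exactly what the err!() checks above reject.
    assert!("not a cron".parse::<Schedule>().is_err());
}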

@@ -648,6 +821,56 @@ fn extract_url_path(url: &str) -> String {
}
}

fn generate_smtp_img_src(embed_images: bool, domain: &str) -> String {
if embed_images {
"cid:".to_string()
} else {
format!("{domain}/vw_static/")
}
}
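
A quick sanity check of both branches (the domain value is a made-up example):

#[cfg(test)]
mod smtp_img_src_tests {
    use super::generate_smtp_img_src;

    #[test]
    fn cid_or_static_route() {
        // Embedded images are referenced by Content-ID in the mail body.
        assert_eq!(generate_smtp_img_src(true, "https://vault.example.com"), "cid:");
        // Linked images are served from the instance's static route instead.
        assert_eq!(
            generate_smtp_img_src(false, "https://vault.example.com"),
            "https://vault.example.com/vw_static/"
        );
    }
}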

/// Generate the correct URL for the icon service.
/// This will be used within icons.rs to call the external icon service.
fn generate_icon_service_url(icon_service: &str) -> String {
match icon_service {
"internal" => String::new(),
"bitwarden" => "https://icons.bitwarden.net/{}/icon.png".to_string(),
"duckduckgo" => "https://icons.duckduckgo.com/ip3/{}.ico".to_string(),
"google" => "https://www.google.com/s2/favicons?domain={}&sz=32".to_string(),
_ => icon_service.to_string(),
}
}

/// Generate the CSP string needed to allow redirected icon fetching
fn generate_icon_service_csp(icon_service: &str, icon_service_url: &str) -> String {
// We split on the first '{', since that is the variable delimiter for an icon service URL.
// Everything up until the first '{' should be fixed and can be used as a CSP string.
let csp_string = match icon_service_url.split_once('{') {
Some((c, _)) => c.to_string(),
None => String::new(),
};

// Because Google does a second redirect to their gstatic.com domain, we need to add an extra CSP string.
match icon_service {
"google" => csp_string + " https://*.gstatic.com/favicon",
_ => csp_string,
}
}
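
The Google special case is the interesting one; this sketch derives the CSP source list from the two functions above:

#[cfg(test)]
mod icon_csp_tests {
    use super::{generate_icon_service_csp, generate_icon_service_url};

    #[test]
    fn google_gets_gstatic_fallback() {
        let url = generate_icon_service_url("google");
        // Everything before the '{}' placeholder becomes the CSP source,
        // plus the gstatic.com target Google redirects to.
        assert_eq!(
            generate_icon_service_csp("google", &url),
            "https://www.google.com/s2/favicons?domain= https://*.gstatic.com/favicon"
        );
    }
}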

/// Convert the old SMTP_SSL and SMTP_EXPLICIT_TLS options
fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls: Option<bool>) -> String {
if smtp_explicit_tls.is_some() || smtp_ssl.is_some() {
println!("[DEPRECATED]: `SMTP_SSL` or `SMTP_EXPLICIT_TLS` is set. Please use `SMTP_SECURITY` instead.");
}
if smtp_explicit_tls.is_some() && smtp_explicit_tls.unwrap() {
return "force_tls".to_string();
} else if smtp_ssl.is_some() && !smtp_ssl.unwrap() {
return "off".to_string();
}
// Return the default `starttls` in all other cases
"starttls".to_string()
}
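
The mapping of the deprecated flags reduces to a small truth table; these checks follow directly from the branches above:

#[cfg(test)]
mod smtp_security_migration_tests {
    use super::smtp_convert_deprecated_ssl_options;

    #[test]
    fn maps_deprecated_flags() {
        // SMTP_EXPLICIT_TLS=true wins over everything else.
        assert_eq!(smtp_convert_deprecated_ssl_options(Some(true), Some(true)), "force_tls");
        // SMTP_SSL=false (without explicit TLS) disables encryption.
        assert_eq!(smtp_convert_deprecated_ssl_options(Some(false), None), "off");
        // Unset (or SMTP_SSL=true) falls back to the default.
        assert_eq!(smtp_convert_deprecated_ssl_options(None, None), "starttls");
    }
}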

impl Config {
pub fn load() -> Result<Self, Error> {
// Loading from env and file
@@ -664,6 +887,8 @@ impl Config {

Ok(Config {
inner: RwLock::new(Inner {
rocket_shutdown_handle: None,
ws_shutdown_handle: None,
templates: load_templates(&config.templates_folder),
config,
_env,
@@ -707,7 +932,7 @@ impl Config {
Ok(())
}

pub fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
let builder = {
let usr = &self.inner.read().unwrap()._usr;
let mut _overrides = Vec::new();
@@ -793,8 +1018,7 @@ impl Config {
if let Some(akey) = self._duo_akey() {
akey
} else {
let akey = crate::crypto::get_random_64();
let akey_s = data_encoding::BASE64.encode(&akey);
let akey_s = crate::crypto::encode_random_bytes::<64>(data_encoding::BASE64);

// Save the new value
let builder = ConfigBuilder {
@@ -828,6 +1052,26 @@ impl Config {
hb.render(name, data).map_err(Into::into)
}
}

pub fn set_rocket_shutdown_handle(&self, handle: rocket::Shutdown) {
self.inner.write().unwrap().rocket_shutdown_handle = Some(handle);
}

pub fn set_ws_shutdown_handle(&self, handle: tokio::sync::oneshot::Sender<()>) {
self.inner.write().unwrap().ws_shutdown_handle = Some(handle);
}

pub fn shutdown(&self) {
if let Ok(mut c) = self.inner.write() {
if let Some(handle) = c.ws_shutdown_handle.take() {
handle.send(()).ok();
}

if let Some(handle) = c.rocket_shutdown_handle.take() {
handle.notify();
}
}
}
}

use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, Renderable};
@@ -861,8 +1105,6 @@ where

reg!("email/change_email", ".html");
reg!("email/delete_account", ".html");
reg!("email/invite_accepted", ".html");
reg!("email/invite_confirmed", ".html");
reg!("email/emergency_access_invite_accepted", ".html");
reg!("email/emergency_access_invite_confirmed", ".html");
reg!("email/emergency_access_recovery_approved", ".html");
@@ -870,6 +1112,9 @@ where
reg!("email/emergency_access_recovery_rejected", ".html");
reg!("email/emergency_access_recovery_reminder", ".html");
reg!("email/emergency_access_recovery_timed_out", ".html");
reg!("email/incomplete_2fa_login", ".html");
reg!("email/invite_accepted", ".html");
reg!("email/invite_confirmed", ".html");
reg!("email/new_device_logged_in", ".html");
reg!("email/pw_hint_none", ".html");
reg!("email/pw_hint_some", ".html");
@@ -890,6 +1135,8 @@ where
reg!("admin/organizations");
reg!("admin/diagnostics");

reg!("404");

// And then load user templates to overwrite the defaults
// Use .hbs extension for the files
// Templates get registered with their relative name
@@ -900,7 +1147,7 @@ where

fn case_helper<'reg, 'rc>(
h: &Helper<'reg, 'rc>,
r: &'reg Handlebars,
r: &'reg Handlebars<'_>,
ctx: &'rc Context,
rc: &mut RenderContext<'reg, 'rc>,
out: &mut dyn Output,
@@ -909,7 +1156,7 @@ fn case_helper<'reg, 'rc>(
let value = param.value().clone();

if h.params().iter().skip(1).any(|x| x.value() == &value) {
h.template().map(|t| t.render(r, ctx, rc, out)).unwrap_or(Ok(()))
h.template().map(|t| t.render(r, ctx, rc, out)).unwrap_or_else(|| Ok(()))
} else {
Ok(())
}
@@ -917,17 +1164,16 @@ fn case_helper<'reg, 'rc>(

fn js_escape_helper<'reg, 'rc>(
h: &Helper<'reg, 'rc>,
_r: &'reg Handlebars,
_r: &'reg Handlebars<'_>,
_ctx: &'rc Context,
_rc: &mut RenderContext<'reg, 'rc>,
out: &mut dyn Output,
) -> HelperResult {
let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;
let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"jsesc\""))?;

let no_quote = h.param(1).is_some();

let value =
param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;
let value = param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"jsesc\" is not a String"))?;

let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
if !no_quote {

@@ -3,11 +3,9 @@
//
use std::num::NonZeroU32;

use data_encoding::HEXLOWER;
use data_encoding::{Encoding, HEXLOWER};
use ring::{digest, hmac, pbkdf2};

use crate::error::Error;

static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;

@@ -39,49 +37,72 @@ pub fn hmac_sign(key: &str, data: &str) -> String {
// Random values
//

pub fn get_random_64() -> Vec<u8> {
get_random(vec![0u8; 64])
}

pub fn get_random(mut array: Vec<u8>) -> Vec<u8> {
/// Return an array holding `N` random bytes.
pub fn get_random_bytes<const N: usize>() -> [u8; N] {
use ring::rand::{SecureRandom, SystemRandom};

let mut array = [0; N];
SystemRandom::new().fill(&mut array).expect("Error generating random values");

array
}

pub fn generate_id(num_bytes: usize) -> String {
HEXLOWER.encode(&get_random(vec![0; num_bytes]))
/// Encode random bytes using the provided function.
pub fn encode_random_bytes<const N: usize>(e: Encoding) -> String {
e.encode(&get_random_bytes::<N>())
}
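
Usage sketch: the const generic fixes the byte count at compile time, so an ID helper is just a choice of encoding. For example, 32 random bytes hex-encode to exactly 64 lowercase characters:

#[cfg(test)]
mod random_bytes_tests {
    use super::encode_random_bytes;

    #[test]
    fn hex_length_matches_byte_count() {
        // Each byte becomes two hex digits.
        let s = encode_random_bytes::<32>(data_encoding::HEXLOWER);
        assert_eq!(s.len(), 64);
        assert!(s.bytes().all(|b| b.is_ascii_hexdigit()));
    }
}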

/// Generates a random string over a specified alphabet.
pub fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
// Ref: https://rust-lang-nursery.github.io/rust-cookbook/algorithms/randomness.html
use rand::Rng;
let mut rng = rand::thread_rng();

(0..num_chars)
.map(|_| {
let i = rng.gen_range(0..alphabet.len());
alphabet[i] as char
})
.collect()
}
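
A small property check that follows from the sampling above: the output length matches the request, and every character is drawn from the given alphabet.

#[cfg(test)]
mod random_string_tests {
    use super::get_random_string;

    #[test]
    fn stays_within_alphabet() {
        let s = get_random_string(b"abc", 16);
        assert_eq!(s.chars().count(), 16);
        assert!(s.chars().all(|c| matches!(c, 'a' | 'b' | 'c')));
    }
}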

/// Generates a random numeric string.
pub fn get_random_string_numeric(num_chars: usize) -> String {
const ALPHABET: &[u8] = b"0123456789";
get_random_string(ALPHABET, num_chars)
}

/// Generates a random alphanumeric string.
pub fn get_random_string_alphanum(num_chars: usize) -> String {
const ALPHABET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
get_random_string(ALPHABET, num_chars)
}

pub fn generate_id<const N: usize>() -> String {
encode_random_bytes::<N>(HEXLOWER)
}

pub fn generate_send_id() -> String {
// Send IDs are globally scoped, so make them longer to avoid collisions.
generate_id(32) // 256 bits
generate_id::<32>() // 256 bits
}

pub fn generate_attachment_id() -> String {
// Attachment IDs are scoped to a cipher, so they can be smaller.
generate_id(10) // 80 bits
generate_id::<10>() // 80 bits
}

pub fn generate_token(token_size: u32) -> Result<String, Error> {
// A u64 can represent all whole numbers up to 19 digits long.
if token_size > 19 {
err!("Token size is limited to 19 digits")
}
/// Generates a numeric token for email-based verifications.
pub fn generate_email_token(token_size: u8) -> String {
get_random_string_numeric(token_size as usize)
}
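
The new alphabet-sampling token is infallible (unlike the old u64-based one, whose remnants follow below), so a quick check reduces to width and digit class:

#[cfg(test)]
mod email_token_tests {
    use super::generate_email_token;

    #[test]
    fn fixed_width_numeric() {
        let t = generate_email_token(6);
        assert_eq!(t.len(), 6);
        assert!(t.bytes().all(|b| b.is_ascii_digit()));
    }
}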

let low: u64 = 0;
let high: u64 = 10u64.pow(token_size);

// Generate a random number in the range [low, high), then format it as a
// token of fixed width, left-padding with 0 as needed.
use rand::{thread_rng, Rng};
let mut rng = thread_rng();
let number: u64 = rng.gen_range(low..high);
let token = format!("{:0size$}", number, size = token_size as usize);

Ok(token)
/// Generates a personal API key.
/// Upstream uses 30 chars, which is ~178 bits of entropy.
pub fn generate_api_key() -> String {
get_random_string_alphanum(30)
}

//

src/db/mod.rs (297 changed lines)
@@ -1,8 +1,20 @@
use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
use std::{sync::Arc, time::Duration};

use diesel::{
connection::SimpleConnection,
r2d2::{ConnectionManager, CustomizeConnection, Pool, PooledConnection},
};

use rocket::{
http::Status,
outcome::IntoOutcome,
request::{FromRequest, Outcome},
Request, State,
Request,
};

use tokio::{
sync::{Mutex, OwnedSemaphorePermit, Semaphore},
time::timeout,
};

use crate::{
@@ -22,6 +34,23 @@ pub mod __mysql_schema;
#[path = "schemas/postgresql/schema.rs"]
pub mod __postgresql_schema;

// These changes are based on the Rocket 0.5-rc wrapper of Diesel: https://github.com/SergioBenitez/Rocket/blob/v0.5-rc/contrib/sync_db_pools

// A wrapper around spawn_blocking that propagates panics to the calling code.
pub async fn run_blocking<F, R>(job: F) -> R
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
match tokio::task::spawn_blocking(job).await {
Ok(ret) => ret,
Err(e) => match e.try_into_panic() {
Ok(panic) => std::panic::resume_unwind(panic),
Err(_) => unreachable!("spawn_blocking tasks are never cancelled"),
},
}
}
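
For illustration, a hypothetical caller (not part of this diff): the closure runs on tokio's blocking thread pool, the await point receives the result, and a panic inside the closure re-raises at the call site instead of being swallowed.

async fn size_on_disk(path: std::path::PathBuf) -> std::io::Result<u64> {
    // Blocking filesystem call moved off the async executor.
    run_blocking(move || std::fs::metadata(&path).map(|m| m.len())).await
}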

// This is used to generate the main DbConn and DbPool enums, which contain one variant for each database supported
macro_rules! generate_connections {
( $( $name:ident: $ty:ty ),+ ) => {
@@ -29,15 +58,73 @@ macro_rules! generate_connections {
#[derive(Eq, PartialEq)]
pub enum DbConnType { $( $name, )+ }

pub struct DbConn {
conn: Arc<Mutex<Option<DbConnInner>>>,
permit: Option<OwnedSemaphorePermit>,
}

#[allow(non_camel_case_types)]
pub enum DbConn { $( #[cfg($name)] $name(PooledConnection<ConnectionManager< $ty >>), )+ }
pub enum DbConnInner { $( #[cfg($name)] $name(PooledConnection<ConnectionManager< $ty >>), )+ }

#[derive(Debug)]
pub struct DbConnOptions {
pub init_stmts: String,
}

$( // Based on <https://stackoverflow.com/a/57717533>.
#[cfg($name)]
impl CustomizeConnection<$ty, diesel::r2d2::Error> for DbConnOptions {
fn on_acquire(&self, conn: &mut $ty) -> Result<(), diesel::r2d2::Error> {
(|| {
if !self.init_stmts.is_empty() {
conn.batch_execute(&self.init_stmts)?;
}
Ok(())
})().map_err(diesel::r2d2::Error::QueryError)
}
})+

#[derive(Clone)]
pub struct DbPool {
// This is an 'Option' so that we can drop the pool in a 'spawn_blocking'.
pool: Option<DbPoolInner>,
semaphore: Arc<Semaphore>
}

#[allow(non_camel_case_types)]
#[derive(Clone)]
pub enum DbPool { $( #[cfg($name)] $name(Pool<ConnectionManager< $ty >>), )+ }
pub enum DbPoolInner { $( #[cfg($name)] $name(Pool<ConnectionManager< $ty >>), )+ }

impl Drop for DbConn {
fn drop(&mut self) {
let conn = self.conn.clone();
let permit = self.permit.take();

// Since connection can't be on the stack in an async fn during an
// await, we have to spawn a new blocking-safe thread...
tokio::task::spawn_blocking(move || {
// And then re-enter the runtime to wait on the async mutex, but in a blocking fashion.
let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned());

if let Some(conn) = conn.take() {
drop(conn);
}

// Drop permit after the connection is dropped
drop(permit);
});
}
}

impl Drop for DbPool {
fn drop(&mut self) {
let pool = self.pool.take();
tokio::task::spawn_blocking(move || drop(pool));
}
}

impl DbPool {
// For the given database URL, guess it's type, run migrations create pool and return it
// For the given database URL, guess its type, run migrations, create pool, and return it
pub fn from_config() -> Result<Self, Error> {
let url = CONFIG.database_url();
let conn_type = DbConnType::from_url(&url)?;
@@ -50,9 +137,16 @@ macro_rules! generate_connections {
let manager = ConnectionManager::new(&url);
let pool = Pool::builder()
.max_size(CONFIG.database_max_conns())
.connection_timeout(Duration::from_secs(CONFIG.database_timeout()))
.connection_customizer(Box::new(DbConnOptions{
init_stmts: conn_type.get_init_stmts()
}))
.build(manager)
.map_res("Failed to create pool")?;
return Ok(Self::$name(pool));
return Ok(DbPool {
pool: Some(DbPoolInner::$name(pool)),
semaphore: Arc::new(Semaphore::new(CONFIG.database_max_conns() as usize)),
});
}
#[cfg(not($name))]
#[allow(unreachable_code)]
@@ -61,22 +155,46 @@ macro_rules! generate_connections {
)+ }
}
// Get a connection from the pool
pub fn get(&self) -> Result<DbConn, Error> {
match self { $(
pub async fn get(&self) -> Result<DbConn, Error> {
let duration = Duration::from_secs(CONFIG.database_timeout());
let permit = match timeout(duration, self.semaphore.clone().acquire_owned()).await {
Ok(p) => p.expect("Semaphore should be open"),
Err(_) => {
err!("Timeout waiting for database connection");
}
};

match self.pool.as_ref().expect("DbPool.pool should always be Some()") { $(
#[cfg($name)]
Self::$name(p) => Ok(DbConn::$name(p.get().map_res("Error retrieving connection from pool")?)),
DbPoolInner::$name(p) => {
let pool = p.clone();
let c = run_blocking(move || pool.get_timeout(duration)).await.map_res("Error retrieving connection from pool")?;

return Ok(DbConn {
conn: Arc::new(Mutex::new(Some(DbConnInner::$name(c)))),
permit: Some(permit)
});
},
)+ }
}
}
};
}
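
A sketch of what the new contract looks like to callers (hypothetical helper, assuming the module's Error type): acquisition is now async, bounded by the semaphore, and fails after DATABASE_TIMEOUT seconds rather than blocking a worker thread indefinitely.

async fn ping_db(pool: &DbPool) -> Result<(), Error> {
    // Holds a semaphore permit for the lifetime of the connection.
    let _conn = pool.get().await?;
    Ok(())
}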

#[cfg(not(query_logger))]
generate_connections! {
sqlite: diesel::sqlite::SqliteConnection,
mysql: diesel::mysql::MysqlConnection,
postgresql: diesel::pg::PgConnection
}

#[cfg(query_logger)]
generate_connections! {
sqlite: diesel_logger::LoggingConnection<diesel::sqlite::SqliteConnection>,
mysql: diesel_logger::LoggingConnection<diesel::mysql::MysqlConnection>,
postgresql: diesel_logger::LoggingConnection<diesel::pg::PgConnection>
}

impl DbConnType {
pub fn from_url(url: &str) -> Result<DbConnType, Error> {
// Mysql
@@ -104,6 +222,23 @@ impl DbConnType {
err!("`DATABASE_URL` looks like a SQLite URL, but 'sqlite' feature is not enabled")
}
}

pub fn get_init_stmts(&self) -> String {
let init_stmts = CONFIG.database_conn_init();
if !init_stmts.is_empty() {
init_stmts
} else {
self.default_init_stmts()
}
}

pub fn default_init_stmts(&self) -> String {
match self {
Self::sqlite => "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;".to_string(),
Self::mysql => String::new(),
Self::postgresql => String::new(),
}
}
}
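
A quick check of the fallback behavior (sticking to the pure `default_init_stmts`, since `get_init_stmts` reads the live CONFIG): an empty DATABASE_CONN_INIT means these per-database defaults are applied to every new connection.

#[cfg(test)]
mod init_stmts_tests {
    use super::DbConnType;

    #[test]
    fn sqlite_gets_pragmas_by_default() {
        assert_eq!(
            DbConnType::sqlite.default_init_stmts(),
            "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
        );
        // MySQL and PostgreSQL run no connection-scoped statements by default.
        assert_eq!(DbConnType::mysql.default_init_stmts(), String::new());
    }
}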

#[macro_export]
@@ -113,42 +248,52 @@ macro_rules! db_run {
db_run! { $conn: sqlite, mysql, postgresql $body }
};

// Different code for each db
( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
#[allow(unused)] use diesel::prelude::*;
match $conn {
$($(
#[cfg($db)]
crate::db::DbConn::$db(ref $conn) => {
paste::paste! {
#[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *};
#[allow(unused)] use [<__ $db _model>]::*;
#[allow(unused)] use crate::db::FromDb;
}
$body
},
)+)+
}}
};

// Same for all dbs
( @raw $conn:ident: $body:block ) => {
db_run! { @raw $conn: sqlite, mysql, postgresql $body }
};

// Different code for each db
( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {
( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
#[allow(unused)] use diesel::prelude::*;
#[allow(unused_variables)]
match $conn {
$($(
#[allow(unused)] use $crate::db::FromDb;

let conn = $conn.conn.clone();
let mut conn = conn.lock_owned().await;
match conn.as_mut().expect("internal invariant broken: self.connection is Some") {
$($(
#[cfg($db)]
crate::db::DbConn::$db(ref $conn) => {
$body
$crate::db::DbConnInner::$db($conn) => {
paste::paste! {
#[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
#[allow(unused)] use [<__ $db _model>]::*;
}

tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead
},
)+)+
}
};
}};

( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
#[allow(unused)] use diesel::prelude::*;
#[allow(unused)] use $crate::db::FromDb;

let conn = $conn.conn.clone();
let mut conn = conn.lock_owned().await;
match conn.as_mut().expect("internal invariant broken: self.connection is Some") {
$($(
#[cfg($db)]
$crate::db::DbConnInner::$db($conn) => {
paste::paste! {
#[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
// @ RAW: #[allow(unused)] use [<__ $db _model>]::*;
}

tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead
},
)+)+
}
}};
}

pub trait FromDb {
@@ -201,7 +346,7 @@ macro_rules! db_object {
paste::paste! {
#[allow(unused)] use super::*;
#[allow(unused)] use diesel::prelude::*;
#[allow(unused)] use crate::db::[<__ $db _schema>]::*;
#[allow(unused)] use $crate::db::[<__ $db _schema>]::*;

$( #[$attr] )*
pub struct [<$name Db>] { $(
@@ -213,7 +358,7 @@ macro_rules! db_object {
#[inline(always)] pub fn to_db(x: &super::$name) -> Self { Self { $( $field: x.$field.clone(), )+ } }
}

impl crate::db::FromDb for [<$name Db>] {
impl $crate::db::FromDb for [<$name Db>] {
type Output = super::$name;
#[allow(clippy::wrong_self_convention)]
#[inline(always)] fn from_db(self) -> Self::Output { super::$name { $( $field: self.$field, )+ } }
@@ -227,9 +372,10 @@ pub mod models;

/// Creates a back-up of the sqlite database
/// MySQL/MariaDB and PostgreSQL are not supported.
pub fn backup_database(conn: &DbConn) -> Result<(), Error> {
pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
db_run! {@raw conn:
postgresql, mysql {
let _ = conn;
err!("PostgreSQL and MySQL/MariaDB do not support this backup feature");
}
sqlite {
@@ -244,15 +390,19 @@ pub fn backup_database(conn: &DbConn) -> Result<(), Error> {
}

/// Get the SQL Server version
pub fn get_sql_server_version(conn: &DbConn) -> String {
pub async fn get_sql_server_version(conn: &mut DbConn) -> String {
db_run! {@raw conn:
postgresql, mysql {
no_arg_sql_function!(version, diesel::sql_types::Text);
diesel::select(version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
sql_function!{
fn version() -> diesel::sql_types::Text;
}
diesel::select(version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
}
sqlite {
no_arg_sql_function!(sqlite_version, diesel::sql_types::Text);
diesel::select(sqlite_version).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
sql_function!{
fn sqlite_version() -> diesel::sql_types::Text;
}
diesel::select(sqlite_version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
}
}
}
@@ -260,15 +410,14 @@ pub fn get_sql_server_version(conn: &DbConn) -> String {
/// Attempts to retrieve a single connection from the managed database pool. If
/// no pool is currently managed, fails with an `InternalServerError` status. If
/// no connections are available, fails with a `ServiceUnavailable` status.
impl<'a, 'r> FromRequest<'a, 'r> for DbConn {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for DbConn {
type Error = ();

fn from_request(request: &'a Request<'r>) -> Outcome<DbConn, ()> {
// https://github.com/SergioBenitez/Rocket/commit/e3c1a4ad3ab9b840482ec6de4200d30df43e357c
let pool = try_outcome!(request.guard::<State<DbPool>>());
match pool.get() {
Ok(conn) => Outcome::Success(conn),
Err(_) => Outcome::Failure((Status::ServiceUnavailable, ())),
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
match request.rocket().state::<DbPool>() {
Some(p) => p.get().await.map_err(|_| ()).into_outcome(Status::ServiceUnavailable),
None => Outcome::Failure((Status::InternalServerError, ())),
}
}
}
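
For context, a hypothetical Rocket 0.5 route using the rewritten guard (not part of this diff): the connection now arrives asynchronously and is used mutably.

#[rocket::get("/db-version")]
async fn db_version(mut conn: DbConn) -> String {
    // The guard already handled pool lookup and the acquisition timeout.
    get_sql_server_version(&mut conn).await
}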

@@ -278,68 +427,64 @@ impl<'a, 'r> FromRequest<'a, 'r> for DbConn {
// https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html
#[cfg(sqlite)]
mod sqlite_migrations {
embed_migrations!("migrations/sqlite");
use diesel_migrations::{EmbeddedMigrations, MigrationHarness};
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/sqlite");

pub fn run_migrations() -> Result<(), super::Error> {
// Make sure the directory exists
let url = crate::CONFIG.database_url();
let path = std::path::Path::new(&url);

if let Some(parent) = path.parent() {
if std::fs::create_dir_all(parent).is_err() {
error!("Error creating database directory");
std::process::exit(1);
}
}

use diesel::{Connection, RunQueryDsl};
// Make sure the database is up to date (create if it doesn't exist, or run the migrations)
let connection = diesel::sqlite::SqliteConnection::establish(&crate::CONFIG.database_url())?;
// Disable Foreign Key Checks during migration
let url = crate::CONFIG.database_url();

// Establish a connection to the sqlite database (this will create a new one, if it does
// not exist, and exit if there is an error).
let mut connection = diesel::sqlite::SqliteConnection::establish(&url)?;

// Run the migrations after successfully establishing a connection
// Disable Foreign Key Checks during migration
// Scoped to a connection.
diesel::sql_query("PRAGMA foreign_keys = OFF")
.execute(&connection)
.execute(&mut connection)
.expect("Failed to disable Foreign Key Checks during migrations");

// Turn on WAL in SQLite
if crate::CONFIG.enable_db_wal() {
diesel::sql_query("PRAGMA journal_mode=wal").execute(&connection).expect("Failed to turn on WAL");
diesel::sql_query("PRAGMA journal_mode=wal").execute(&mut connection).expect("Failed to turn on WAL");
}

embedded_migrations::run_with_output(&connection, &mut std::io::stdout())?;
connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations");
Ok(())
}
}

#[cfg(mysql)]
mod mysql_migrations {
embed_migrations!("migrations/mysql");
use diesel_migrations::{EmbeddedMigrations, MigrationHarness};
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/mysql");

pub fn run_migrations() -> Result<(), super::Error> {
use diesel::{Connection, RunQueryDsl};
// Make sure the database is up to date (create if it doesn't exist, or run the migrations)
let connection = diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?;
let mut connection = diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?;
// Disable Foreign Key Checks during migration

// Scoped to a connection/session.
diesel::sql_query("SET FOREIGN_KEY_CHECKS = 0")
.execute(&connection)
.execute(&mut connection)
.expect("Failed to disable Foreign Key Checks during migrations");

embedded_migrations::run_with_output(&connection, &mut std::io::stdout())?;
connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations");
Ok(())
}
}

#[cfg(postgresql)]
mod postgresql_migrations {
embed_migrations!("migrations/postgresql");
use diesel_migrations::{EmbeddedMigrations, MigrationHarness};
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/postgresql");

pub fn run_migrations() -> Result<(), super::Error> {
use diesel::{Connection, RunQueryDsl};
// Make sure the database is up to date (create if it doesn't exist, or run the migrations)
let connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
let mut connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
// Disable Foreign Key Checks during migration

// FIXME: Per https://www.postgresql.org/docs/12/sql-set-constraints.html,
@@ -349,10 +494,10 @@ mod postgresql_migrations {
// Migrations that need to disable foreign key checks should run this
// from within the migration script itself.
diesel::sql_query("SET CONSTRAINTS ALL DEFERRED")
.execute(&connection)
.execute(&mut connection)
.expect("Failed to disable Foreign Key Checks during migrations");

embedded_migrations::run_with_output(&connection, &mut std::io::stdout())?;
connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations");
Ok(())
}
}

@@ -2,15 +2,13 @@ use std::io::ErrorKind;

use serde_json::Value;

use super::Cipher;
use crate::CONFIG;

db_object! {
#[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[table_name = "attachments"]
#[changeset_options(treat_none_as_null="true")]
#[belongs_to(super::Cipher, foreign_key = "cipher_uuid")]
#[primary_key(id)]
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = attachments)]
#[diesel(treat_none_as_null = true)]
#[diesel(primary_key(id))]
pub struct Attachment {
pub id: String,
pub cipher_uuid: String,
@@ -60,7 +58,7 @@ use crate::error::MapResult;

/// Database methods
impl Attachment {
pub fn save(&self, conn: &DbConn) -> EmptyResult {
pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
db_run! { conn:
sqlite, mysql {
match diesel::replace_into(attachments::table)
@@ -92,7 +90,7 @@ impl Attachment {
}
}

pub fn delete(&self, conn: &DbConn) -> EmptyResult {
pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: {
crate::util::retry(
|| diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
@@ -116,14 +114,14 @@ impl Attachment {
}}
}

pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
for attachment in Attachment::find_by_cipher(cipher_uuid, conn) {
attachment.delete(conn)?;
pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await {
attachment.delete(conn).await?;
}
Ok(())
}

pub fn find_by_id(id: &str, conn: &DbConn) -> Option<Self> {
pub async fn find_by_id(id: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
attachments::table
.filter(attachments::id.eq(id.to_lowercase()))
@@ -133,7 +131,7 @@ impl Attachment {
}}
}

pub fn find_by_cipher(cipher_uuid: &str, conn: &DbConn) -> Vec<Self> {
pub async fn find_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
attachments::table
.filter(attachments::cipher_uuid.eq(cipher_uuid))
@@ -143,7 +141,7 @@ impl Attachment {
}}
}

pub fn size_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
db_run! { conn: {
let result: Option<i64> = attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -155,7 +153,7 @@ impl Attachment {
}}
}

pub fn count_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
pub async fn count_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
db_run! { conn: {
attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -166,7 +164,7 @@ impl Attachment {
}}
}

pub fn size_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
db_run! { conn: {
let result: Option<i64> = attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -178,7 +176,7 @@ impl Attachment {
}}
}

pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
db_run! { conn: {
attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@@ -188,4 +186,15 @@ impl Attachment {
.unwrap_or(0)
}}
}

pub async fn find_all_by_ciphers(cipher_uuids: &Vec<String>, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
attachments::table
.filter(attachments::cipher_uuid.eq_any(cipher_uuids))
.select(attachments::all_columns)
.load::<AttachmentDb>(conn)
.expect("Error loading attachments")
.from_db()
}}
}
}
|
||||
|
||||
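Editor's note: the bulk of this comparison is a mechanical sync-to-async conversion: every database method gains `async`, takes `conn: &mut DbConn` instead of `conn: &DbConn`, and every call site gains `.await` (with error propagation becoming `.await?`). A minimal before/after sketch of the pattern, with hypothetical names:

    // Before: blocking, shared borrow of the connection.
    pub fn delete_all(key: &str, conn: &DbConn) -> EmptyResult {
        for item in Item::find_by_key(key, conn) {
            item.delete(conn)?;
        }
        Ok(())
    }

    // After: async, exclusive borrow, explicit .await at each call.
    pub async fn delete_all(key: &str, conn: &mut DbConn) -> EmptyResult {
        for item in Item::find_by_key(key, conn).await {
            item.delete(conn).await?;
        }
        Ok(())
    }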
@@ -1,20 +1,20 @@
use crate::CONFIG;
use chrono::{Duration, NaiveDateTime, Utc};
use serde_json::Value;

use crate::CONFIG;

use super::{
    Attachment, CollectionCipher, Favorite, FolderCipher, Organization, User, UserOrgStatus, UserOrgType,
    UserOrganization,
    Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization,
};

use crate::api::core::CipherSyncData;

use std::borrow::Cow;

db_object! {
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[table_name = "ciphers"]
    #[changeset_options(treat_none_as_null="true")]
    #[belongs_to(User, foreign_key = "user_uuid")]
    #[belongs_to(Organization, foreign_key = "organization_uuid")]
    #[primary_key(uuid)]
    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[diesel(table_name = ciphers)]
    #[diesel(treat_none_as_null = true)]
    #[diesel(primary_key(uuid))]
    pub struct Cipher {
        pub uuid: String,
        pub created_at: NaiveDateTime,
@@ -82,22 +82,32 @@ use crate::error::MapResult;

/// Database methods
impl Cipher {
    pub fn to_json(&self, host: &str, user_uuid: &str, conn: &DbConn) -> Value {
    pub async fn to_json(
        &self,
        host: &str,
        user_uuid: &str,
        cipher_sync_data: Option<&CipherSyncData>,
        conn: &mut DbConn,
    ) -> Value {
        use crate::util::format_date;

        let attachments = Attachment::find_by_cipher(&self.uuid, conn);
        // When there are no attachments use null instead of an empty array
        let attachments_json = if attachments.is_empty() {
            Value::Null
        let mut attachments_json: Value = Value::Null;
        if let Some(cipher_sync_data) = cipher_sync_data {
            if let Some(attachments) = cipher_sync_data.cipher_attachments.get(&self.uuid) {
                attachments_json = attachments.iter().map(|c| c.to_json(host)).collect();
            }
        } else {
            attachments.iter().map(|c| c.to_json(host)).collect()
        };
            let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
            if !attachments.is_empty() {
                attachments_json = attachments.iter().map(|c| c.to_json(host)).collect()
            }
        }

        let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
        let password_history_json =
            self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);

        let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, conn) {
        let (read_only, hide_passwords) = match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await {
            Some((ro, hp)) => (ro, hp),
            None => {
                error!("Cipher ownership assertion failure");
@@ -109,7 +119,7 @@ impl Cipher {
        // If not passing an empty object, mobile clients will crash.
        let mut type_data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({}));

        // NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
        // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
        // Set the first element of the Uris array as Uri, this is needed by several (mobile) clients.
        if self.atype == 1 {
            if type_data_json["Uris"].is_array() {
@@ -124,13 +134,23 @@ impl Cipher {
        // Clone the type_data and add some default value.
        let mut data_json = type_data_json.clone();

        // NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
        // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
        // data_json should always contain the following keys with every atype
        data_json["Fields"] = json!(fields_json);
        data_json["Name"] = json!(self.name);
        data_json["Notes"] = json!(self.notes);
        data_json["PasswordHistory"] = json!(password_history_json);

        let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
            if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
                Cow::from(cipher_collections)
            } else {
                Cow::from(Vec::with_capacity(0))
            }
        } else {
            Cow::from(self.get_collections(user_uuid.to_string(), conn).await)
        };

        // There are three types of cipher response models in upstream
        // Bitwarden: "cipherMini", "cipher", and "cipherDetails" (in order
        // of increasing level of detail). vaultwarden currently only
@@ -142,10 +162,11 @@ impl Cipher {
            "Object": "cipherDetails",
            "Id": self.uuid,
            "Type": self.atype,
            "CreationDate": format_date(&self.created_at),
            "RevisionDate": format_date(&self.updated_at),
            "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
            "FolderId": self.get_folder_uuid(user_uuid, conn),
            "Favorite": self.is_favorite(user_uuid, conn),
            "FolderId": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string() ) } else { self.get_folder_uuid(user_uuid, conn).await },
            "Favorite": if let Some(cipher_sync_data) = cipher_sync_data { cipher_sync_data.cipher_favorites.contains(&self.uuid) } else { self.is_favorite(user_uuid, conn).await },
            "Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
            "OrganizationId": self.organization_uuid,
            "Attachments": attachments_json,
@@ -154,7 +175,7 @@ impl Cipher {
            "OrganizationUseTotp": true,

            // This field is specific to the cipherDetails type.
            "CollectionIds": self.get_collections(user_uuid, conn),
            "CollectionIds": collection_ids,

            "Name": self.name,
            "Notes": self.notes,
@@ -189,28 +210,28 @@ impl Cipher {
        json_object
    }

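Editor's note: the `cipher_sync_data: Option<&CipherSyncData>` parameter threaded through `to_json` and the access checks is a prefetched cache for full syncs: when it is `Some`, per-cipher lookups (attachments, folders, favorites, collections) become hash-map reads instead of one query per cipher. A minimal sketch of that cache-or-query shape; the struct's real definition lives in `crate::api::core` and only the fields visible in this diff are shown, with a hypothetical fallback helper:

    use std::collections::HashMap;

    // Prefetched once per sync; keys are cipher UUIDs.
    pub struct CipherSyncData {
        pub cipher_attachments: HashMap<String, Vec<Attachment>>,
        pub cipher_folders: HashMap<String, String>,
    }

    async fn folder_id(cipher_uuid: &str, sync: Option<&CipherSyncData>, conn: &mut DbConn) -> Option<String> {
        match sync {
            // Full sync: O(1) map lookup, no extra query.
            Some(data) => data.cipher_folders.get(cipher_uuid).cloned(),
            // Single-item request: fall back to a targeted query (hypothetical helper).
            None => query_folder_uuid(cipher_uuid, conn).await,
        }
    }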
    pub fn update_users_revision(&self, conn: &DbConn) -> Vec<String> {
    pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<String> {
        let mut user_uuids = Vec::new();
        match self.user_uuid {
            Some(ref user_uuid) => {
                User::update_uuid_revision(user_uuid, conn);
                User::update_uuid_revision(user_uuid, conn).await;
                user_uuids.push(user_uuid.clone())
            }
            None => {
                // Belongs to Organization, need to update affected users
                if let Some(ref org_uuid) = self.organization_uuid {
                    UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).iter().for_each(|user_org| {
                        User::update_uuid_revision(&user_org.user_uuid, conn);
                    for user_org in UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await.iter() {
                        User::update_uuid_revision(&user_org.user_uuid, conn).await;
                        user_uuids.push(user_org.user_uuid.clone())
                    });
                }
            }
        };
        user_uuids
    }

    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
        self.update_users_revision(conn);
    pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
        self.update_users_revision(conn).await;
        self.updated_at = Utc::now().naive_utc();

        db_run! { conn:
@@ -244,13 +265,13 @@ impl Cipher {
        }
    }

    pub fn delete(&self, conn: &DbConn) -> EmptyResult {
        self.update_users_revision(conn);
    pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
        self.update_users_revision(conn).await;

        FolderCipher::delete_all_by_cipher(&self.uuid, conn)?;
        CollectionCipher::delete_all_by_cipher(&self.uuid, conn)?;
        Attachment::delete_all_by_cipher(&self.uuid, conn)?;
        Favorite::delete_all_by_cipher(&self.uuid, conn)?;
        FolderCipher::delete_all_by_cipher(&self.uuid, conn).await?;
        CollectionCipher::delete_all_by_cipher(&self.uuid, conn).await?;
        Attachment::delete_all_by_cipher(&self.uuid, conn).await?;
        Favorite::delete_all_by_cipher(&self.uuid, conn).await?;

        db_run! { conn: {
            diesel::delete(ciphers::table.filter(ciphers::uuid.eq(&self.uuid)))
@@ -259,54 +280,55 @@ impl Cipher {
        }}
    }

    pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
        for cipher in Self::find_by_org(org_uuid, conn) {
            cipher.delete(conn)?;
    pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        // TODO: Optimize this by executing a DELETE directly on the database, instead of first fetching.
        for cipher in Self::find_by_org(org_uuid, conn).await {
            cipher.delete(conn).await?;
        }
        Ok(())
    }

    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        for cipher in Self::find_owned_by_user(user_uuid, conn) {
            cipher.delete(conn)?;
    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        for cipher in Self::find_owned_by_user(user_uuid, conn).await {
            cipher.delete(conn).await?;
        }
        Ok(())
    }

    /// Purge all ciphers that are old enough to be auto-deleted.
    pub fn purge_trash(conn: &DbConn) {
    pub async fn purge_trash(conn: &mut DbConn) {
        if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
            let now = Utc::now().naive_utc();
            let dt = now - Duration::days(auto_delete_days);
            for cipher in Self::find_deleted_before(&dt, conn) {
                cipher.delete(conn).ok();
            for cipher in Self::find_deleted_before(&dt, conn).await {
                cipher.delete(conn).await.ok();
            }
        }
    }

    pub fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
        User::update_uuid_revision(user_uuid, conn);
    pub async fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        User::update_uuid_revision(user_uuid, conn).await;

        match (self.get_folder_uuid(user_uuid, conn), folder_uuid) {
        match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) {
            // No changes
            (None, None) => Ok(()),
            (Some(ref old), Some(ref new)) if old == new => Ok(()),

            // Add to folder
            (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn),
            (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn).await,

            // Remove from folder
            (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn) {
                Some(old) => old.delete(conn),
            (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await {
                Some(old) => old.delete(conn).await,
                None => err!("Couldn't move from previous folder"),
            },

            // Move to another folder
            (Some(old), Some(new)) => {
                if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn) {
                    old.delete(conn)?;
                if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await {
                    old.delete(conn).await?;
                }
                FolderCipher::new(&new, &self.uuid).save(conn)
                FolderCipher::new(&new, &self.uuid).save(conn).await
            }
        }
    }
@@ -317,13 +339,38 @@ impl Cipher {
    }

    /// Returns whether this cipher is owned by an org in which the user has full access.
    pub fn is_in_full_access_org(&self, user_uuid: &str, conn: &DbConn) -> bool {
    async fn is_in_full_access_org(
        &self,
        user_uuid: &str,
        cipher_sync_data: Option<&CipherSyncData>,
        conn: &mut DbConn,
    ) -> bool {
        if let Some(ref org_uuid) = self.organization_uuid {
            if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {
            if let Some(cipher_sync_data) = cipher_sync_data {
                if let Some(cached_user_org) = cipher_sync_data.user_organizations.get(org_uuid) {
                    return cached_user_org.has_full_access();
                }
            } else if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await {
                return user_org.has_full_access();
            }
        }
        false
    }

    /// Returns whether this cipher is owned by a group in which the user has full access.
    async fn is_in_full_access_group(
        &self,
        user_uuid: &str,
        cipher_sync_data: Option<&CipherSyncData>,
        conn: &mut DbConn,
    ) -> bool {
        if let Some(ref org_uuid) = self.organization_uuid {
            if let Some(cipher_sync_data) = cipher_sync_data {
                return cipher_sync_data.user_group_full_access_for_organizations.get(org_uuid).is_some();
            } else {
                return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await;
            }
        }
        false
    }

@@ -332,75 +379,133 @@ impl Cipher {
    /// not in any collection the user has access to. Otherwise, the user has
    /// access to this cipher, and Some(read_only, hide_passwords) represents
    /// the access restrictions.
    pub fn get_access_restrictions(&self, user_uuid: &str, conn: &DbConn) -> Option<(bool, bool)> {
    pub async fn get_access_restrictions(
        &self,
        user_uuid: &str,
        cipher_sync_data: Option<&CipherSyncData>,
        conn: &mut DbConn,
    ) -> Option<(bool, bool)> {
        // Check whether this cipher is directly owned by the user, or is in
        // a collection that the user has full access to. If so, there are no
        // access restrictions.
        if self.is_owned_by_user(user_uuid) || self.is_in_full_access_org(user_uuid, conn) {
        if self.is_owned_by_user(user_uuid)
            || self.is_in_full_access_org(user_uuid, cipher_sync_data, conn).await
            || self.is_in_full_access_group(user_uuid, cipher_sync_data, conn).await
        {
            return Some((false, false));
        }

        let rows = if let Some(cipher_sync_data) = cipher_sync_data {
            let mut rows: Vec<(bool, bool)> = Vec::new();
            if let Some(collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
                for collection in collections {
                    // User permissions
                    if let Some(uc) = cipher_sync_data.user_collections.get(collection) {
                        rows.push((uc.read_only, uc.hide_passwords));
                    }

                    // Group permissions
                    if let Some(cg) = cipher_sync_data.user_collections_groups.get(collection) {
                        rows.push((cg.read_only, cg.hide_passwords));
                    }
                }
            }
            rows
        } else {
            let mut access_flags = self.get_user_collections_access_flags(user_uuid, conn).await;
            access_flags.append(&mut self.get_group_collections_access_flags(user_uuid, conn).await);
            access_flags
        };

        if rows.is_empty() {
            // This cipher isn't in any collections accessible to the user.
            return None;
        }

        // A cipher can be in multiple collections with inconsistent access flags.
        // For example, a cipher could be in one collection where the user has
        // read-only access, but also in another collection where the user has
        // read/write access. For a flag to be in effect for a cipher, upstream
        // requires all collections the cipher is in to have that flag set.
        // Therefore, we do a boolean AND of all values in each of the `read_only`
        // and `hide_passwords` columns. This could ideally be done as part of the
        // query, but Diesel doesn't support a min() or bool_and() function on
        // booleans and this behavior isn't portable anyway.
        let mut read_only = true;
        let mut hide_passwords = true;
        for (ro, hp) in rows.iter() {
            read_only &= ro;
            hide_passwords &= hp;
        }

        Some((read_only, hide_passwords))
    }

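Editor's note: the comment above carries the key access-control rule, so a worked example may help. If a cipher sits in two collections with (read_only, hide_passwords) flags (true, false) and (false, true), the AND across rows yields (false, false): neither restriction applies, because each flag is only enforced when every collection the cipher belongs to sets it. The same fold, written with iterators (equivalent to the loop above):

    // (read_only, hide_passwords) per collection the cipher is in.
    let rows = vec![(true, false), (false, true)];

    // A restriction holds only if ALL collections agree on it.
    let (read_only, hide_passwords) = rows
        .iter()
        .fold((true, true), |(ro, hp), &(r, h)| (ro & r, hp & h));

    assert_eq!((read_only, hide_passwords), (false, false));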
    async fn get_user_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> {
        db_run! {conn: {
            // Check whether this cipher is in any collections accessible to the
            // user. If so, retrieve the access flags for each collection.
            let query = ciphers::table
            ciphers::table
                .filter(ciphers::uuid.eq(&self.uuid))
                .inner_join(ciphers_collections::table.on(
                    ciphers::uuid.eq(ciphers_collections::cipher_uuid)))
                .inner_join(users_collections::table.on(
                    ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
                        .and(users_collections::user_uuid.eq(user_uuid))))
                .select((users_collections::read_only, users_collections::hide_passwords));

            // There's an edge case where a cipher can be in multiple collections
            // with inconsistent access flags. For example, a cipher could be in
            // one collection where the user has read-only access, but also in
            // another collection where the user has read/write access. To handle
            // this, we do a boolean OR of all values in each of the `read_only`
            // and `hide_passwords` columns. This could ideally be done as part
            // of the query, but Diesel doesn't support a max() or bool_or()
            // function on booleans and this behavior isn't portable anyway.
            if let Ok(vec) = query.load::<(bool, bool)>(conn) {
                let mut read_only = false;
                let mut hide_passwords = false;
                for (ro, hp) in vec.iter() {
                    read_only |= ro;
                    hide_passwords |= hp;
                }

                Some((read_only, hide_passwords))
            } else {
                // This cipher isn't in any collections accessible to the user.
                None
            }
                .select((users_collections::read_only, users_collections::hide_passwords))
                .load::<(bool, bool)>(conn)
                .expect("Error getting user access restrictions")
        }}
    }

    pub fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
        match self.get_access_restrictions(user_uuid, conn) {
    async fn get_group_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::uuid.eq(&self.uuid))
                .inner_join(ciphers_collections::table.on(
                    ciphers::uuid.eq(ciphers_collections::cipher_uuid)
                ))
                .inner_join(collections_groups::table.on(
                    collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
                ))
                .inner_join(groups_users::table.on(
                    groups_users::groups_uuid.eq(collections_groups::groups_uuid)
                ))
                .inner_join(users_organizations::table.on(
                    users_organizations::uuid.eq(groups_users::users_organizations_uuid)
                ))
                .filter(users_organizations::user_uuid.eq(user_uuid))
                .select((collections_groups::read_only, collections_groups::hide_passwords))
                .load::<(bool, bool)>(conn)
                .expect("Error getting group access restrictions")
        }}
    }

    pub async fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
        match self.get_access_restrictions(user_uuid, None, conn).await {
            Some((read_only, _hide_passwords)) => !read_only,
            None => false,
        }
    }

    pub fn is_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
        self.get_access_restrictions(user_uuid, conn).is_some()
    pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
        self.get_access_restrictions(user_uuid, None, conn).await.is_some()
    }

    // Returns whether this cipher is a favorite of the specified user.
    pub fn is_favorite(&self, user_uuid: &str, conn: &DbConn) -> bool {
        Favorite::is_favorite(&self.uuid, user_uuid, conn)
    pub async fn is_favorite(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
        Favorite::is_favorite(&self.uuid, user_uuid, conn).await
    }

    // Sets whether this cipher is a favorite of the specified user.
    pub fn set_favorite(&self, favorite: Option<bool>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
    pub async fn set_favorite(&self, favorite: Option<bool>, user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        match favorite {
            None => Ok(()), // No change requested.
            Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn),
            Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn).await,
        }
    }

    pub fn get_folder_uuid(&self, user_uuid: &str, conn: &DbConn) -> Option<String> {
    pub async fn get_folder_uuid(&self, user_uuid: &str, conn: &mut DbConn) -> Option<String> {
        db_run! {conn: {
            folders_ciphers::table
                .inner_join(folders::table)
@@ -412,7 +517,7 @@ impl Cipher {
        }}
    }

    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::uuid.eq(uuid))
@@ -425,16 +530,16 @@ impl Cipher {
    // Find all ciphers accessible or visible to the specified user.
    //
    // "Accessible" means the user has read access to the cipher, either via
    // direct ownership or via collection access.
    // direct ownership, collection or via group access.
    //
    // "Visible" usually means the same as accessible, except when an org
    // owner/admin sets their account to have access to only selected
    // owner/admin sets their account or group to have access to only selected
    // collections in the org (presumably because they aren't interested in
    // the other collections in the org). In this case, if `visible_only` is
    // true, then the non-interesting ciphers will not be returned. As a
    // result, those ciphers will not appear in "My Vault" for the org
    // owner/admin, but they can still be accessed via the org vault view.
    pub fn find_by_user(user_uuid: &str, visible_only: bool, conn: &DbConn) -> Vec<Self> {
    pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
        db_run! {conn: {
            let mut query = ciphers::table
                .left_join(ciphers_collections::table.on(
@@ -450,9 +555,22 @@ impl Cipher {
                    // Ensure that users_collections::user_uuid is NULL for unconfirmed users.
                    .and(users_organizations::user_uuid.eq(users_collections::user_uuid))
                ))
                .left_join(groups_users::table.on(
                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
                ))
                .left_join(groups::table.on(
                    groups::uuid.eq(groups_users::groups_uuid)
                ))
                .left_join(collections_groups::table.on(
                    collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
                        collections_groups::groups_uuid.eq(groups::uuid)
                    )
                ))
                .filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
                .or_filter(users_organizations::access_all.eq(true)) // access_all in org
                .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
                .or_filter(groups::access_all.eq(true)) // Access via groups
                .or_filter(collections_groups::collections_uuid.is_not_null()) // Access via groups
                .into_boxed();

            if !visible_only {
@@ -469,12 +587,12 @@ impl Cipher {
    }

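Editor's note: `into_boxed()` in the query above is what lets `find_by_user` append a filter only when `visible_only` is false: boxing erases the query's concrete static type, so both branches can share one mutable variable. A minimal sketch of the pattern, assuming a hypothetical `users` table defined via Diesel's `table!` macro:

    use diesel::prelude::*;

    fn find_users(only_active: bool, conn: &mut SqliteConnection) -> QueryResult<Vec<String>> {
        // Boxing erases the query type, so it can be extended conditionally.
        let mut query = users::table.select(users::name).into_boxed();
        if only_active {
            query = query.filter(users::active.eq(true));
        }
        query.load::<String>(conn)
    }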
    // Find all ciphers visible to the specified user.
    pub fn find_by_user_visible(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        Self::find_by_user(user_uuid, true, conn)
    pub async fn find_by_user_visible(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        Self::find_by_user(user_uuid, true, conn).await
    }

    // Find all ciphers directly owned by the specified user.
    pub fn find_owned_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
    pub async fn find_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! {conn: {
            ciphers::table
                .filter(
@@ -485,7 +603,7 @@ impl Cipher {
        }}
    }

    pub fn count_owned_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
    pub async fn count_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::user_uuid.eq(user_uuid))
@@ -496,7 +614,7 @@ impl Cipher {
        }}
    }

    pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
    pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::organization_uuid.eq(org_uuid))
@@ -504,7 +622,7 @@ impl Cipher {
        }}
    }

    pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
    pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::organization_uuid.eq(org_uuid))
@@ -515,7 +633,7 @@ impl Cipher {
        }}
    }

    pub fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec<Self> {
    pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! {conn: {
            folders_ciphers::table.inner_join(ciphers::table)
                .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@@ -525,7 +643,7 @@ impl Cipher {
    }

    /// Find all ciphers that were deleted before the specified datetime.
    pub fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
    pub async fn find_deleted_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec<Self> {
        db_run! {conn: {
            ciphers::table
                .filter(ciphers::deleted_at.lt(dt))
@@ -533,7 +651,7 @@ impl Cipher {
        }}
    }

    pub fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec<String> {
    pub async fn get_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
        db_run! {conn: {
            ciphers_collections::table
                .inner_join(collections::table.on(
@@ -541,12 +659,12 @@ impl Cipher {
                ))
                .inner_join(users_organizations::table.on(
                    users_organizations::org_uuid.eq(collections::org_uuid).and(
                        users_organizations::user_uuid.eq(user_id)
                        users_organizations::user_uuid.eq(user_id.clone())
                    )
                ))
                .left_join(users_collections::table.on(
                    users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
                        users_collections::user_uuid.eq(user_id)
                        users_collections::user_uuid.eq(user_id.clone())
                    )
                ))
                .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
@@ -559,4 +677,43 @@ impl Cipher {
                .load::<String>(conn).unwrap_or_default()
        }}
    }

    /// Return a Vec with (cipher_uuid, collection_uuid)
    /// This is used during a full sync so we only need one query for all collections accessible.
    pub async fn get_collections_with_cipher_by_user(user_id: String, conn: &mut DbConn) -> Vec<(String, String)> {
        db_run! {conn: {
            ciphers_collections::table
                .inner_join(collections::table.on(
                    collections::uuid.eq(ciphers_collections::collection_uuid)
                ))
                .inner_join(users_organizations::table.on(
                    users_organizations::org_uuid.eq(collections::org_uuid).and(
                        users_organizations::user_uuid.eq(user_id.clone())
                    )
                ))
                .left_join(users_collections::table.on(
                    users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
                        users_collections::user_uuid.eq(user_id.clone())
                    )
                ))
                .left_join(groups_users::table.on(
                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
                ))
                .left_join(groups::table.on(
                    groups::uuid.eq(groups_users::groups_uuid)
                ))
                .left_join(collections_groups::table.on(
                    collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
                        collections_groups::groups_uuid.eq(groups::uuid)
                    )
                ))
                .or_filter(users_collections::user_uuid.eq(user_id)) // User has access to collection
                .or_filter(users_organizations::access_all.eq(true)) // User has access all
                .or_filter(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
                .or_filter(groups::access_all.eq(true)) // Access via group
                .or_filter(collections_groups::collections_uuid.is_not_null()) // Access via group
                .select(ciphers_collections::all_columns)
                .load::<(String, String)>(conn).unwrap_or_default()
        }}
    }
}

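Editor's note: `get_collections_with_cipher_by_user` returns flat `(cipher_uuid, collection_uuid)` pairs so a full sync costs one query; the caller then groups them per cipher, presumably to feed `CipherSyncData::cipher_collections`. A minimal sketch of that grouping step:

    use std::collections::HashMap;

    // One query's worth of (cipher_uuid, collection_uuid) rows...
    let rows: Vec<(String, String)> = vec![
        ("cipher-a".into(), "col-1".into()),
        ("cipher-a".into(), "col-2".into()),
        ("cipher-b".into(), "col-1".into()),
    ];

    // ...grouped into cipher_uuid -> [collection_uuid] for O(1) lookups.
    let mut cipher_collections: HashMap<String, Vec<String>> = HashMap::new();
    for (cipher, collection) in rows {
        cipher_collections.entry(cipher).or_default().push(collection);
    }
    assert_eq!(cipher_collections["cipher-a"].len(), 2);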
@@ -1,23 +1,20 @@
use serde_json::Value;

use super::{Cipher, Organization, User, UserOrgStatus, UserOrgType, UserOrganization};
use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};

db_object! {
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[table_name = "collections"]
    #[belongs_to(Organization, foreign_key = "org_uuid")]
    #[primary_key(uuid)]
    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[diesel(table_name = collections)]
    #[diesel(primary_key(uuid))]
    pub struct Collection {
        pub uuid: String,
        pub org_uuid: String,
        pub name: String,
    }

    #[derive(Identifiable, Queryable, Insertable, Associations)]
    #[table_name = "users_collections"]
    #[belongs_to(User, foreign_key = "user_uuid")]
    #[belongs_to(Collection, foreign_key = "collection_uuid")]
    #[primary_key(user_uuid, collection_uuid)]
    #[derive(Identifiable, Queryable, Insertable)]
    #[diesel(table_name = users_collections)]
    #[diesel(primary_key(user_uuid, collection_uuid))]
    pub struct CollectionUser {
        pub user_uuid: String,
        pub collection_uuid: String,
@@ -25,11 +22,9 @@ db_object! {
        pub hide_passwords: bool,
    }

    #[derive(Identifiable, Queryable, Insertable, Associations)]
    #[table_name = "ciphers_collections"]
    #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
    #[belongs_to(Collection, foreign_key = "collection_uuid")]
    #[primary_key(cipher_uuid, collection_uuid)]
    #[derive(Identifiable, Queryable, Insertable)]
    #[diesel(table_name = ciphers_collections)]
    #[diesel(primary_key(cipher_uuid, collection_uuid))]
    pub struct CollectionCipher {
        pub cipher_uuid: String,
        pub collection_uuid: String,
@@ -57,11 +52,32 @@ impl Collection {
        })
    }

    pub fn to_json_details(&self, user_uuid: &str, conn: &DbConn) -> Value {
    pub async fn to_json_details(
        &self,
        user_uuid: &str,
        cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
        conn: &mut DbConn,
    ) -> Value {
        let (read_only, hide_passwords) = if let Some(cipher_sync_data) = cipher_sync_data {
            match cipher_sync_data.user_organizations.get(&self.org_uuid) {
                Some(uo) if uo.has_full_access() => (false, false),
                Some(_) => {
                    if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
                        (uc.read_only, uc.hide_passwords)
                    } else {
                        (false, false)
                    }
                }
                _ => (true, true),
            }
        } else {
            (!self.is_writable_by_user(user_uuid, conn).await, self.hide_passwords_for_user(user_uuid, conn).await)
        };

        let mut json_object = self.to_json();
        json_object["Object"] = json!("collectionDetails");
        json_object["ReadOnly"] = json!(!self.is_writable_by_user(user_uuid, conn));
        json_object["HidePasswords"] = json!(self.hide_passwords_for_user(user_uuid, conn));
        json_object["ReadOnly"] = json!(read_only);
        json_object["HidePasswords"] = json!(hide_passwords);
        json_object
    }
}
@@ -73,8 +89,8 @@ use crate::error::MapResult;

/// Database methods
impl Collection {
    pub fn save(&self, conn: &DbConn) -> EmptyResult {
        self.update_users_revision(conn);
    pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
        self.update_users_revision(conn).await;

        db_run! { conn:
            sqlite, mysql {
@@ -107,10 +123,11 @@ impl Collection {
        }
    }

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        self.update_users_revision(conn);
        CollectionCipher::delete_all_by_collection(&self.uuid, conn)?;
        CollectionUser::delete_all_by_collection(&self.uuid, conn)?;
    pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
        self.update_users_revision(conn).await;
        CollectionCipher::delete_all_by_collection(&self.uuid, conn).await?;
        CollectionUser::delete_all_by_collection(&self.uuid, conn).await?;
        CollectionGroup::delete_all_by_collection(&self.uuid, conn).await?;

        db_run! { conn: {
            diesel::delete(collections::table.filter(collections::uuid.eq(self.uuid)))
@@ -119,20 +136,20 @@ impl Collection {
        }}
    }

    pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
        for collection in Self::find_by_organization(org_uuid, conn) {
            collection.delete(conn)?;
    pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        for collection in Self::find_by_organization(org_uuid, conn).await {
            collection.delete(conn).await?;
        }
        Ok(())
    }

    pub fn update_users_revision(&self, conn: &DbConn) {
        UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).iter().for_each(|user_org| {
            User::update_uuid_revision(&user_org.user_uuid, conn);
        });
    pub async fn update_users_revision(&self, conn: &mut DbConn) {
        for user_org in UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() {
            User::update_uuid_revision(&user_org.user_uuid, conn).await;
        }
    }

    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            collections::table
                .filter(collections::uuid.eq(uuid))
@@ -142,17 +159,28 @@ impl Collection {
        }}
    }

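Editor's note: a recurring change in this comparison replaces `iter().for_each(|x| ...)` with a plain `for` loop wherever the body now awaits: `for_each` takes an ordinary synchronous closure, and `.await` cannot appear inside one. A minimal sketch of the rewrite, with hypothetical names:

    // Before: fine while update_revision was synchronous.
    users.iter().for_each(|u| {
        update_revision(u); // cannot become update_revision(u).await here
    });

    // After: a for loop is plain control flow inside the async fn,
    // so awaiting in its body is allowed.
    for u in users.iter() {
        update_revision(u).await;
    }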
    pub fn find_by_user_uuid(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
    pub async fn find_by_user_uuid(user_uuid: String, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            collections::table
                .left_join(users_collections::table.on(
                    users_collections::collection_uuid.eq(collections::uuid).and(
                        users_collections::user_uuid.eq(user_uuid)
                        users_collections::user_uuid.eq(user_uuid.clone())
                    )
                ))
                .left_join(users_organizations::table.on(
                    collections::org_uuid.eq(users_organizations::org_uuid).and(
                        users_organizations::user_uuid.eq(user_uuid)
                        users_organizations::user_uuid.eq(user_uuid.clone())
                    )
                ))
                .left_join(groups_users::table.on(
                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
                ))
                .left_join(groups::table.on(
                    groups::uuid.eq(groups_users::groups_uuid)
                ))
                .left_join(collections_groups::table.on(
                    collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
                        collections_groups::collections_uuid.eq(collections::uuid)
                    )
                ))
                .filter(
@@ -161,17 +189,40 @@ impl Collection {
                .filter(
                    users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
                        users_organizations::access_all.eq(true) // access_all in Organization
                    ).or(
                        groups::access_all.eq(true) // access_all in groups
                    ).or( // access via groups
                        groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
                            collections_groups::collections_uuid.is_not_null()
                        )
                    )
                ).select(collections::all_columns)
                )
                .select(collections::all_columns)
                .distinct()
                .load::<CollectionDb>(conn).expect("Error loading collections").from_db()
        }}
    }

    pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        Self::find_by_user_uuid(user_uuid, conn).into_iter().filter(|c| c.org_uuid == org_uuid).collect()
    // Check if a user has access to a specific collection
    // FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
    // For now this is a good solution without making too many changes.
    pub async fn has_access_by_collection_and_user_uuid(
        collection_uuid: &str,
        user_uuid: &str,
        conn: &mut DbConn,
    ) -> bool {
        Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
    }

    pub fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
    pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        Self::find_by_user_uuid(user_uuid.to_owned(), conn)
            .await
            .into_iter()
            .filter(|c| c.org_uuid == org_uuid)
            .collect()
    }

    pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            collections::table
                .filter(collections::org_uuid.eq(org_uuid))
@@ -181,7 +232,7 @@ impl Collection {
        }}
    }

    pub fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            collections::table
                .filter(collections::uuid.eq(uuid))
@@ -193,12 +244,12 @@ impl Collection {
        }}
    }

    pub fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            collections::table
                .left_join(users_collections::table.on(
                    users_collections::collection_uuid.eq(collections::uuid).and(
                        users_collections::user_uuid.eq(user_uuid)
                        users_collections::user_uuid.eq(user_uuid.clone())
                    )
                ))
                .left_join(users_organizations::table.on(
@@ -206,11 +257,27 @@ impl Collection {
                        users_organizations::user_uuid.eq(user_uuid)
                    )
                ))
                .left_join(groups_users::table.on(
                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
                ))
                .left_join(groups::table.on(
                    groups::uuid.eq(groups_users::groups_uuid)
                ))
                .left_join(collections_groups::table.on(
                    collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
                        collections_groups::collections_uuid.eq(collections::uuid)
                    )
                ))
                .filter(collections::uuid.eq(uuid))
                .filter(
                    users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
                        users_organizations::access_all.eq(true).or( // access_all in Organization
                            users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
                    )).or(
                        groups::access_all.eq(true) // access_all in groups
                    ).or( // access via groups
                        groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
                            collections_groups::collections_uuid.is_not_null()
                        )
                    )
                ).select(collections::all_columns)
@@ -219,8 +286,8 @@ impl Collection {
        }}
    }

    pub fn is_writable_by_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn) {
    pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
            None => false, // Not in Org
            Some(user_org) => {
                if user_org.has_full_access() {
@@ -241,8 +308,8 @@ impl Collection {
            }
        }
    }

    pub fn hide_passwords_for_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn) {
    pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
        match UserOrganization::find_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
            None => true, // Not in Org
            Some(user_org) => {
                if user_org.has_full_access() {
@@ -266,7 +333,7 @@ impl Collection {

/// Database methods
impl CollectionUser {
    pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
    pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_collections::table
                .filter(users_collections::user_uuid.eq(user_uuid))
@@ -279,14 +346,14 @@ impl CollectionUser {
        }}
    }

    pub fn save(
    pub async fn save(
        user_uuid: &str,
        collection_uuid: &str,
        read_only: bool,
        hide_passwords: bool,
        conn: &DbConn,
        conn: &mut DbConn,
    ) -> EmptyResult {
        User::update_uuid_revision(user_uuid, conn);
        User::update_uuid_revision(user_uuid, conn).await;

        db_run! { conn:
            sqlite, mysql {
@@ -337,8 +404,8 @@ impl CollectionUser {
        }
    }

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.user_uuid, conn);
    pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.user_uuid, conn).await;

        db_run! { conn: {
            diesel::delete(
@@ -351,7 +418,7 @@ impl CollectionUser {
        }}
    }

    pub fn find_by_collection(collection_uuid: &str, conn: &DbConn) -> Vec<Self> {
    pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_collections::table
                .filter(users_collections::collection_uuid.eq(collection_uuid))
@@ -362,7 +429,11 @@ impl CollectionUser {
        }}
    }

    pub fn find_by_collection_and_user(collection_uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_collection_and_user(
        collection_uuid: &str,
        user_uuid: &str,
        conn: &mut DbConn,
    ) -> Option<Self> {
        db_run! { conn: {
            users_collections::table
                .filter(users_collections::collection_uuid.eq(collection_uuid))
@@ -374,10 +445,21 @@ impl CollectionUser {
        }}
    }

    pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        CollectionUser::find_by_collection(collection_uuid, conn).iter().for_each(|collection| {
            User::update_uuid_revision(&collection.user_uuid, conn);
        });
    pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            users_collections::table
                .filter(users_collections::user_uuid.eq(user_uuid))
                .select(users_collections::all_columns)
                .load::<CollectionUserDb>(conn)
                .expect("Error loading users_collections")
                .from_db()
        }}
    }

    pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() {
            User::update_uuid_revision(&collection.user_uuid, conn).await;
        }

        db_run! { conn: {
            diesel::delete(users_collections::table.filter(users_collections::collection_uuid.eq(collection_uuid)))
@@ -386,8 +468,8 @@ impl CollectionUser {
        }}
    }

    pub fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> EmptyResult {
        let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn);
    pub async fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;

        db_run! { conn: {
            for user in collectionusers {
@@ -405,8 +487,8 @@ impl CollectionUser {

/// Database methods
impl CollectionCipher {
    pub fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        Self::update_users_revision(collection_uuid, conn);
    pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        Self::update_users_revision(collection_uuid, conn).await;

        db_run! { conn:
            sqlite, mysql {
@@ -435,8 +517,8 @@ impl CollectionCipher {
        }
    }

    pub fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
        Self::update_users_revision(collection_uuid, conn);
    pub async fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        Self::update_users_revision(collection_uuid, conn).await;

        db_run! { conn: {
            diesel::delete(
@@ -449,7 +531,7 @@ impl CollectionCipher {
        }}
    }

    pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
    pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid)))
                .execute(conn)
@@ -457,7 +539,7 @@ impl CollectionCipher {
        }}
    }

    pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
    pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid)))
                .execute(conn)
@@ -465,9 +547,9 @@ impl CollectionCipher {
        }}
    }

    pub fn update_users_revision(collection_uuid: &str, conn: &DbConn) {
        if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn) {
            collection.update_users_revision(conn);
    pub async fn update_users_revision(collection_uuid: &str, conn: &mut DbConn) {
        if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await {
            collection.update_users_revision(conn).await;
        }
    }
}

@@ -1,14 +1,12 @@
use chrono::{NaiveDateTime, Utc};

use super::User;
use crate::CONFIG;

db_object! {
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[table_name = "devices"]
    #[changeset_options(treat_none_as_null="true")]
    #[belongs_to(User, foreign_key = "user_uuid")]
    #[primary_key(uuid)]
    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
    #[diesel(table_name = devices)]
    #[diesel(treat_none_as_null = true)]
    #[diesel(primary_key(uuid, user_uuid))]
    pub struct Device {
        pub uuid: String,
        pub created_at: NaiveDateTime,
@@ -17,8 +15,7 @@ db_object! {
        pub user_uuid: String,

        pub name: String,
        // https://github.com/bitwarden/core/tree/master/src/Core/Enums
        pub atype: i32,
        pub atype: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
        pub push_token: Option<String>,

        pub refresh_token: String,
@@ -51,7 +48,7 @@ impl Device {
        use crate::crypto;
        use data_encoding::BASE64;

        let twofactor_remember = BASE64.encode(&crypto::get_random(vec![0u8; 180]));
        let twofactor_remember = crypto::encode_random_bytes::<180>(BASE64);
        self.twofactor_remember = Some(twofactor_remember.clone());

        twofactor_remember
@@ -61,13 +58,18 @@ impl Device {
        self.twofactor_remember = None;
    }

    pub fn refresh_tokens(&mut self, user: &super::User, orgs: Vec<super::UserOrganization>) -> (String, i64) {
    pub fn refresh_tokens(
        &mut self,
        user: &super::User,
        orgs: Vec<super::UserOrganization>,
        scope: Vec<String>,
    ) -> (String, i64) {
        // If there is no refresh token, we create one
        if self.refresh_token.is_empty() {
            use crate::crypto;
            use data_encoding::BASE64URL;

            self.refresh_token = BASE64URL.encode(&crypto::get_random_64());
            self.refresh_token = crypto::encode_random_bytes::<64>(BASE64URL);
        }

        // Update the expiration of the device and the last update date
@@ -85,11 +87,11 @@ impl Device {
            nbf: time_now.timestamp(),
            exp: (time_now + *DEFAULT_VALIDITY).timestamp(),
            iss: JWT_LOGIN_ISSUER.to_string(),
            sub: user.uuid.to_string(),
            sub: user.uuid.clone(),

            premium: true,
            name: user.name.to_string(),
            email: user.email.to_string(),
            name: user.name.clone(),
            email: user.email.clone(),
            email_verified: !CONFIG.mail_enabled() || user.verified_at.is_some(),

            orgowner,
@@ -97,9 +99,9 @@ impl Device {
            orguser,
            orgmanager,

            sstamp: user.security_stamp.to_string(),
            device: self.uuid.to_string(),
            scope: vec!["api".into(), "offline_access".into()],
            sstamp: user.security_stamp.clone(),
            device: self.uuid.clone(),
            scope,
            amr: vec!["Application".into()],
        };

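Editor's note: `crypto::encode_random_bytes::<N>(...)` replaces the older `encoding.encode(&crypto::get_random(vec![0u8; N]))` calls; a const-generic helper keeps the byte count in the type and avoids the throwaway Vec. A plausible sketch of such a helper (the real signature lives in vaultwarden's crypto module and may differ):

    use data_encoding::Encoding;
    use rand::RngCore;

    // Fill a fixed-size stack buffer with random bytes and encode it.
    pub fn encode_random_bytes<const N: usize>(e: Encoding) -> String {
        let mut bytes = [0u8; N];
        rand::thread_rng().fill_bytes(&mut bytes);
        e.encode(&bytes)
    }

    // Usage mirroring the diff: 180 random bytes, BASE64-encoded.
    // let remember = encode_random_bytes::<180>(data_encoding::BASE64);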
@@ -114,7 +116,7 @@ use crate::error::MapResult;

/// Database methods
impl Device {
    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
    pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
        self.updated_at = Utc::now().naive_utc();

        db_run! { conn:
@@ -127,39 +129,33 @@ impl Device {
            postgresql {
                let value = DeviceDb::to_db(self);
                crate::util::retry(
                    || diesel::insert_into(devices::table).values(&value).on_conflict(devices::uuid).do_update().set(&value).execute(conn),
                    || diesel::insert_into(devices::table).values(&value).on_conflict((devices::uuid, devices::user_uuid)).do_update().set(&value).execute(conn),
                    10,
                ).map_res("Error saving device")
            }
        }
    }

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(devices::table.filter(devices::uuid.eq(self.uuid)))
            diesel::delete(devices::table.filter(devices::user_uuid.eq(user_uuid)))
                .execute(conn)
                .map_res("Error removing device")
                .map_res("Error removing devices for user")
        }}
    }

    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        for device in Self::find_by_user(user_uuid, conn) {
            device.delete(conn)?;
        }
        Ok(())
    }

    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            devices::table
                .filter(devices::uuid.eq(uuid))
                .filter(devices::user_uuid.eq(user_uuid))
                .first::<DeviceDb>(conn)
                .ok()
                .from_db()
        }}
    }

    pub fn find_by_refresh_token(refresh_token: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_refresh_token(refresh_token: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            devices::table
                .filter(devices::refresh_token.eq(refresh_token))
@@ -169,17 +165,7 @@ impl Device {
        }}
    }

    pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            devices::table
                .filter(devices::user_uuid.eq(user_uuid))
                .load::<DeviceDb>(conn)
                .expect("Error loading devices")
                .from_db()
        }}
    }

    pub fn find_latest_active_by_user(user_uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_latest_active_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            devices::table
                .filter(devices::user_uuid.eq(user_uuid))

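Editor's note: the devices table moves to a composite primary key `(uuid, user_uuid)`, so the PostgreSQL upsert's conflict target becomes the tuple of both columns. A minimal sketch of the Diesel shape, assuming the `devices` table and a `DeviceDb` row type with the usual derives are in scope:

    use diesel::prelude::*;

    fn upsert_device(value: &DeviceDb, conn: &mut PgConnection) -> QueryResult<usize> {
        diesel::insert_into(devices::table)
            .values(value)
            // The conflict target is now the full composite primary key.
            .on_conflict((devices::uuid, devices::user_uuid))
            .do_update()
            .set(value)
            .execute(conn)
    }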
@@ -1,14 +1,15 @@
|
||||
use chrono::{NaiveDateTime, Utc};
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::{api::EmptyResult, db::DbConn, error::MapResult};
|
||||
|
||||
use super::User;
|
||||
|
||||
db_object! {
|
||||
#[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
|
||||
#[table_name = "emergency_access"]
|
||||
#[changeset_options(treat_none_as_null="true")]
|
||||
#[belongs_to(User, foreign_key = "grantor_uuid")]
|
||||
#[primary_key(uuid)]
|
||||
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
|
||||
#[diesel(table_name = emergency_access)]
|
||||
#[diesel(treat_none_as_null = true)]
|
||||
#[diesel(primary_key(uuid))]
|
||||
pub struct EmergencyAccess {
|
||||
pub uuid: String,
|
||||
pub grantor_uuid: String,
|
||||
@@ -28,14 +29,14 @@ db_object! {
|
||||
/// Local methods
|
||||
|
||||
impl EmergencyAccess {
|
||||
pub fn new(grantor_uuid: String, email: Option<String>, status: i32, atype: i32, wait_time_days: i32) -> Self {
|
||||
pub fn new(grantor_uuid: String, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self {
|
||||
let now = Utc::now().naive_utc();
|
||||
|
||||
Self {
|
||||
uuid: crate::util::get_uuid(),
|
||||
grantor_uuid,
|
||||
grantee_uuid: None,
|
||||
email,
|
||||
email: Some(email),
|
||||
status,
|
||||
atype,
|
||||
wait_time_days,
|
||||
@@ -55,14 +56,6 @@ impl EmergencyAccess {
|
||||
}
|
||||
}

    pub fn has_type(&self, access_type: EmergencyAccessType) -> bool {
        self.atype == access_type as i32
    }

    pub fn has_status(&self, status: EmergencyAccessStatus) -> bool {
        self.status == status as i32
    }

    pub fn to_json(&self) -> Value {
        json!({
            "Id": self.uuid,
@@ -73,8 +66,8 @@ impl EmergencyAccess {
        })
    }

    pub fn to_json_grantor_details(&self, conn: &DbConn) -> Value {
        let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).expect("Grantor user not found.");
    pub async fn to_json_grantor_details(&self, conn: &mut DbConn) -> Value {
        let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found.");

        json!({
            "Id": self.uuid,
@@ -88,12 +81,11 @@ impl EmergencyAccess {
        })
    }

    #[allow(clippy::manual_map)]
    pub fn to_json_grantee_details(&self, conn: &DbConn) -> Value {
    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
        let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
            Some(User::find_by_uuid(grantee_uuid, conn).expect("Grantee user not found."))
            Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
        } else if let Some(email) = self.email.as_deref() {
            Some(User::find_by_mail(email, conn).expect("Grantee user not found."))
            Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
        } else {
            None
        };
@@ -111,7 +103,7 @@ impl EmergencyAccess {
        }
    }

#[derive(Copy, Clone, PartialEq, Eq, num_derive::FromPrimitive)]
#[derive(Copy, Clone)]
pub enum EmergencyAccessType {
    View = 0,
    Takeover = 1,
@@ -127,18 +119,6 @@ impl EmergencyAccessType {
    }
}

impl PartialEq<i32> for EmergencyAccessType {
    fn eq(&self, other: &i32) -> bool {
        *other == *self as i32
    }
}

impl PartialEq<EmergencyAccessType> for i32 {
    fn eq(&self, other: &EmergencyAccessType) -> bool {
        *self == *other as i32
    }
}
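With PartialEq/Eq derived on the enum and the manual PartialEq<i32> bridge impls removed, integer comparisons go through an explicit as i32 cast, and num_derive::FromPrimitive covers the reverse direction. A short sketch of both directions (assuming the num_traits crate is in scope, which the derive implies; the classify function is illustrative):

use num_traits::FromPrimitive;

// Illustrative conversion helper, not from this diff.
fn classify(atype: i32) {
    // i32 -> enum: provided by the num_derive::FromPrimitive derive.
    match EmergencyAccessType::from_i32(atype) {
        Some(EmergencyAccessType::View) => println!("view-only access"),
        Some(EmergencyAccessType::Takeover) => println!("takeover access"),
        None => println!("unknown access type"),
    }
    // enum -> i32: an explicit cast replaces the removed PartialEq<i32> impls.
    assert_eq!(EmergencyAccessType::View as i32, 0);
}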

pub enum EmergencyAccessStatus {
    Invited = 0,
    Accepted = 1,
@@ -149,14 +129,9 @@ pub enum EmergencyAccessStatus {

// region Database methods

use crate::db::DbConn;

use crate::api::EmptyResult;
use crate::error::MapResult;

impl EmergencyAccess {
    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.grantor_uuid, conn);
    pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.grantor_uuid, conn).await;
        self.updated_at = Utc::now().naive_utc();

        db_run! { conn:
@@ -190,18 +165,57 @@ impl EmergencyAccess {
        }
    }

    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        for ea in Self::find_all_by_grantor_uuid(user_uuid, conn) {
            ea.delete(conn)?;
    pub async fn update_access_status_and_save(
        &mut self,
        status: i32,
        date: &NaiveDateTime,
        conn: &mut DbConn,
    ) -> EmptyResult {
        // Update the grantee so that it will refresh its status.
        User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await;
        self.status = status;
        self.updated_at = date.to_owned();

        db_run! {conn: {
            crate::util::retry(|| {
                diesel::update(emergency_access::table.filter(emergency_access::uuid.eq(&self.uuid)))
                    .set((emergency_access::status.eq(status), emergency_access::updated_at.eq(date)))
                    .execute(conn)
            }, 10)
            .map_res("Error updating emergency access status")
        }}
    }
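Both update helpers wrap the diesel update in crate::util::retry(..., 10), which re-runs the closure on failure, useful for transient errors such as SQLite's database-locked condition. The helper itself is not shown in this diff; a minimal sketch of what such a function plausibly looks like (the back-off interval is an assumption, not taken from the source):

// Hypothetical shape of crate::util::retry, for illustration only:
// re-run `func` up to `max_tries` times, returning the first success
// or the last error.
fn retry<F, T, E>(mut func: F, max_tries: u32) -> Result<T, E>
where
    F: FnMut() -> Result<T, E>,
{
    let mut tries = 0;
    loop {
        match func() {
            Ok(v) => return Ok(v),
            Err(e) => {
                tries += 1;
                if tries >= max_tries {
                    return Err(e);
                }
                // Assumed back-off; the real helper may differ.
                std::thread::sleep(std::time::Duration::from_millis(500));
            }
        }
    }
}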

    pub async fn update_last_notification_date_and_save(
        &mut self,
        date: &NaiveDateTime,
        conn: &mut DbConn,
    ) -> EmptyResult {
        self.last_notification_at = Some(date.to_owned());
        self.updated_at = date.to_owned();

        db_run! {conn: {
            crate::util::retry(|| {
                diesel::update(emergency_access::table.filter(emergency_access::uuid.eq(&self.uuid)))
                    .set((emergency_access::last_notification_at.eq(date), emergency_access::updated_at.eq(date)))
                    .execute(conn)
            }, 10)
            .map_res("Error updating emergency access status")
        }}
    }

    pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
        for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await {
            ea.delete(conn).await?;
        }
        for ea in Self::find_all_by_grantee_uuid(user_uuid, conn) {
            ea.delete(conn)?;
        for ea in Self::find_all_by_grantee_uuid(user_uuid, conn).await {
            ea.delete(conn).await?;
        }
        Ok(())
    }
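delete_all_by_user removes the user's emergency-access rows in both directions: the grants they issued (as grantor) and the grants extended to them (as grantee). A sketch of where such a call would sit, for example inside an account-deletion routine (the purge_user function is illustrative, not from this diff):

// Illustrative call site: purge a user's emergency-access data
// before deleting the account itself.
async fn purge_user(user: &User, conn: &mut DbConn) -> EmptyResult {
    EmergencyAccess::delete_all_by_user(&user.uuid, conn).await?;
    // ... delete the user's other data, then the user row ...
    Ok(())
}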

    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.grantor_uuid, conn);
    pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.grantor_uuid, conn).await;

        db_run! { conn: {
            diesel::delete(emergency_access::table.filter(emergency_access::uuid.eq(self.uuid)))
@@ -210,7 +224,7 @@ impl EmergencyAccess {
        }}
    }

    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::uuid.eq(uuid))
@@ -219,11 +233,11 @@ impl EmergencyAccess {
        }}
    }

    pub fn find_by_grantor_uuid_and_grantee_uuid_or_email(
    pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
        grantor_uuid: &str,
        grantee_uuid: &str,
        email: &str,
        conn: &DbConn,
        conn: &mut DbConn,
    ) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
@@ -234,15 +248,16 @@ impl EmergencyAccess {
        }}
    }

    pub fn find_all_recoveries(conn: &DbConn) -> Vec<Self> {
    pub async fn find_all_recoveries_initiated(conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::status.eq(EmergencyAccessStatus::RecoveryInitiated as i32))
                .filter(emergency_access::recovery_initiated_at.is_not_null())
                .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
        }}
    }
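Besides going async, this finder is renamed from find_all_recoveries to find_all_recoveries_initiated and gains a recovery_initiated_at.is_not_null() guard, so rows whose status says RecoveryInitiated but whose timestamp was never set are skipped. A sketch of the kind of periodic job that would consume it (the check_pending_recoveries function is illustrative, not from this diff):

// Illustrative background job: scan initiated recoveries and act on
// the ones whose waiting period has elapsed.
async fn check_pending_recoveries(conn: &mut DbConn) {
    for ea in EmergencyAccess::find_all_recoveries_initiated(conn).await {
        // e.g. compare ea.recovery_initiated_at plus ea.wait_time_days
        // against the current time, then approve or notify the grantor.
        let _ = ea;
    }
}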

    pub fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::uuid.eq(uuid))
@@ -252,7 +267,7 @@ impl EmergencyAccess {
        }}
    }

    pub fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &DbConn) -> Vec<Self> {
    pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::grantee_uuid.eq(grantee_uuid))
@@ -260,7 +275,7 @@ impl EmergencyAccess {
        }}
    }

    pub fn find_invited_by_grantee_email(grantee_email: &str, conn: &DbConn) -> Option<Self> {
    pub async fn find_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::email.eq(grantee_email))
@@ -270,7 +285,7 @@ impl EmergencyAccess {
        }}
    }

    pub fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &DbConn) -> Vec<Self> {
    pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::grantor_uuid.eq(grantor_uuid))