Mirror of https://github.com/dani-garcia/vaultwarden.git, synced 2026-01-16 20:50:33 +00:00

Compare commits: 92 commits
Commits in this comparison (abbreviated SHA1):

d04b94b77d, 247d0706ff, 0e8b410798, fda77afc2a, d9835f530c, bd91964170,
d42b264a93, a4c7fadbf4, 8e2a87fd79, 4233dbf3db, a2bf8def2a, 8f05a90b96,
9082e7cebb, 55fdee3bf8, 377969ea67, f05398a6b3, 9555ac7bb8, f01ef40a8e,
8e7b27cc36, d230ee087c, f8f14727b9, 753a9e0bae, f5fb69b64f, 3261534438,
46762d9fde, 6cadb2627a, 0fe93edea6, e9aa5a545e, 9dcc738f85, 84a7c7da5d,
ca9234ed86, 27dc67fadd, 2ad33ec97f, e1a8df96db, e42a37c6c1, 129b835ac7,
2d98aa3045, 93636eb3c3, 1e42755187, ce8efcc48f, 79ce5b49bc, 7c3cad197c,
000c606029, 29144b2ce0, ea04b6f151, 3427217686, a1fbd6d729, 2cbfe6fa5b,
d86c4f2c23, 6d73f30b4f, d0c22b9fc9, d6b97090fa, 94b077cb2d, bb2412d033,
b9bdc9b8e2, 897bdf8343, 569add453d, 77cd5b5954, 4438da39f9, 0b2383ab56,
ad1d65bdf8, 3b283c289e, 4b9384cb2b, 0f39d96518, edf7484a70, 8b66e34415,
1d00e34bbb, 1b801406d6, 5e46a43306, 5c77431c2d, 2775c6ce8a, 890e668071,
596c167312, ae3a153bdb, 2c36993792, d672ad3f76, a641b48884, 98b2178c7d,
76a3f0f531, c5665e7b77, cbdcf8ef9f, 3337594d60, 2daa8be1f1, eccb3ab947,
3246251f29, 8ab200224e, 34e00e1478, 0fdda3bc2f, 48836501bf, f863ffb89a,
03c6ed2e07, efc6eb0073
.env.template (550 changed lines)
```diff
@@ -10,39 +10,13 @@
 ## variable ENV_FILE can be set to the location of this file prior to starting
 ## Vaultwarden.
 
+####################
+### Data folders ###
+####################
 
 ## Main data folder
 # DATA_FOLDER=data
 
-## Database URL
-## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
-# DATABASE_URL=data/db.sqlite3
-## When using MySQL, specify an appropriate connection URI.
-## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
-# DATABASE_URL=mysql://user:password@host[:port]/database_name
-## When using PostgreSQL, specify an appropriate connection URI (recommended)
-## or keyword/value connection string.
-## Details:
-## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
-## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
-# DATABASE_URL=postgresql://user:password@host[:port]/database_name
-
-## Database max connections
-## Define the size of the connection pool used for connecting to the database.
-# DATABASE_MAX_CONNS=10
-
-## Database timeout
-## Timeout when acquiring database connection
-# DATABASE_TIMEOUT=30
-
-## Database connection initialization
-## Allows SQL statements to be run whenever a new database connection is created.
-## This is mainly useful for connection-scoped pragmas.
-## If empty, a database-specific default is used:
-## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
-## - MySQL: ""
-## - PostgreSQL: ""
-# DATABASE_CONN_INIT=""
-
 ## Individual folders, these override %DATA_FOLDER%
 # RSA_KEY_FILENAME=data/rsa_key
 # ICON_CACHE_FOLDER=data/icon_cache
```
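This hunk leaves the data-folder block with path settings only; everything database-related moves into its own section below. As a quick sketch of how the overrides compose in a deployed `.env` (the paths are hypothetical, not values the template prescribes):

```bash
# DATA_FOLDER is the base; the individual entries override the
# corresponding %DATA_FOLDER% defaults per item (paths are illustrative).
DATA_FOLDER=/var/lib/vaultwarden
RSA_KEY_FILENAME=/var/lib/vaultwarden/keys/rsa_key   # instead of %DATA_FOLDER%/rsa_key
ICON_CACHE_FOLDER=/var/cache/vaultwarden/icons       # icon cache on a separate volume
```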
```diff
@@ -52,65 +26,85 @@
 
 ## Templates data folder, by default uses embedded templates
 ## Check source code to see the format
-# TEMPLATES_FOLDER=/path/to/templates
+# TEMPLATES_FOLDER=data/templates
 ## Automatically reload the templates for every request, slow, use only for development
 # RELOAD_TEMPLATES=false
 
-## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
-## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Real-IP
-
-## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
-# ICON_CACHE_TTL=2592000
-## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
-# ICON_CACHE_NEGTTL=259200
-
 ## Web vault settings
 # WEB_VAULT_FOLDER=web-vault/
 # WEB_VAULT_ENABLED=true
 
-## Enables websocket notifications
-# WEBSOCKET_ENABLED=false
+#########################
+### Database settings ###
+#########################
 
-## Controls the WebSocket server address and port
-# WEBSOCKET_ADDRESS=0.0.0.0
-# WEBSOCKET_PORT=3012
+## Database URL
+## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
+# DATABASE_URL=data/db.sqlite3
+## When using MySQL, specify an appropriate connection URI.
+## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
+# DATABASE_URL=mysql://user:password@host[:port]/database_name
+## When using PostgreSQL, specify an appropriate connection URI (recommended)
+## or keyword/value connection string.
+## Details:
+## - https://docs.diesel.rs/2.1.x/diesel/pg/struct.PgConnection.html
+## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
+# DATABASE_URL=postgresql://user:password@host[:port]/database_name
+
+## Enable WAL for the DB
+## Set to false to avoid enabling WAL during startup.
+## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
+## this setting only prevents Vaultwarden from automatically enabling it on start.
+## Please read project wiki page about this setting first before changing the value as it can
+## cause performance degradation or might render the service unable to start.
+# ENABLE_DB_WAL=true
+
+## Database connection retries
+## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
+# DB_CONNECTION_RETRIES=15
+
+## Database timeout
+## Timeout when acquiring database connection
+# DATABASE_TIMEOUT=30
+
+## Database max connections
+## Define the size of the connection pool used for connecting to the database.
+# DATABASE_MAX_CONNS=10
+
+## Database connection initialization
+## Allows SQL statements to be run whenever a new database connection is created.
+## This is mainly useful for connection-scoped pragmas.
+## If empty, a database-specific default is used:
+## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
+## - MySQL: ""
+## - PostgreSQL: ""
+# DATABASE_CONN_INIT=""
+
+#################
+### WebSocket ###
+#################
+
+## Enable websocket notifications
+# ENABLE_WEBSOCKET=true
+
+##########################
+### Push notifications ###
+##########################
 
 ## Enables push notifications (requires key and id from https://bitwarden.com/host)
-# PUSH_ENABLED=true
+## If you choose "European Union" Data Region, uncomment PUSH_RELAY_URI and PUSH_IDENTITY_URI then replace .com by .eu
+## Details about mobile client push notification:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
+# PUSH_ENABLED=false
 # PUSH_INSTALLATION_ID=CHANGEME
 # PUSH_INSTALLATION_KEY=CHANGEME
 ## Don't change this unless you know what you're doing.
 # PUSH_RELAY_URI=https://push.bitwarden.com
+# PUSH_IDENTITY_URI=https://identity.bitwarden.com
 
-## Controls whether users are allowed to create Bitwarden Sends.
-## This setting applies globally to all users.
-## To control this on a per-org basis instead, use the "Disable Send" org policy.
-# SENDS_ALLOWED=true
-
-## Controls whether users can enable emergency access to their accounts.
-## This setting applies globally to all users.
-# EMERGENCY_ACCESS_ALLOWED=true
-
-## Controls whether event logging is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
-# ORG_EVENTS_ENABLED=false
-
-## Controls whether users can change their email.
-## This setting applies globally to all users
-# EMAIL_CHANGE_ALLOWED=true
-
-## Number of days to retain events stored in the database.
-## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
-# EVENTS_DAYS_RETAIN=
-
-## BETA FEATURE: Groups
-## Controls whether group support is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default because this is a beta feature, it contains known issues!
-## KNOW WHAT YOU ARE DOING!
-# ORG_GROUPS_ENABLED=false
+#####################
+### Schedule jobs ###
+#####################
 
 ## Job scheduler settings
 ##
```
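The three `DATABASE_URL` forms gathered into the new Database settings section differ only in scheme. A sketch with placeholder hosts and credentials; exactly one of these would be uncommented in a real config:

```bash
# Only one DATABASE_URL applies; user, password and host are placeholders.
DATABASE_URL=data/db.sqlite3                                                # SQLite: plain file path
#DATABASE_URL=mysql://vaultwarden:secret@db.internal:3306/vaultwarden       # MySQL URI
#DATABASE_URL=postgresql://vaultwarden:secret@db.internal:5432/vaultwarden  # PostgreSQL URI
```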
```diff
@@ -151,60 +145,69 @@
 ## Cron schedule of the job that cleans old events from the event table.
 ## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
 # EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
-## Enable extended logging, which shows timestamps and targets in the logs
-# EXTENDED_LOGGING=true
+## Number of days to retain events stored in the database.
+## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
+# EVENTS_DAYS_RETAIN=
 
-## Timestamp format used in extended logging.
-## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
-# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
-
-## Logging to file
-# LOG_FILE=/path/to/log
-
-## Logging to Syslog
-## This requires extended logging
-# USE_SYSLOG=false
-
-## Log level
-## Change the verbosity of the log output
-## Valid values are "trace", "debug", "info", "warn", "error" and "off"
-## Setting it to "trace" or "debug" would also show logs for mounted
-## routes and static file, websocket and alive requests
-# LOG_LEVEL=Info
-
-## Enable WAL for the DB
-## Set to false to avoid enabling WAL during startup.
-## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents Vaultwarden from automatically enabling it on start.
-## Please read project wiki page about this setting first before changing the value as it can
-## cause performance degradation or might render the service unable to start.
-# ENABLE_DB_WAL=true
-
-## Database connection retries
-## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
-# DB_CONNECTION_RETRIES=15
-
-## Icon service
-## The predefined icon services are: internal, bitwarden, duckduckgo, google.
-## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
-## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
 ##
-## `internal` refers to Vaultwarden's built-in icon fetching implementation.
-## If an external service is set, an icon request to Vaultwarden will return an HTTP
-## redirect to the corresponding icon at the external service. An external service may
-## be useful if your Vaultwarden instance has no external network connectivity, or if
-## you are concerned that someone may probe your instance to try to detect whether icons
-## for certain sites have been cached.
-# ICON_SERVICE=internal
-
-## Icon redirect code
-## The HTTP status code to use for redirects to an external icon service.
-## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
-## Temporary redirects are useful while testing different icon services, but once a service
-## has been decided on, consider using permanent redirects for cacheability. The legacy codes
-## are currently better supported by the Bitwarden clients.
-# ICON_REDIRECT_CODE=302
+## Cron schedule of the job that cleans old auth requests from the auth request.
+## Defaults to every minute. Set blank to disable this job.
+# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
+
+########################
+### General settings ###
+########################
+
+## Domain settings
+## The domain must match the address from where you access the server
+## It's recommended to configure this value, otherwise certain functionality might not work,
+## like attachment downloads, email links and U2F.
+## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
+## To use HTTPS, the recommended way is to put Vaultwarden behind a reverse proxy
+## Details:
+## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS
+## - https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples
+## For development
+# DOMAIN=http://localhost
+## For public server
+# DOMAIN=https://vw.domain.tld
+## For public server (URL with port number)
+# DOMAIN=https://vw.domain.tld:8443
+## For public server (URL with path)
+# DOMAIN=https://domain.tld/vw
+
+## Controls whether users are allowed to create Bitwarden Sends.
+## This setting applies globally to all users.
+## To control this on a per-org basis instead, use the "Disable Send" org policy.
+# SENDS_ALLOWED=true
+
+## HIBP Api Key
+## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
+# HIBP_API_KEY=
+
+## Per-organization attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per organization.
+## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
+# ORG_ATTACHMENT_LIMIT=
+## Per-user attachment storage limit (KB)
+## Max kilobytes of attachment storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further attachments.
+# USER_ATTACHMENT_LIMIT=
+## Per-user send storage limit (KB)
+## Max kilobytes of send storage allowed per user.
+## When this limit is reached, the user will not be allowed to upload further sends.
+# USER_SEND_LIMIT=
+
+## Number of days to wait before auto-deleting a trashed item.
+## If unset (the default), trashed items are not auto-deleted.
+## This setting applies globally, so make sure to inform all users of any changes to this setting.
+# TRASH_AUTO_DELETE_DAYS=
+
+## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
+## resulting in an email notification. An incomplete 2FA login is one where the correct
+## master password was provided but the required 2FA step was not completed, which
+## potentially indicates a master password compromise. Set to 0 to disable this check.
+## This setting applies globally to all users.
+# INCOMPLETE_2FA_TIME_LIMIT=3
+
 ## Disable icon downloading
 ## Set to true to disable icon downloading in the internal icon service.
```
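The scheduler entries in this template are six-field cron expressions; judging from the defaults above, the leading field is seconds (sec min hour day-of-month month day-of-week). Decoding the two jobs under that assumption:

```bash
# Six-field cron, assumed field order: sec min hour dom mon dow
EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"       # daily at 00:10:00
AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"  # at second 30 of every minute
```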
```diff
@@ -213,38 +216,6 @@
 ## will be deleted eventually, but won't be downloaded again.
 # DISABLE_ICON_DOWNLOAD=false
 
-## Icon download timeout
-## Configure the timeout value when downloading the favicons.
-## The default is 10 seconds, but this could be to low on slower network connections
-# ICON_DOWNLOAD_TIMEOUT=10
-
-## Icon blacklist Regex
-## Any domains or IPs that match this regex won't be fetched by the icon service.
-## Useful to hide other servers in the local network. Check the WIKI for more details
-## NOTE: Always enclose this regex withing single quotes!
-# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
-
-## Any IP which is not defined as a global IP will be blacklisted.
-## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
-# ICON_BLACKLIST_NON_GLOBAL_IPS=true
-
-## Disable 2FA remember
-## Enabling this would force the users to use a second factor to login every time.
-## Note that the checkbox would still be present, but ignored.
-# DISABLE_2FA_REMEMBER=false
-
-## Maximum attempts before an email token is reset and a new email will need to be sent.
-# EMAIL_ATTEMPTS_LIMIT=3
-
-## Token expiration time
-## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
-# EMAIL_EXPIRATION_TIME=600
-
-## Email token size
-## Number of digits in an email 2FA token (min: 6, max: 255).
-## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
-# EMAIL_TOKEN_SIZE=6
-
 ## Controls if new users can register
 # SIGNUPS_ALLOWED=true
 
```
```diff
@@ -266,6 +237,11 @@
 ## even if SIGNUPS_ALLOWED is set to false
 # SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
 
+## Controls whether event logging is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
+# ORG_EVENTS_ENABLED=false
+
 ## Controls which users can create new orgs.
 ## Blank or 'all' means all users can create orgs (this is the default):
 # ORG_CREATION_USERS=
```
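Per the comments in this hunk, `ORG_EVENTS_ENABLED` only pays off together with the retention settings: without `EVENTS_DAYS_RETAIN` the cleanup job never starts. A sketch with an arbitrary 90-day window:

```bash
# Event logging with bounded retention; 90 days is an illustrative value.
ORG_EVENTS_ENABLED=true
EVENTS_DAYS_RETAIN=90
EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"  # keep the default daily cleanup job
```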
```diff
@@ -274,6 +250,122 @@
 ## A comma-separated list means only those users can create orgs:
 # ORG_CREATION_USERS=admin1@example.com,admin2@example.com
 
+## Invitations org admins to invite users, even when signups are disabled
+# INVITATIONS_ALLOWED=true
+## Name shown in the invitation emails that don't come from a specific organization
+# INVITATION_ORG_NAME=Vaultwarden
+
+## The number of hours after which an organization invite token, emergency access invite token,
+## email verification token and deletion request token will expire (must be at least 1)
+# INVITATION_EXPIRATION_HOURS=120
+
+## Controls whether users can enable emergency access to their accounts.
+## This setting applies globally to all users.
+# EMERGENCY_ACCESS_ALLOWED=true
+
+## Controls whether users can change their email.
+## This setting applies globally to all users
+# EMAIL_CHANGE_ALLOWED=true
+
+## Number of server-side passwords hashing iterations for the password hash.
+## The default for new users. If changed, it will be updated during login for existing users.
+# PASSWORD_ITERATIONS=600000
+
+## Controls whether users can set password hints. This setting applies globally to all users.
+# PASSWORD_HINTS_ALLOWED=true
+
+## Controls whether a password hint should be shown directly in the web page if
+## SMTP service is not configured. Not recommended for publicly-accessible instances
+## as this provides unauthenticated access to potentially sensitive data.
+# SHOW_PASSWORD_HINT=false
+
+#########################
+### Advanced settings ###
+#########################
+
+## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
+## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
+# IP_HEADER=X-Real-IP
+
+## Icon service
+## The predefined icon services are: internal, bitwarden, duckduckgo, google.
+## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
+## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
+##
+## `internal` refers to Vaultwarden's built-in icon fetching implementation.
+## If an external service is set, an icon request to Vaultwarden will return an HTTP
+## redirect to the corresponding icon at the external service. An external service may
+## be useful if your Vaultwarden instance has no external network connectivity, or if
+## you are concerned that someone may probe your instance to try to detect whether icons
+## for certain sites have been cached.
+# ICON_SERVICE=internal
+
+## Icon redirect code
+## The HTTP status code to use for redirects to an external icon service.
+## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
+## Temporary redirects are useful while testing different icon services, but once a service
+## has been decided on, consider using permanent redirects for cacheability. The legacy codes
+## are currently better supported by the Bitwarden clients.
+# ICON_REDIRECT_CODE=302
+
+## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
+## Default: 2592000 (30 days)
+# ICON_CACHE_TTL=2592000
+## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
+## Default: 2592000 (3 days)
+# ICON_CACHE_NEGTTL=259200
+
+## Icon download timeout
+## Configure the timeout value when downloading the favicons.
+## The default is 10 seconds, but this could be to low on slower network connections
+# ICON_DOWNLOAD_TIMEOUT=10
+
+## Icon blacklist Regex
+## Any domains or IPs that match this regex won't be fetched by the icon service.
+## Useful to hide other servers in the local network. Check the WIKI for more details
+## NOTE: Always enclose this regex withing single quotes!
+# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
+
+## Any IP which is not defined as a global IP will be blacklisted.
+## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
+# ICON_BLACKLIST_NON_GLOBAL_IPS=true
+
+## Client Settings
+## Enable experimental feature flags for clients.
+## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
+##
+## The following flags are available:
+## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
+## - "autofill-v2": Use the new autofill implementation.
+## - "browser-fileless-import": Directly import credentials from other providers without a file.
+## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
+# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
+
+## Require new device emails. When a user logs in an email is required to be sent.
+## If sending the email fails the login attempt will fail!!
+# REQUIRE_DEVICE_EMAIL=false
+
+## Enable extended logging, which shows timestamps and targets in the logs
+# EXTENDED_LOGGING=true
+
+## Timestamp format used in extended logging.
+## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
+# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
+
+## Logging to Syslog
+## This requires extended logging
+# USE_SYSLOG=false
+
+## Logging to file
+# LOG_FILE=/path/to/log
+
+## Log level
+## Change the verbosity of the log output
+## Valid values are "trace", "debug", "info", "warn", "error" and "off"
+## Setting it to "trace" or "debug" would also show logs for mounted
+## routes and static file, websocket and alive requests
+# LOG_LEVEL=info
+
 ## Token for the admin interface, preferably an Argon2 PCH string
 ## Vaultwarden has a built-in generator by calling `vaultwarden hash`
 ## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
```
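The context lines above reference `vaultwarden hash` for generating the recommended Argon2 hash string. A sketch of that flow; the hash shown is a truncated illustration, not real output:

```bash
# Generate an Argon2 hash for the admin token; the binary prompts for the
# password and prints the PHC string (illustrative output below).
vaultwarden hash
# ADMIN_TOKEN='$argon2id$v=19$m=65536,t=3,p=4$...'
# Single quotes matter in shell contexts: they stop `$` expansion.
```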
```diff
@@ -289,54 +381,13 @@
 ## meant to be used with the use of a separate auth layer in front
 # DISABLE_ADMIN_TOKEN=false
 
-## Invitations org admins to invite users, even when signups are disabled
-# INVITATIONS_ALLOWED=true
-## Name shown in the invitation emails that don't come from a specific organization
-# INVITATION_ORG_NAME=Vaultwarden
+## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
+# ADMIN_RATELIMIT_SECONDS=300
+## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
+# ADMIN_RATELIMIT_MAX_BURST=3
 
-## The number of hours after which an organization invite token, emergency access invite token,
-## email verification token and deletion request token will expire (must be at least 1)
-# INVITATION_EXPIRATION_HOURS=120
-
-## Per-organization attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per organization.
-## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
-# ORG_ATTACHMENT_LIMIT=
-## Per-user attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per user.
-## When this limit is reached, the user will not be allowed to upload further attachments.
-# USER_ATTACHMENT_LIMIT=
-
-## Number of days to wait before auto-deleting a trashed item.
-## If unset (the default), trashed items are not auto-deleted.
-## This setting applies globally, so make sure to inform all users of any changes to this setting.
-# TRASH_AUTO_DELETE_DAYS=
-
-## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
-## resulting in an email notification. An incomplete 2FA login is one where the correct
-## master password was provided but the required 2FA step was not completed, which
-## potentially indicates a master password compromise. Set to 0 to disable this check.
-## This setting applies globally to all users.
-# INCOMPLETE_2FA_TIME_LIMIT=3
-
-## Number of server-side passwords hashing iterations for the password hash.
-## The default for new users. If changed, it will be updated during login for existing users.
-# PASSWORD_ITERATIONS=350000
-
-## Controls whether users can set password hints. This setting applies globally to all users.
-# PASSWORD_HINTS_ALLOWED=true
-
-## Controls whether a password hint should be shown directly in the web page if
-## SMTP service is not configured. Not recommended for publicly-accessible instances
-## as this provides unauthenticated access to potentially sensitive data.
-# SHOW_PASSWORD_HINT=false
-
-## Domain settings
-## The domain must match the address from where you access the server
-## It's recommended to configure this value, otherwise certain functionality might not work,
-## like attachment downloads, email links and U2F.
-## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
-# DOMAIN=https://vw.domain.tld:8443
+## Set the lifetime of admin sessions to this value (in minutes).
+# ADMIN_SESSION_LIFETIME=20
 
 ## Allowed iframe ancestors (Know the risks!)
 ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
```
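Read together, the relocated admin rate-limit settings describe token-bucket style throttling: one request per `ADMIN_RATELIMIT_SECONDS` on average, with short bursts up to `ADMIN_RATELIMIT_MAX_BURST`. With the defaults:

```bash
# Defaults: roughly 1 admin login attempt per 300 s sustained, bursts of up to 3.
ADMIN_RATELIMIT_SECONDS=300
ADMIN_RATELIMIT_MAX_BURST=3
# i.e. 3 quick attempts are tolerated, then about one attempt per 5 minutes.
```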
```diff
@@ -351,13 +402,16 @@
 ## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
 # LOGIN_RATELIMIT_MAX_BURST=10
 
-## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
-# ADMIN_RATELIMIT_SECONDS=300
-## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
-# ADMIN_RATELIMIT_MAX_BURST=3
+## BETA FEATURE: Groups
+## Controls whether group support is enabled for organizations
+## This setting applies to organizations.
+## Disabled by default because this is a beta feature, it contains known issues!
+## KNOW WHAT YOU ARE DOING!
+# ORG_GROUPS_ENABLED=false
 
-## Set the lifetime of admin sessions to this value (in minutes).
-# ADMIN_SESSION_LIFETIME=20
+########################
+### MFA/2FA settings ###
+########################
 
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
```
```diff
@@ -378,6 +432,30 @@
 ## After that, you should be able to follow the rest of the guide linked above,
 ## ignoring the fields that ask for the values that you already configured beforehand.
 
+## Email 2FA settings
+## Email token size
+## Number of digits in an email 2FA token (min: 6, max: 255).
+## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
+# EMAIL_TOKEN_SIZE=6
+##
+## Token expiration time
+## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
+# EMAIL_EXPIRATION_TIME=600
+##
+## Maximum attempts before an email token is reset and a new email will need to be sent.
+# EMAIL_ATTEMPTS_LIMIT=3
+##
+## Setup email 2FA regardless of any organization policy
+# EMAIL_2FA_ENFORCE_ON_VERIFIED_INVITE=false
+## Automatically setup email 2FA as fallback provider when needed
+# EMAIL_2FA_AUTO_FALLBACK=false
+
+## Other MFA/2FA settings
+## Disable 2FA remember
+## Enabling this would force the users to use a second factor to login every time.
+## Note that the checkbox would still be present, but ignored.
+# DISABLE_2FA_REMEMBER=false
+##
 ## Authenticator Settings
 ## Disable authenticator time drifted codes to be valid.
 ## TOTP codes of the previous and next 30 seconds will be invalid
```
```diff
@@ -390,12 +468,9 @@
 ## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
 # AUTHENTICATOR_DISABLE_TIME_DRIFT=false
 
-## Rocket specific settings
-## See https://rocket.rs/v0.4/guide/configuration/ for more details.
-# ROCKET_ADDRESS=0.0.0.0
-# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
-# ROCKET_WORKERS=10
-# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
+###########################
+### SMTP Email settings ###
+###########################
 
 ## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
 ## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
```
```diff
@@ -403,12 +478,19 @@
 # SMTP_HOST=smtp.domain.tld
 # SMTP_FROM=vaultwarden@domain.tld
 # SMTP_FROM_NAME=Vaultwarden
-# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
-# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
 # SMTP_TIMEOUT=15
 
+## Choose the type of secure connection for SMTP. The default is "starttls".
+## The available options are:
+## - "starttls": The default port is 587.
+## - "force_tls": The default port is 465.
+## - "off": The default port is 25.
+## Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
+# SMTP_SECURITY=starttls
+# SMTP_PORT=587
+
 # Whether to send mail via the `sendmail` command
 # USE_SENDMAIL=false
 # Which sendmail command to use. The one found in the $PATH is used if not specified.
```
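With `SMTP_SECURITY` and `SMTP_PORT` now split out of the old inline comments, a minimal mail setup pairs them explicitly. A sketch assuming the STARTTLS default; host, addresses and credentials are placeholders:

```bash
# Minimal STARTTLS mail config; all values here are placeholders.
SMTP_HOST=smtp.example.com
SMTP_FROM=vaultwarden@example.com
SMTP_SECURITY=starttls   # "force_tls" pairs with port 465, "off" with port 25
SMTP_PORT=587
SMTP_USERNAME=mailer
SMTP_PASSWORD=secret
```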
```diff
@@ -417,7 +499,7 @@
 ## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
 ## Possible values: ["Plain", "Login", "Xoauth2"].
 ## Multiple options need to be separated by a comma ','.
-# SMTP_AUTH_MECHANISM="Plain"
+# SMTP_AUTH_MECHANISM=
 
 ## Server name sent during the SMTP HELO
 ## By default this value should be is on the machine's hostname,
```
```diff
@@ -425,30 +507,34 @@
 # HELO_NAME=
 
 ## Embed images as email attachments
-# SMTP_EMBED_IMAGES=false
+# SMTP_EMBED_IMAGES=true
 
 ## SMTP debugging
 ## When set to true this will output very detailed SMTP messages.
 ## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
 # SMTP_DEBUG=false
 
-## Accept Invalid Hostnames
-## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
-## Only use this as a last resort if you are not able to use a valid certificate.
-# SMTP_ACCEPT_INVALID_HOSTNAMES=false
-
 ## Accept Invalid Certificates
 ## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
 ## Only use this as a last resort if you are not able to use a valid certificate.
 ## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
 # SMTP_ACCEPT_INVALID_CERTS=false
 
-## Require new device emails. When a user logs in an email is required to be sent.
-## If sending the email fails the login attempt will fail!!
-# REQUIRE_DEVICE_EMAIL=false
+## Accept Invalid Hostnames
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+# SMTP_ACCEPT_INVALID_HOSTNAMES=false
+
+#######################
+### Rocket settings ###
+#######################
+
+## Rocket specific settings
+## See https://rocket.rs/v0.5/guide/configuration/ for more details.
+# ROCKET_ADDRESS=0.0.0.0
+## The default port is 8000, unless running in a Docker container, in which case it is 80.
+# ROCKET_PORT=8000
+# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
 
-## HIBP Api Key
-## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
-# HIBP_API_KEY=
-
 # vim: syntax=ini
```
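The rewritten Rocket section documents the port split (8000 natively, 80 in the Docker images). A sketch of overriding it; the `/alive` probe leans on the liveness route mentioned in the log-level comments and is illustrative:

```bash
# Hypothetical override: bind all interfaces on a non-default port.
ROCKET_ADDRESS=0.0.0.0
ROCKET_PORT=8080
# After startup, the server should answer its alive route:
#   curl -s http://localhost:8080/alive
```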
.github/CODEOWNERS (vendored, new file, 3 changed lines)
```diff
@@ -0,0 +1,3 @@
+/.github @dani-garcia @BlackDex
+/.github/CODEOWNERS @dani-garcia @BlackDex
+/.github/workflows/** @dani-garcia @BlackDex
```
.github/workflows/build.yml (vendored, 8 changed lines)
```diff
@@ -46,7 +46,7 @@ jobs:
     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7
       # End Checkout the repo
 
 
@@ -74,7 +74,7 @@ jobs:
 
       # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
+        uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 # master @ Jun 13, 2024, 6:20 PM GMT+2
        if: ${{ matrix.channel == 'rust-toolchain' }}
        with:
          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -84,7 +84,7 @@ jobs:
 
      # Install the any other channel to be used for which we do not execute clippy and rustfmt
      - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
+        uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 # master @ Jun 13, 2024, 6:20 PM GMT+2
       if: ${{ matrix.channel != 'rust-toolchain' }}
       with:
         toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -106,7 +106,7 @@ jobs:
       # End Show environment
 
       # Enable Rust Caching
-      - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
+      - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
        with:
          # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
          # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
```
.github/workflows/hadolint.yml (vendored, 2 changed lines)
```diff
@@ -13,7 +13,7 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
       # End Checkout the repo
 
       # Download hadolint - https://github.com/hadolint/hadolint/releases
```
.github/workflows/release.yml (vendored, 140 changed lines)
@@ -2,21 +2,10 @@ name: Release
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
paths:
|
branches:
|
||||||
- ".github/workflows/release.yml"
|
|
||||||
- "src/**"
|
|
||||||
- "migrations/**"
|
|
||||||
- "docker/**"
|
|
||||||
- "Cargo.*"
|
|
||||||
- "build.rs"
|
|
||||||
- "diesel.toml"
|
|
||||||
- "rust-toolchain.toml"
|
|
||||||
|
|
||||||
branches: # Only on paths above
|
|
||||||
- main
|
- main
|
||||||
- release-build-revision
|
|
||||||
|
|
||||||
tags: # Always, regardless of paths above
|
tags:
|
||||||
- '*'
|
- '*'
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@@ -31,7 +20,7 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Skip Duplicates Actions
|
- name: Skip Duplicates Actions
|
||||||
id: skip_check
|
id: skip_check
|
||||||
uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0
|
uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
|
||||||
with:
|
with:
|
||||||
cancel_others: 'true'
|
cancel_others: 'true'
|
||||||
# Only run this when not creating a tag
|
# Only run this when not creating a tag
|
||||||
@@ -42,12 +31,12 @@ jobs:
|
|||||||
timeout-minutes: 120
|
timeout-minutes: 120
|
||||||
needs: skip_check
|
needs: skip_check
|
||||||
if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
|
if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
|
||||||
# TODO: Start a local docker registry to be used to extract the final Alpine static build images
|
# Start a local docker registry to extract the final Alpine static build binaries
|
||||||
# services:
|
services:
|
||||||
# registry:
|
registry:
|
||||||
# image: registry:2
|
image: registry:2
|
||||||
# ports:
|
ports:
|
||||||
# - 5000:5000
|
- 5000:5000
|
||||||
env:
|
env:
|
||||||
SOURCE_COMMIT: ${{ github.sha }}
|
SOURCE_COMMIT: ${{ github.sha }}
|
||||||
SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
|
SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
|
||||||
@@ -69,7 +58,7 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
# Checkout the repo
|
# Checkout the repo
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
|
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
@@ -80,13 +69,13 @@ jobs:
|
|||||||
|
|
||||||
# Start Docker Buildx
|
# Start Docker Buildx
|
||||||
- name: Setup Docker Buildx
|
- name: Setup Docker Buildx
|
||||||
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
|
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
|
||||||
# https://github.com/moby/buildkit/issues/3969
|
# https://github.com/moby/buildkit/issues/3969
|
||||||
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions
|
# Also set max parallelism to 3, the default of 4 breaks GitHub Actions and causes OOMKills
|
||||||
with:
|
with:
|
||||||
config-inline: |
|
buildkitd-config-inline: |
|
||||||
[worker.oci]
|
[worker.oci]
|
||||||
max-parallelism = 2
|
max-parallelism = 3
|
||||||
driver-opts: |
|
driver-opts: |
|
||||||
network=host
|
network=host
|
||||||
|
|
||||||
@@ -113,7 +102,7 @@ jobs:
|
|||||||
|
|
||||||
# Login to Docker Hub
|
# Login to Docker Hub
|
||||||
- name: Login to Docker Hub
|
- name: Login to Docker Hub
|
||||||
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
|
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
@@ -127,7 +116,7 @@ jobs:
|
|||||||
|
|
||||||
# Login to GitHub Container Registry
|
# Login to GitHub Container Registry
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
|
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
@@ -142,7 +131,7 @@ jobs:
|
|||||||
|
|
||||||
# Login to Quay.io
|
# Login to Quay.io
|
||||||
- name: Login to Quay.io
|
- name: Login to Quay.io
|
||||||
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
|
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
|
||||||
with:
|
with:
|
||||||
registry: quay.io
|
registry: quay.io
|
||||||
username: ${{ secrets.QUAY_USERNAME }}
|
username: ${{ secrets.QUAY_USERNAME }}
|
||||||
@@ -155,8 +144,28 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
|
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}"
|
||||||
|
|
||||||
|
- name: Configure build cache from/to
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
#
|
||||||
|
# Check if there is a GitHub Container Registry Login and use it for caching
|
||||||
|
if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
|
||||||
|
echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
|
||||||
|
echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
|
||||||
|
else
|
||||||
|
echo "BAKE_CACHE_FROM="
|
||||||
|
echo "BAKE_CACHE_TO="
|
||||||
|
fi
|
||||||
|
#
|
||||||
|
|
||||||
|
- name: Add localhost registry
|
||||||
|
if: ${{ matrix.base_image == 'alpine' }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
|
||||||
|
|
||||||
- name: Bake ${{ matrix.base_image }} containers
|
- name: Bake ${{ matrix.base_image }} containers
|
||||||
uses: docker/bake-action@511fde2517761e303af548ec9e0ea74a8a100112 # v4.0.0
|
uses: docker/bake-action@1c5f18a523c4c68524cfbc5161494d8bb5b29d20 # v5.0.1
|
        env:
          BASE_TAGS: "${{ env.BASE_TAGS }}"
          SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@@ -168,3 +177,76 @@ jobs:
           push: true
           files: docker/docker-bake.hcl
           targets: "${{ matrix.base_image }}-multi"
+          set: |
+            *.cache-from=${{ env.BAKE_CACHE_FROM }}
+            *.cache-to=${{ env.BAKE_CACHE_TO }}
+
+      # Extract the Alpine binaries from the containers
+      - name: Extract binaries
+        if: ${{ matrix.base_image == 'alpine' }}
+        shell: bash
+        run: |
+          # Check which main tag we are going to build determined by github.ref_type
+          if [[ "${{ github.ref_type }}" == "tag" ]]; then
+            EXTRACT_TAG="latest"
+          elif [[ "${{ github.ref_type }}" == "branch" ]]; then
+            EXTRACT_TAG="testing"
+          fi
+
+          # After each extraction the image is removed.
+          # This is needed because using different platforms doesn't trigger a new pull/download
+
+          # Extract amd64 binary
+          docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp amd64:/vaultwarden vaultwarden-amd64
+          docker rm --force amd64
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract arm64 binary
+          docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp arm64:/vaultwarden vaultwarden-arm64
+          docker rm --force arm64
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract armv7 binary
+          docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp armv7:/vaultwarden vaultwarden-armv7
+          docker rm --force armv7
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+          # Extract armv6 binary
+          docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker cp armv6:/vaultwarden vaultwarden-armv6
+          docker rm --force armv6
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+
+      # Upload artifacts to Github Actions
+      - name: "Upload amd64 artifact"
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
+          path: vaultwarden-amd64
+
+      - name: "Upload arm64 artifact"
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
+          path: vaultwarden-arm64
+
+      - name: "Upload armv7 artifact"
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
+          path: vaultwarden-armv7
+
+      - name: "Upload armv6 artifact"
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+        if: ${{ matrix.base_image == 'alpine' }}
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
+          path: vaultwarden-armv6
+      # End Upload artifacts to Github Actions
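The extracted Alpine binaries end up as regular run artifacts. A minimal sketch of fetching one afterwards with the GitHub CLI; the run id and the version in the artifact name are illustrative, since the real name depends on that run's SOURCE_VERSION:

    # run id and version are placeholders
    gh run download 123456789 \
      --repo dani-garcia/vaultwarden \
      --name vaultwarden-1.31.0-linux-amd64
    chmod +x vaultwarden-amd64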
.github/workflows/releasecache-cleanup.yml | 26 (vendored, new file)

@@ -0,0 +1,26 @@
+on:
+  workflow_dispatch:
+    inputs:
+      manual_trigger:
+        description: "Manual trigger buildcache cleanup"
+        required: false
+        default: ""
+
+  schedule:
+    - cron: '0 1 * * FRI'
+
+name: Cleanup
+jobs:
+  releasecache-cleanup:
+    name: Releasecache Cleanup
+    runs-on: ubuntu-22.04
+    continue-on-error: true
+    timeout-minutes: 30
+    steps:
+      - name: Delete vaultwarden-buildcache containers
+        uses: actions/delete-package-versions@e5bc658cc4c965c472efe991f8beea3981499c55 # v5.0.0
+        with:
+          package-name: 'vaultwarden-buildcache'
+          package-type: 'container'
+          min-versions-to-keep: 0
+          delete-only-untagged-versions: 'false'
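Besides the Friday cron, the workflow_dispatch trigger means the cleanup can be kicked off by hand. A sketch with the GitHub CLI; the input value is illustrative, since the workflow only uses it as a free-form note:

    gh workflow run releasecache-cleanup.yml \
      --repo dani-garcia/vaultwarden \
      -f manual_trigger=yes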
.github/workflows/trivy.yml | 7 (vendored)

@@ -4,7 +4,6 @@ on:
   push:
     branches:
       - main
-      - release-build-revision
     tags:
       - '*'
   pull_request:
@@ -26,10 +25,10 @@ jobs:
       actions: read
     steps:
       - name: Checkout code
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7

       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@f78e9ecf42a1271402d4f484518b9313235990e1 # v0.13.1
+        uses: aquasecurity/trivy-action@7c2007bcb556501da015201bcba5aa14069b74e2 # v0.23.0
         with:
           scan-type: repo
           ignore-unfixed: true
@@ -38,6 +37,6 @@ jobs:
           severity: CRITICAL,HIGH

       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@bad341350a2f5616f9e048e51360cedc49181ce8 # v2.22.4
+        uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.25.10
         with:
           sarif_file: 'trivy-results.sarif'
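The same scan is reproducible locally with the trivy CLI. A sketch mirroring the workflow settings above (scan-type repo, unfixed findings ignored, CRITICAL/HIGH only, SARIF output); run from a checkout of the repository:

    trivy repository --ignore-unfixed --severity CRITICAL,HIGH \
      --format sarif --output trivy-results.sarif .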
Cargo.lock | 2146 (generated; file diff suppressed because it is too large)
Cargo.toml | 177

@@ -3,7 +3,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.71.1"
+rust-version = "1.78.0"
 resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"
@@ -36,11 +36,11 @@ unstable = []

 [target."cfg(not(windows))".dependencies]
 # Logging
-syslog = "6.1.0"
+syslog = "6.1.1"

 [dependencies]
 # Logging
-log = "0.4.20"
+log = "0.4.22"
 fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
 tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

@@ -48,63 +48,62 @@ tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and
 dotenvy = { version = "0.15.7", default-features = false }

 # Lazy initialization
-once_cell = "1.18.0"
+once_cell = "1.19.0"

 # Numerical libraries
-num-traits = "0.2.17"
+num-traits = "0.2.19"
-num-derive = "0.4.1"
+num-derive = "0.4.2"
+bigdecimal = "0.4.5"

 # Web framework
-rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false }
+rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }
-# rocket_ws = { version ="0.1.0-rc.3" }
+rocket_ws = { version ="0.1.1" }
-rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # v0.5 branch

 # WebSockets libraries
-tokio-tungstenite = "0.19.0"
-rmpv = "1.0.1" # MessagePack library
+rmpv = "1.3.0" # MessagePack library

 # Concurrent HashMap used for WebSocket messaging and favicons
-dashmap = "5.5.3"
+dashmap = "6.0.1"

 # Async futures
-futures = "0.3.28"
+futures = "0.3.30"
-tokio = { version = "1.33.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+tokio = { version = "1.38.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }

 # A generic serialization/deserialization framework
-serde = { version = "1.0.189", features = ["derive"] }
+serde = { version = "1.0.204", features = ["derive"] }
-serde_json = "1.0.107"
+serde_json = "1.0.120"

 # A safe, extensible ORM and Query builder
-diesel = { version = "2.1.3", features = ["chrono", "r2d2"] }
+diesel = { version = "2.2.1", features = ["chrono", "r2d2", "numeric"] }
-diesel_migrations = "2.1.0"
+diesel_migrations = "2.2.0"
 diesel_logger = { version = "0.3.0", optional = true }

 # Bundled/Static SQLite
-libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.28.0", features = ["bundled"], optional = true }

 # Crypto-related libraries
 rand = { version = "0.8.5", features = ["small_rng"] }
-ring = "0.17.5"
+ring = "0.17.8"

 # UUID generation
-uuid = { version = "1.5.0", features = ["v4"] }
+uuid = { version = "1.9.1", features = ["v4"] }

 # Date and time libraries
-chrono = { version = "0.4.31", features = ["clock", "serde"], default-features = false }
+chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.8.3"
+chrono-tz = "0.9.0"
-time = "0.3.30"
+time = "0.3.36"

 # Job scheduler
-job_scheduler_ng = "2.0.4"
+job_scheduler_ng = "2.0.5"

 # Data encoding library Hex/Base32/Base64
-data-encoding = "2.4.0"
+data-encoding = "2.6.0"

 # JWT library
-jsonwebtoken = "9.0.0"
+jsonwebtoken = "9.3.0"

 # TOTP library
-totp-lite = "2.0.0"
+totp-lite = "2.0.1"

 # Yubico Library
 yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false }
@@ -113,71 +112,64 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
 webauthn-rs = "0.3.2"

 # Handling of URL's for WebAuthn and favicons
-url = "2.4.1"
+url = "2.5.2"

 # Email libraries
-lettre = { version = "0.11.0", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+lettre = { version = "0.11.7", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
-percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails
+percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
-email_address = "0.2.4"
+email_address = "0.2.5"

 # HTML Template library
-handlebars = { version = "4.4.0", features = ["dir_source"] }
+handlebars = { version = "5.1.2", features = ["dir_source"] }

 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.11.22", features = ["stream", "json", "deflate", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }
+reqwest = { version = "0.12.5", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
+hickory-resolver = "0.24.1"

 # Favicon extraction libraries
 html5gum = "0.5.7"
-regex = { version = "1.10.2", features = ["std", "perf", "unicode-perl"], default-features = false }
+regex = { version = "1.10.5", features = ["std", "perf", "unicode-perl"], default-features = false }
-data-url = "0.3.0"
+data-url = "0.3.1"
-bytes = "1.5.0"
+bytes = "1.6.0"

 # Cache function results (Used for version check and favicon fetching)
-cached = { version = "0.46.0", features = ["async"] }
+cached = { version = "0.52.0", features = ["async"] }

 # Used for custom short lived cookie jar during favicon extraction
-cookie = "0.16.2"
+cookie = "0.18.1"
-cookie_store = "0.19.1"
+cookie_store = "0.21.0"

 # Used by U2F, JWT and PostgreSQL
-openssl = "0.10.57"
+openssl = "0.10.64"
-# Set openssl-sys fixed to v0.9.92 to prevent building issues with musl, arm and 32bit pointer width
-# It will force add a dynamically linked library which prevents the build from being static
-openssl-sys = "=0.9.92"

 # CLI argument parsing
 pico-args = "0.5.0"

 # Macro ident concatenation
-paste = "1.0.14"
+paste = "1.0.15"
-governor = "0.6.0"
+governor = "0.6.3"

 # Check client versions for specific features.
-semver = "1.0.20"
+semver = "1.0.23"

 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
+mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
-which = "5.0.0"
+which = "6.0.1"

 # Argon2 library with support for the PHC format
-argon2 = "0.5.2"
+argon2 = "0.5.3"

 # Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
-rpassword = "7.2.0"
+rpassword = "7.3.1"

-[patch.crates-io]
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch
-# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch

 # Strip debuginfo from the release builds
-# Also enable thin LTO for some optimizations
+# The symbols are the provide better panic traces
+# Also enable fat LTO and use 1 codegen unit for optimizations
 [profile.release]
 strip = "debuginfo"
-lto = "thin"
+lto = "fat"
+codegen-units = 1

 # A little bit of a speedup
 [profile.dev]
@@ -187,3 +179,68 @@ split-debuginfo = "unpacked"
 # This is a huge speed improvement during testing
 [profile.dev.package.argon2]
 opt-level = 3
+
+# Optimize for size
+[profile.release-micro]
+inherits = "release"
+opt-level = "z"
+strip = "symbols"
+lto = "fat"
+codegen-units = 1
+panic = "abort"
+
+# Profile for systems with low resources
+# It will use less resources during build
+[profile.release-low]
+inherits = "release"
+strip = "symbols"
+lto = "thin"
+codegen-units = 16
+
# Linting config
|
||||||
|
[lints.rust]
|
||||||
|
# Forbid
|
||||||
|
unsafe_code = "forbid"
|
||||||
|
non_ascii_idents = "forbid"
|
||||||
|
|
||||||
|
# Deny
|
||||||
|
future_incompatible = { level = "deny", priority = -1 }
|
||||||
|
noop_method_call = "deny"
|
||||||
|
rust_2018_idioms = { level = "deny", priority = -1 }
|
||||||
|
rust_2021_compatibility = { level = "deny", priority = -1 }
|
||||||
|
trivial_casts = "deny"
|
||||||
|
trivial_numeric_casts = "deny"
|
||||||
|
unused = { level = "deny", priority = -1 }
|
||||||
|
unused_import_braces = "deny"
|
||||||
|
unused_lifetimes = "deny"
|
||||||
|
deprecated_in_future = "deny"
|
||||||
|
|
||||||
|
[lints.clippy]
|
||||||
|
# Allow
|
||||||
|
# We need this since Rust v1.76+, since it has some bugs
|
||||||
|
# https://github.com/rust-lang/rust-clippy/issues/12016
|
||||||
|
blocks_in_conditions = "allow"
|
||||||
|
|
||||||
|
# Deny
|
||||||
|
cast_lossless = "deny"
|
||||||
|
clone_on_ref_ptr = "deny"
|
||||||
|
equatable_if_let = "deny"
|
||||||
|
float_cmp_const = "deny"
|
||||||
|
inefficient_to_string = "deny"
|
||||||
|
iter_on_empty_collections = "deny"
|
||||||
|
iter_on_single_items = "deny"
|
||||||
|
linkedlist = "deny"
|
||||||
|
macro_use_imports = "deny"
|
||||||
|
manual_assert = "deny"
|
||||||
|
manual_instant_elapsed = "deny"
|
||||||
|
manual_string_new = "deny"
|
||||||
|
match_wildcard_for_single_variants = "deny"
|
||||||
|
mem_forget = "deny"
|
||||||
|
needless_lifetimes = "deny"
|
||||||
|
string_add_assign = "deny"
|
||||||
|
string_to_string = "deny"
|
||||||
|
unnecessary_join = "deny"
|
||||||
|
unnecessary_self_imports = "deny"
|
||||||
|
unused_async = "deny"
|
||||||
|
verbose_file_reads = "deny"
|
||||||
|
zero_sized_map_values = "deny"
|
||||||
|
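Lint tables in Cargo.toml are honored automatically by Cargo (supported since 1.74, comfortably below the 1.78 rust-version set above), so no extra flags or attribute spam are needed; a sketch:

    # both rustc and clippy pick the [lints] tables up on their own:
    cargo clippy --features sqlite
    cargo build --features sqlite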
README.md:

@@ -92,4 +92,11 @@ Thanks for your contribution to the project!
     </a>
    </td>
   </tr>
+  <tr>
+   <td align="center">
+    <a href="https://github.com/IQ333777" style="width: 75px">
+     <sub><b>IQ333777</b></sub>
+    </a>
+   </td>
+  </tr>
 </table>
build.rs | 24

@@ -17,6 +17,20 @@ fn main() {
         "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
     );

+    // Use check-cfg to let cargo know which cfg's we define,
+    // and avoid warnings when they are used in the code.
+    println!("cargo::rustc-check-cfg=cfg(sqlite)");
+    println!("cargo::rustc-check-cfg=cfg(mysql)");
+    println!("cargo::rustc-check-cfg=cfg(postgresql)");
+    println!("cargo::rustc-check-cfg=cfg(query_logger)");
+
+    // Rerun when these paths are changed.
+    // Someone could have checked-out a tag or specific commit, but no other files changed.
+    println!("cargo:rerun-if-changed=.git");
+    println!("cargo:rerun-if-changed=.git/HEAD");
+    println!("cargo:rerun-if-changed=.git/index");
+    println!("cargo:rerun-if-changed=.git/refs/tags");
+
     #[cfg(all(not(debug_assertions), feature = "query_logger"))]
     compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");

@@ -42,11 +56,11 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {

 /// This method reads info from Git, namely tags, branch, and revision
 /// To access these values, use:
-///    - env!("GIT_EXACT_TAG")
+///    - `env!("GIT_EXACT_TAG")`
-///    - env!("GIT_LAST_TAG")
+///    - `env!("GIT_LAST_TAG")`
-///    - env!("GIT_BRANCH")
+///    - `env!("GIT_BRANCH")`
-///    - env!("GIT_REV")
+///    - `env!("GIT_REV")`
-///    - env!("VW_VERSION")
+///    - `env!("VW_VERSION")`
 fn version_from_git_info() -> Result<String, std::io::Error> {
     // The exact tag for the current commit, can be empty when
     // the current commit doesn't have an associated tag
docker/DockerSettings.yaml:

@@ -1,12 +1,12 @@
 ---
-vault_version: "v2023.10.0"
+vault_version: "v2024.5.1b"
-vault_image_digest: "sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935"
+vault_image_digest: "sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375"
-# Cross Compile Docker Helper Scripts v1.3.0
+# Cross Compile Docker Helper Scripts v1.4.0
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
-xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
+xx_image_digest: "sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4"
-rust_version: 1.73.0 # Rust version to be used
+rust_version: 1.79.0 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
-alpine_version: 3.18 # Alpine version to be used
+alpine_version: "3.20" # Alpine version to be used
 # For which platforms/architectures will we try to build images
 platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
 # Determine the build images per OS/Arch
docker/Dockerfile.alpine:

@@ -18,23 +18,23 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
+#     $ docker pull docker.io/vaultwarden/web-vault:v2024.5.1b
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.5.1b
-#     [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
+#     [docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375
-#     [docker.io/vaultwarden/web-vault:v2023.10.0]
+#     [docker.io/vaultwarden/web-vault:v2024.5.1b]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375 as vault

 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.73.0 as build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.79.0 as build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.73.0 as build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.79.0 as build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.73.0 as build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.79.0 as build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.73.0 as build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.79.0 as build_armv6

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
@@ -58,33 +58,29 @@ ENV DEBIAN_FRONTEND=noninteractive \


 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-    && rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
-    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
-    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

-# Enable MiMalloc to improve performance on Alpine builds
-ARG DB=sqlite,mysql,postgresql,enable_mimalloc
-
 RUN source /env-cargo && \
     rustup target add "${CARGO_TARGET}"

-ARG CARGO_PROFILE=release
-ARG VW_VERSION
-
 # Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+ARG CARGO_PROFILE=release
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc

 # Builds your dependencies and removes the
 # dummy project, except the target folder
@@ -97,10 +93,13 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

+ARG VW_VERSION
+
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
     # Make sure that we actually build the project by updating the src/main.rs timestamp
-    touch src/main.rs && \
+    # Also do this for build.rs to ensure the version is rechecked
+    touch build.rs src/main.rs && \
     # Create a symlink to the binary target folder to easy copy the binary in the final stage
     cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
     if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
@@ -126,7 +125,7 @@ RUN source /env-cargo && \
 # To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
 #
 # We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
-FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.18
+FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.20

 ENV ROCKET_PROFILE="release" \
     ROCKET_ADDRESS=0.0.0.0 \
@@ -149,8 +148,7 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 WORKDIR /

-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /

 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .
docker/Dockerfile.debian:

@@ -18,24 +18,24 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0
+#     $ docker pull docker.io/vaultwarden/web-vault:v2024.5.1b
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.5.1b
-#     [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935]
+#     [docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375
-#     [docker.io/vaultwarden/web-vault:v2023.10.0]
+#     [docker.io/vaultwarden/web-vault:v2024.5.1b]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375 as vault

 ########################## Cross Compile Docker Helper Scripts ##########################
 ## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
 ## And these bash scripts do not have any significant difference if at all
-FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.73.0-slim-bookworm as build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.79.0-slim-bookworm as build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT
@@ -64,32 +64,40 @@ RUN apt-get update && \
     "libc6-$(xx-info debian-arch)-cross" \
     "libc6-dev-$(xx-info debian-arch)-cross" \
     "linux-libc-dev-$(xx-info debian-arch)-cross" && \
-    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
-    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
-
-RUN xx-apt-get install -y \
+    xx-apt-get install -y \
     --no-install-recommends \
     gcc \
     libmariadb3 \
     libpq-dev \
     libpq5 \
-    libssl-dev && \
+    libssl-dev \
+    zlib1g-dev && \
     # Force install arch dependend mariadb dev packages
     # Installing them the normal way breaks several other packages (again)
     apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
-    dpkg --force-all -i ./libmariadb-dev*.deb
+    dpkg --force-all -i ./libmariadb-dev*.deb && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo

 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-    && rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+    ARCH_OPENSSL_INCLUDE_DIR
+
 RUN source /env-cargo && \
     if xx-info is-cross ; then \
+        # Some special variables if needed to override some build paths
+        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+        fi && \
         # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
         # Because of this we generate the needed environment variables here which we can load in the needed steps.
         echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
@@ -102,19 +110,16 @@ RUN source /env-cargo && \
     # Output the current contents of the file
     cat /env-cargo

-# Configure the DB ARG as late as possible to not invalidate the cached layers above
-ARG DB=sqlite,mysql,postgresql
-
 RUN source /env-cargo && \
     rustup target add "${CARGO_TARGET}"

-ARG CARGO_PROFILE=release
-ARG VW_VERSION
-
 # Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+ARG CARGO_PROFILE=release
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql

 # Builds your dependencies and removes the
 # dummy project, except the target folder
@@ -127,10 +132,13 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

+ARG VW_VERSION
+
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
     # Make sure that we actually build the project by updating the src/main.rs timestamp
-    touch src/main.rs && \
+    # Also do this for build.rs to ensure the version is rechecked
+    touch build.rs src/main.rs && \
     # Create a symlink to the binary target folder to easy copy the binary in the final stage
     cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
     if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
@@ -183,8 +191,7 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 WORKDIR /

-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /

 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .
docker/Dockerfile.j2:

@@ -82,34 +82,42 @@ RUN apt-get update && \
     "libc6-$(xx-info debian-arch)-cross" \
     "libc6-dev-$(xx-info debian-arch)-cross" \
     "linux-libc-dev-$(xx-info debian-arch)-cross" && \
-    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
-    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
-
-RUN xx-apt-get install -y \
+    xx-apt-get install -y \
     --no-install-recommends \
     gcc \
     libmariadb3 \
     libpq-dev \
     libpq5 \
-    libssl-dev && \
+    libssl-dev \
+    zlib1g-dev && \
     # Force install arch dependend mariadb dev packages
     # Installing them the normal way breaks several other packages (again)
    apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
-    dpkg --force-all -i ./libmariadb-dev*.deb
+    dpkg --force-all -i ./libmariadb-dev*.deb && \
+    # Run xx-cargo early, since it sometimes seems to break when run at a later stage
+    echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
 {% endif %}

 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-    && rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+    rustup set profile minimal

 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app

 {% if base == "debian" %}
-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+    ARCH_OPENSSL_INCLUDE_DIR
+
 RUN source /env-cargo && \
     if xx-info is-cross ; then \
+        # Some special variables if needed to override some build paths
+        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+        fi && \
         # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
         # Because of this we generate the needed environment variables here which we can load in the needed steps.
         echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
@@ -122,30 +130,28 @@ RUN source /env-cargo && \
     # Output the current contents of the file
     cat /env-cargo

-# Configure the DB ARG as late as possible to not invalidate the cached layers above
-ARG DB=sqlite,mysql,postgresql
 {% elif base == "alpine" %}
-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
-    # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
-    if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

-# Enable MiMalloc to improve performance on Alpine builds
-ARG DB=sqlite,mysql,postgresql,enable_mimalloc
 {% endif %}

 RUN source /env-cargo && \
     rustup target add "${CARGO_TARGET}"

-ARG CARGO_PROFILE=release
-ARG VW_VERSION
-
 # Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+ARG CARGO_PROFILE=release
+
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+{% if base == "debian" %}
+ARG DB=sqlite,mysql,postgresql
+{% elif base == "alpine" %}
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+{% endif %}

 # Builds your dependencies and removes the
 # dummy project, except the target folder
@@ -158,10 +164,13 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .

+ARG VW_VERSION
+
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
     # Make sure that we actually build the project by updating the src/main.rs timestamp
-    touch src/main.rs && \
+    # Also do this for build.rs to ensure the version is rechecked
+    touch build.rs src/main.rs && \
     # Create a symlink to the binary target folder to easy copy the binary in the final stage
     cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \
     if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \
@@ -226,8 +235,7 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 WORKDIR /

-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /

 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .
docker/README.md:

@@ -11,6 +11,11 @@ With just these two files we can build both Debian and Alpine images for the fol
 - armv7 (linux/arm/v7)
 - armv6 (linux/arm/v6)

+Some unsupported platforms for Debian based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
+- 386 (linux/386)
+- ppc64le (linux/ppc64le)
+- s390x (linux/s390x)
+
 To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different then your host.<br>
 This ensures the container build process can run binaries from other architectures.<br>
docker/docker-bake.hcl:

@@ -88,7 +88,7 @@ target "debian" {
   inherits = ["_default_attributes"]
   dockerfile = "docker/Dockerfile.debian"
   tags = generate_tags("", platform_tag())
-  output = [join(",", flatten([["type=docker"], image_index_annotations()]))]
+  output = ["type=docker"]
 }

 // Multi Platform target, will build one tagged manifest with all supported architectures
@@ -125,6 +125,40 @@ target "debian-armv6" {
   tags = generate_tags("", "-armv6")
 }

+// ==== Start of unsupported Debian architecture targets ===
+// These are provided just to help users build for these rare platforms
+// They will not be built by default
+target "debian-386" {
+  inherits = ["debian"]
+  platforms = ["linux/386"]
+  tags = generate_tags("", "-386")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
+  }
+}
+
+target "debian-ppc64le" {
+  inherits = ["debian"]
+  platforms = ["linux/ppc64le"]
+  tags = generate_tags("", "-ppc64le")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
+  }
+}
+
+target "debian-s390x" {
+  inherits = ["debian"]
+  platforms = ["linux/s390x"]
+  tags = generate_tags("", "-s390x")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
+  }
+}
+// ==== End of unsupported Debian architecture targets ===
+
 // A Group to build all platforms individually for local testing
 group "debian-all" {
   targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]
@@ -138,7 +172,7 @@ target "alpine" {
   inherits = ["_default_attributes"]
   dockerfile = "docker/Dockerfile.alpine"
   tags = generate_tags("-alpine", platform_tag())
-  output = [join(",", flatten([["type=docker"], image_index_annotations()]))]
+  output = ["type=docker"]
 }

 // Multi Platform target, will build one tagged manifest with all supported architectures
@@ -216,7 +250,7 @@ function "generate_tags" {
   result = flatten([
     for registry in get_container_registries() :
       [for base_tag in get_base_tags() :
-        concat(["${registry}:${base_tag}${suffix}${platform}"])]
+        concat(
+          # If the base_tag contains latest, and the suffix contains `-alpine` add a `:alpine` tag too
+          base_tag == "latest" ? suffix == "-alpine" ? ["${registry}:alpine${platform}"] : [] : [],
+          # The default tagging strategy
+          ["${registry}:${base_tag}${suffix}${platform}"]
+        )
+      ]
   ])
 }
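The effect of the new tagging logic can be inspected without building anything via bake's --print mode; a sketch, where BASE_TAGS mirrors what the release workflow exports and get_base_tags()/get_container_registries() are assumed to read their usual environment defaults:

    BASE_TAGS=latest docker buildx bake -f docker/docker-bake.hcl alpine --print
    # the printed tags should now include a shorthand ":alpine" alongside ":latest-alpine"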
docker/healthcheck.sh:

@@ -1,12 +1,20 @@
-#!/bin/sh
+#!/usr/bin/env sh

 # Use the value of the corresponding env var (if present),
 # or a default value otherwise.
-: "${DATA_FOLDER:="data"}"
+: "${DATA_FOLDER:="/data"}"
 : "${ROCKET_PORT:="80"}"
+: "${ENV_FILE:="/.env"}"

 CONFIG_FILE="${DATA_FOLDER}"/config.json

+# Check if the $ENV_FILE file exist and is readable
+# If that is the case, load it into the environment before running any check
+if [ -r "${ENV_FILE}" ]; then
+    # shellcheck disable=SC1090
+    . "${ENV_FILE}"
+fi
+
 # Given a config key, return the corresponding config value from the
 # config file. If the key doesn't exist, return an empty string.
 get_config_val() {
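A sketch of exercising the updated script by hand inside a running container (the container name is illustrative); it now picks up any readable /.env, or the file pointed to by ENV_FILE, before probing the instance:

    docker exec vaultwarden /healthcheck.sh
    echo $?   # 0 when the instance reports healthy, non-zero otherwise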
New database migrations widening attachments.file_size and twofactor.last_used to 64-bit integers, one up.sql per backend (backend inferred from the SQL dialect; exact migration paths are not shown in this view):

MySQL:
@@ -0,0 +1 @@
+ALTER TABLE attachments MODIFY file_size BIGINT NOT NULL;
@@ -0,0 +1 @@
+ALTER TABLE twofactor MODIFY last_used BIGINT NOT NULL;

PostgreSQL:
@@ -0,0 +1,3 @@
+ALTER TABLE attachments
+ALTER COLUMN file_size TYPE BIGINT,
+ALTER COLUMN file_size SET NOT NULL;
@@ -0,0 +1,3 @@
+ALTER TABLE twofactor
+ALTER COLUMN last_used TYPE BIGINT,
+ALTER COLUMN last_used SET NOT NULL;

SQLite:
@@ -0,0 +1 @@
+-- Integer size in SQLite is already i64, so we don't need to do anything
@@ -0,0 +1 @@
+-- Integer size in SQLite is already i64, so we don't need to do anything
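These run automatically at startup through diesel_migrations; a sketch of verifying the result afterwards, PostgreSQL shown and the connection URL taken from the usual DATABASE_URL setting:

    psql "$DATABASE_URL" -c '\d attachments'
    # file_size should now be reported as bigint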
rust-toolchain.toml:

@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.73.0"
+channel = "1.79.0"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"
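rustup reads this file automatically from the project root, so no manual toolchain switch is needed; a sketch:

    cd vaultwarden
    rustup show active-toolchain   # reports 1.79.0 (installed on first use)
    cargo build --features sqlite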
@@ -13,14 +13,18 @@ use rocket::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{core::log_event, unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
|
api::{
|
||||||
|
core::{log_event, two_factor},
|
||||||
|
unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
|
||||||
|
},
|
||||||
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
|
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
|
||||||
config::ConfigBuilder,
|
config::ConfigBuilder,
|
||||||
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
|
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
|
||||||
error::{Error, MapResult},
|
error::{Error, MapResult},
|
||||||
mail,
|
mail,
|
||||||
util::{
|
util::{
|
||||||
docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
|
container_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client,
|
||||||
|
is_running_in_container, NumberOrString,
|
||||||
},
|
},
|
||||||
CONFIG, VERSION,
|
CONFIG, VERSION,
|
||||||
};
|
};
|
||||||
@@ -184,12 +188,11 @@ fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp
|
|||||||
let claims = generate_admin_claims();
|
let claims = generate_admin_claims();
|
||||||
let jwt = encode_jwt(&claims);
|
let jwt = encode_jwt(&claims);
|
||||||
|
|
||||||
let cookie = Cookie::build(COOKIE_NAME, jwt)
|
let cookie = Cookie::build((COOKIE_NAME, jwt))
|
||||||
.path(admin_path())
|
.path(admin_path())
|
||||||
.max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
|
.max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
|
||||||
.same_site(SameSite::Strict)
|
.same_site(SameSite::Strict)
|
||||||
.http_only(true)
|
.http_only(true);
|
||||||
.finish();
|
|
||||||
|
|
||||||
cookies.add(cookie);
|
cookies.add(cookie);
|
||||||
if let Some(redirect) = redirect {
|
if let Some(redirect) = redirect {
|
||||||
@@ -262,8 +265,8 @@ fn admin_page_login() -> ApiResult<Html<String>> {
     render_admin_login(None, None)
 }
 
-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct InviteData {
     email: String,
 }
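This derive reshuffle is part of a wider move in these commits away from PascalCase fields guarded by #[allow(non_snake_case)]: #[serde(rename_all = "camelCase")] maps idiomatic snake_case Rust fields onto the camelCase JSON keys the clients send. A small self-contained sketch of the pattern, with illustrative struct and field names:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct InviteLike {
        email: String,
        wait_time_days: i32, // deserialized from the JSON key "waitTimeDays"
    }

    fn main() {
        let json = r#"{"email": "user@example.com", "waitTimeDays": 7}"#;
        let data: InviteLike = serde_json::from_str(json).unwrap();
        assert_eq!(data.wait_time_days, 7);
    }
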
@@ -313,7 +316,7 @@ async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
 
 #[get("/logout")]
 fn logout(cookies: &CookieJar<'_>) -> Redirect {
-    cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+    cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
     Redirect::to(admin_path())
 }
 
@@ -323,9 +326,9 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
     let mut users_json = Vec::with_capacity(users.len());
     for u in users {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
-        usr["LastActive"] = match u.last_active(&mut conn).await {
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["lastActive"] = match u.last_active(&mut conn).await {
            Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
            None => json!(None::<String>),
        };
@@ -343,7 +346,7 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
         let mut usr = u.to_json(&mut conn).await;
         usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
         usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
-        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
+        usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await));
         usr["user_enabled"] = json!(u.enabled);
         usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
         usr["last_active"] = match u.last_active(&mut conn).await {
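Dropping the `as i32` cast here matters because a user's total attachment size can exceed i32::MAX once the columns are 64-bit; the display helper evidently accepts i64 now. A hypothetical standalone version of such a formatter, not Vaultwarden's actual implementation:

    fn get_display_size(size: i64) -> String {
        const UNITS: [&str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];
        let mut size = size as f64;
        let mut unit = 0;
        while size >= 1024.0 && unit < UNITS.len() - 1 {
            size /= 1024.0;
            unit += 1;
        }
        format!("{:.2} {}", size, UNITS[unit])
    }

    fn main() {
        // A value above i32::MAX formats fine without a lossy cast.
        println!("{}", get_display_size(3_000_000_000)); // "2.79 GB"
    }
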
@@ -361,8 +364,8 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
 async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     if let Some(u) = User::find_by_mail(mail, &mut conn).await {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
         Ok(Json(usr))
     } else {
         err_code!("User doesn't exist", Status::NotFound.code);
@@ -373,8 +376,8 @@ async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn)
 async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     let u = get_user_or_404(uuid, &mut conn).await?;
     let mut usr = u.to_json(&mut conn).await;
-    usr["UserEnabled"] = json!(u.enabled);
-    usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+    usr["userEnabled"] = json!(u.enabled);
+    usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
     Ok(Json(usr))
 }
 
@@ -391,7 +394,7 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe
             EventType::OrganizationUserRemoved as i32,
             &user_org.uuid,
             &user_org.org_uuid,
-            String::from(ACTING_ADMIN_USER),
+            ACTING_ADMIN_USER,
             14, // Use UnknownBrowser type
             &token.ip.ip,
             &mut conn,
@@ -410,7 +413,7 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif
 
     if CONFIG.push_enabled() {
         for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
-            match unregister_push_device(device.uuid).await {
+            match unregister_push_device(device.push_uuid).await {
                 Ok(r) => r,
                 Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
            };
@@ -446,9 +449,10 @@ async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyR
 }
 
 #[post("/users/<uuid>/remove-2fa")]
-async fn remove_2fa(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
     let mut user = get_user_or_404(uuid, &mut conn).await?;
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
+    two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
     user.totp_recover = None;
     user.save(&mut conn).await
 }
@@ -471,7 +475,7 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
     }
 }
 
-#[derive(Deserialize, Debug)]
+#[derive(Debug, Deserialize)]
 struct UserOrgTypeData {
     user_type: NumberOrString,
     user_uuid: String,
@@ -506,7 +510,11 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
         match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
             Ok(_) => {}
             Err(OrgPolicyErr::TwoFactorMissing) => {
-                err!("You cannot modify this user to this type because it has no two-step login method activated");
+                if CONFIG.email_2fa_auto_fallback() {
+                    two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?;
+                } else {
+                    err!("You cannot modify this user to this type because they have not setup 2FA");
+                }
             }
             Err(OrgPolicyErr::SingleOrgEnforced) => {
                 err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
@@ -518,7 +526,7 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
             EventType::OrganizationUserUpdated as i32,
             &user_to_edit.uuid,
             &data.org_uuid,
-            String::from(ACTING_ADMIN_USER),
+            ACTING_ADMIN_USER,
             14, // Use UnknownBrowser type
             &token.ip.ip,
             &mut conn,
@@ -546,7 +554,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
         org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
         org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
         org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
-        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
+        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await));
         organizations_json.push(org);
     }
 
@@ -604,7 +612,7 @@ use cached::proc_macro::cached;
 /// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already.
 /// It will cache this function for 300 seconds (5 minutes) which should prevent the exhaustion of the rate limit.
 #[cached(time = 300, sync_writes = true)]
-async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
+async fn get_release_info(has_http_access: bool, running_within_container: bool) -> (String, String, String) {
     // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
     if has_http_access {
         (
@@ -621,9 +629,9 @@ async fn get_release_info(has_http_access: bool, running_within_docker: bool) ->
                 }
                 _ => "-".to_string(),
             },
-            // Do not fetch the web-vault version when running within Docker.
+            // Do not fetch the web-vault version when running within a container.
             // The web-vault version is embedded within the container it self, and should not be updated manually
-            if running_within_docker {
+            if running_within_container {
                 "-".to_string()
             } else {
                 match get_json_api::<GitRelease>(
@@ -677,7 +685,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
     };
 
     // Execute some environment checks
-    let running_within_docker = is_running_in_docker();
+    let running_within_container = is_running_in_container();
     let has_http_access = has_http_access().await;
     let uses_proxy = env::var_os("HTTP_PROXY").is_some()
         || env::var_os("http_proxy").is_some()
@@ -691,12 +699,9 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
     };
 
     let (latest_release, latest_commit, latest_web_build) =
-        get_release_info(has_http_access, running_within_docker).await;
+        get_release_info(has_http_access, running_within_container).await;
 
-    let ip_header_name = match &ip_header.0 {
-        Some(h) => h,
-        _ => "",
-    };
+    let ip_header_name = &ip_header.0.unwrap_or_default();
 
     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,
@@ -706,11 +711,11 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
         "web_vault_enabled": &CONFIG.web_vault_enabled(),
         "web_vault_version": web_vault_version.version.trim_start_matches('v'),
         "latest_web_build": latest_web_build,
-        "running_within_docker": running_within_docker,
-        "docker_base_image": if running_within_docker { docker_base_image() } else { "Not applicable" },
+        "running_within_container": running_within_container,
+        "container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
         "has_http_access": has_http_access,
-        "ip_header_exists": &ip_header.0.is_some(),
-        "ip_header_match": ip_header_name == CONFIG.ip_header(),
+        "ip_header_exists": !ip_header_name.is_empty(),
+        "ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
         "ip_header_name": ip_header_name,
         "ip_header_config": &CONFIG.ip_header(),
         "uses_proxy": uses_proxy,
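The ip_header simplification across the two hunks above rests on Option::unwrap_or_default: a None collapses to an empty String, so an emptiness check replaces the explicit match. In miniature, standalone and with the Option inlined:

    fn main() {
        let ip_header: Option<String> = None;

        // Old shape: match on the Option to get "" for the None case.
        // New shape: one call does the same.
        let ip_header_name = ip_header.unwrap_or_default();

        let ip_header_exists = !ip_header_name.is_empty();
        assert!(!ip_header_exists);
    }
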
@@ -786,16 +791,16 @@ impl<'r> FromRequest<'r> for AdminToken {
                 if requested_page.is_empty() {
                     return Outcome::Forward(Status::Unauthorized);
                 } else {
-                    return Outcome::Failure((Status::Unauthorized, "Unauthorized"));
+                    return Outcome::Error((Status::Unauthorized, "Unauthorized"));
                 }
             }
         };
 
         if decode_admin(access_token).is_err() {
             // Remove admin cookie
-            cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
+            cookies.remove(Cookie::build(COOKIE_NAME).path(admin_path()));
             error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
-            return Outcome::Failure((Status::Unauthorized, "Session expired"));
+            return Outcome::Error((Status::Unauthorized, "Session expired"));
         }
 
         Outcome::Success(Self {
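Outcome::Failure becoming Outcome::Error is a Rocket 0.5 rename in the request-guard API. A minimal guard written against that assumption; the struct and header name are illustrative:

    use rocket::http::Status;
    use rocket::request::{FromRequest, Outcome, Request};

    struct ApiKey;

    #[rocket::async_trait]
    impl<'r> FromRequest<'r> for ApiKey {
        type Error = &'static str;

        async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
            match req.headers().get_one("x-api-key") {
                Some(_) => Outcome::Success(ApiKey),
                // Previously spelled Outcome::Failure((status, error)).
                None => Outcome::Error((Status::Unauthorized, "Unauthorized")),
            }
        }
    }
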
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,15 +1,17 @@
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use rocket::{serde::json::Json, Route};
 use serde_json::Value;
 
 use crate::{
     api::{
         core::{CipherSyncData, CipherSyncType},
-        EmptyResult, JsonResult, JsonUpcase, NumberOrString,
+        EmptyResult, JsonResult,
     },
     auth::{decode_emergency_access_invite, Headers},
     db::{models::*, DbConn, DbPool},
-    mail, CONFIG,
+    mail,
+    util::NumberOrString,
+    CONFIG,
 };
 
 pub fn routes() -> Vec<Route> {
@@ -18,6 +20,7 @@ pub fn routes() -> Vec<Route> {
         get_grantees,
         get_emergency_access,
         put_emergency_access,
+        post_emergency_access,
         delete_emergency_access,
         post_delete_emergency_access,
         send_invite,
@@ -37,45 +40,66 @@ pub fn routes() -> Vec<Route> {
 // region get
 
 #[get("/emergency-access/trusted")]
-async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    if !CONFIG.emergency_access_allowed() {
+        return Json(json!({
+            "data": [{
+                "id": "",
+                "status": 2,
+                "type": 0,
+                "waitTimeDays": 0,
+                "granteeId": "",
+                "email": "",
+                "name": "NOTE: Emergency Access is disabled!",
+                "object": "emergencyAccessGranteeDetails",
+
+            }],
+            "object": "list",
+            "continuationToken": null
+        }));
+    }
     let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
-        emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
+        if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
+            emergency_access_list_json.push(grantee)
+        }
     }
 
-    Ok(Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
-    })))
+    Json(json!({
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
+    }))
 }
 
 #[get("/emergency-access/granted")]
-async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
-    let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await;
+async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
+    let emergency_access_list = if CONFIG.emergency_access_allowed() {
+        EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await
+    } else {
+        Vec::new()
+    };
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
         emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await);
     }
 
-    Ok(Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
-    })))
+    Json(json!({
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
+    }))
 }
 
 #[get("/emergency-access/<emer_id>")]
-async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+async fn get_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
+    check_emergency_access_enabled()?;
 
-    match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
+    match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+        Some(emergency_access) => Ok(Json(
+            emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
+        )),
         None => err!("Emergency access not valid."),
     }
 }
@@ -85,42 +109,49 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
 // region put/post
 
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessUpdateData {
-    Type: NumberOrString,
-    WaitTimeDays: i32,
-    KeyEncrypted: Option<String>,
+    r#type: NumberOrString,
+    wait_time_days: i32,
+    key_encrypted: Option<String>,
 }
 
 #[put("/emergency-access/<emer_id>", data = "<data>")]
-async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
-    post_emergency_access(emer_id, data, conn).await
+async fn put_emergency_access(
+    emer_id: &str,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
+    conn: DbConn,
+) -> JsonResult {
+    post_emergency_access(emer_id, data, headers, conn).await
 }
 
 #[post("/emergency-access/<emer_id>", data = "<data>")]
 async fn post_emergency_access(
     emer_id: &str,
-    data: JsonUpcase<EmergencyAccessUpdateData>,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
-    let data: EmergencyAccessUpdateData = data.into_inner().data;
+    let data: EmergencyAccessUpdateData = data.into_inner();
 
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => emergency_access,
-        None => err!("Emergency access not valid."),
-    };
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+            Some(emergency_access) => emergency_access,
+            None => err!("Emergency access not valid."),
+        };
 
-    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+    let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
         Some(new_type) => new_type as i32,
         None => err!("Invalid emergency access type."),
     };
 
     emergency_access.atype = new_type;
-    emergency_access.wait_time_days = data.WaitTimeDays;
-    if data.KeyEncrypted.is_some() {
-        emergency_access.key_encrypted = data.KeyEncrypted;
+    emergency_access.wait_time_days = data.wait_time_days;
+    if data.key_encrypted.is_some() {
+        emergency_access.key_encrypted = data.key_encrypted;
     }
 
     emergency_access.save(&mut conn).await?;
@@ -133,19 +164,23 @@ async fn post_emergency_access(
 
 #[delete("/emergency-access/<emer_id>")]
 async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
-    let grantor_user = headers.user;
-
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => {
-            if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
-                err!("Emergency access not valid.")
-            }
-            emer
+    let emergency_access = match (
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await,
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await,
+    ) {
+        (Some(grantor_emer), None) => {
+            info!("Grantor deleted emergency access {emer_id}");
+            grantor_emer
+        }
+        (None, Some(grantee_emer)) => {
+            info!("Grantee deleted emergency access {emer_id}");
+            grantee_emer
         }
-        None => err!("Emergency access not valid."),
+        _ => err!("Emergency access not valid."),
     };
 
     emergency_access.delete(&mut conn).await?;
     Ok(())
 }
@@ -160,24 +195,24 @@ async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbC
 // region invite
 
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessInviteData {
-    Email: String,
-    Type: NumberOrString,
-    WaitTimeDays: i32,
+    email: String,
+    r#type: NumberOrString,
+    wait_time_days: i32,
 }
 
 #[post("/emergency-access/invite", data = "<data>")]
-async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    check_emergency_access_enabled()?;
 
-    let data: EmergencyAccessInviteData = data.into_inner().data;
-    let email = data.Email.to_lowercase();
-    let wait_time_days = data.WaitTimeDays;
+    let data: EmergencyAccessInviteData = data.into_inner();
+    let email = data.email.to_lowercase();
+    let wait_time_days = data.wait_time_days;
 
     let emergency_access_status = EmergencyAccessStatus::Invited as i32;
 
-    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+    let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
         Some(new_type) => new_type as i32,
         None => err!("Invalid emergency access type."),
     };
@@ -189,7 +224,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
         err!("You can not set yourself as an emergency contact.")
     }
 
-    let grantee_user = match User::find_by_mail(&email, &mut conn).await {
+    let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
         None => {
             if !CONFIG.invitations_allowed() {
                 err!(format!("Grantee user does not exist: {}", &email))

@@ -206,9 +241,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
 
             let mut user = User::new(email.clone());
             user.save(&mut conn).await?;
-            user
+            (user, true)
         }
-        Some(user) => user,
+        Some(user) if user.password_hash.is_empty() => (user, true),
+        Some(user) => (user, false),
     };
 
     if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
@@ -236,15 +272,9 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
             &grantor_user.email,
         )
         .await?;
-    } else {
-        // Automatically mark user as accepted if no email invites
-        match User::find_by_mail(&email, &mut conn).await {
-            Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
-                Ok(v) => v,
-                Err(e) => err!(e.to_string()),
-            },
-            None => err!("Grantee user not found."),
-        }
+    } else if !new_user {
+        // if mail is not enabled immediately accept the invitation for existing users
+        new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
     }
 
     Ok(())
@@ -252,16 +282,13 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
 
 #[post("/emergency-access/<emer_id>/reinvite")]
 async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
-
-    if emergency_access.grantor_uuid != headers.user.uuid {
-        err!("Emergency access not valid.");
-    }
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
     if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
         err!("The grantee user is already accepted or confirmed to the organization");
@@ -288,34 +315,29 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
             &grantor_user.email,
         )
         .await?;
-    } else {
-        if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
-            let invitation = Invitation::new(&email);
-            invitation.save(&mut conn).await?;
-        }
-
-        // Automatically mark user as accepted if no email invites
-        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
-            Ok(v) => v,
-            Err(e) => err!(e.to_string()),
-        }
+    } else if !grantee_user.password_hash.is_empty() {
+        // accept the invitation for existing user
+        emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
+    } else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
+        let invitation = Invitation::new(&email);
+        invitation.save(&mut conn).await?;
     }
 
     Ok(())
 }
 
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct AcceptData {
-    Token: String,
+    token: String,
 }
 
 #[post("/emergency-access/<emer_id>/accept", data = "<data>")]
-async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    check_emergency_access_allowed()?;
+async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    check_emergency_access_enabled()?;
 
-    let data: AcceptData = data.into_inner().data;
-    let token = &data.Token;
+    let data: AcceptData = data.into_inner();
+    let token = &data.token;
     let claims = decode_emergency_access_invite(token)?;
 
     // This can happen if the user who received the invite used a different email to signup.
@@ -332,10 +354,13 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
         None => err!("Invited user not found"),
     };
 
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    // We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database.
+    // The uuid of the grantee gets stored once accepted.
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_email(emer_id, &headers.user.email, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
     // get grantor user to send Accepted email
     let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
@@ -347,10 +372,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
         && grantor_user.name == claims.grantor_name
         && grantor_user.email == claims.grantor_email
     {
-        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
-            Ok(v) => v,
-            Err(e) => err!(e.to_string()),
-        }
+        emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;
 
         if CONFIG.mail_enabled() {
             mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
@@ -362,49 +384,30 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
     }
 }
 
-async fn accept_invite_process(
-    grantee_uuid: &str,
-    emergency_access: &mut EmergencyAccess,
-    grantee_email: &str,
-    conn: &mut DbConn,
-) -> EmptyResult {
-    if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
-        err!("User email does not match invite.");
-    }
-
-    if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
-        err!("Emergency contact already accepted.");
-    }
-
-    emergency_access.status = EmergencyAccessStatus::Accepted as i32;
-    emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
-    emergency_access.email = None;
-    emergency_access.save(conn).await
-}
-
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct ConfirmData {
-    Key: String,
+    key: String,
 }
 
 #[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
 async fn confirm_emergency_access(
     emer_id: &str,
-    data: JsonUpcase<ConfirmData>,
+    data: Json<ConfirmData>,
     headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
     let confirming_user = headers.user;
-    let data: ConfirmData = data.into_inner().data;
-    let key = data.Key;
+    let data: ConfirmData = data.into_inner();
+    let key = data.key;
 
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &confirming_user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
     if emergency_access.status != EmergencyAccessStatus::Accepted as i32
         || emergency_access.grantor_uuid != confirming_user.uuid
@@ -444,17 +447,16 @@ async fn confirm_emergency_access(
 
 #[post("/emergency-access/<emer_id>/initiate")]
 async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
     let initiating_user = headers.user;
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &initiating_user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
-    if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
-        || emergency_access.grantee_uuid != Some(initiating_user.uuid)
-    {
+    if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 {
         err!("Emergency access not valid.")
     }
 
@@ -484,16 +486,15 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
 
 #[post("/emergency-access/<emer_id>/approve")]
 async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
-    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
-        || emergency_access.grantor_uuid != headers.user.uuid
-    {
+    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 {
         err!("Emergency access not valid.")
     }
 
@@ -522,25 +523,20 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC
 
 #[post("/emergency-access/<emer_id>/reject")]
 async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
-    if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
-        && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
-        || emergency_access.grantor_uuid != headers.user.uuid
+    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
+        && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32
     {
         err!("Emergency access not valid.")
     }
 
-    let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
-    };
-
     if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
         let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
             Some(user) => user,
@@ -551,7 +547,7 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
         emergency_access.save(&mut conn).await?;
 
         if CONFIG.mail_enabled() {
-            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
+            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &headers.user.name).await?;
         }
         Ok(Json(emergency_access.to_json()))
     } else {
@@ -565,12 +561,13 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
 
 #[post("/emergency-access/<emer_id>/view")]
 async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
     if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) {
         err!("Emergency access not valid.")
@@ -594,21 +591,22 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn
     }
 
     Ok(Json(json!({
-        "Ciphers": ciphers_json,
-        "KeyEncrypted": &emergency_access.key_encrypted,
-        "Object": "emergencyAccessView",
+        "ciphers": ciphers_json,
+        "keyEncrypted": &emergency_access.key_encrypted,
+        "object": "emergencyAccessView",
     })))
 }
 
 #[post("/emergency-access/<emer_id>/takeover")]
 async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
     if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
@@ -620,42 +618,43 @@ async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
     };
 
     let result = json!({
-        "Kdf": grantor_user.client_kdf_type,
-        "KdfIterations": grantor_user.client_kdf_iter,
-        "KdfMemory": grantor_user.client_kdf_memory,
-        "KdfParallelism": grantor_user.client_kdf_parallelism,
-        "KeyEncrypted": &emergency_access.key_encrypted,
-        "Object": "emergencyAccessTakeover",
+        "kdf": grantor_user.client_kdf_type,
+        "kdfIterations": grantor_user.client_kdf_iter,
+        "kdfMemory": grantor_user.client_kdf_memory,
+        "kdfParallelism": grantor_user.client_kdf_parallelism,
+        "keyEncrypted": &emergency_access.key_encrypted,
+        "object": "emergencyAccessTakeover",
     });
 
     Ok(Json(result))
 }
 
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessPasswordData {
-    NewMasterPasswordHash: String,
-    Key: String,
+    new_master_password_hash: String,
+    key: String,
 }
 
 #[post("/emergency-access/<emer_id>/password", data = "<data>")]
 async fn password_emergency_access(
     emer_id: &str,
-    data: JsonUpcase<EmergencyAccessPasswordData>,
+    data: Json<EmergencyAccessPasswordData>,
     headers: Headers,
     mut conn: DbConn,
 ) -> EmptyResult {
-    check_emergency_access_allowed()?;
+    check_emergency_access_enabled()?;
 
-    let data: EmergencyAccessPasswordData = data.into_inner().data;
-    let new_master_password_hash = &data.NewMasterPasswordHash;
+    let data: EmergencyAccessPasswordData = data.into_inner();
+    let new_master_password_hash = &data.new_master_password_hash;
     //let key = &data.Key;
 
     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
     if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
@@ -667,7 +666,7 @@ async fn password_emergency_access(
     };
 
     // change grantor_user password
-    grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
+    grantor_user.set_password(new_master_password_hash, Some(data.key), true, None);
     grantor_user.save(&mut conn).await?;
 
     // Disable TwoFactor providers since they will otherwise block logins
@@ -687,10 +686,11 @@ async fn password_emergency_access(
 #[get("/emergency-access/<emer_id>/policies")]
 async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => emer,
-        None => err!("Emergency access not valid."),
-    };
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
+            Some(emer) => emer,
+            None => err!("Emergency access not valid."),
+        };
 
     if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
         err!("Emergency access not valid.")
@@ -705,9 +705,9 @@ async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
     let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();
 
     Ok(Json(json!({
-        "Data": policies_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": policies_json,
+        "object": "list",
+        "continuationToken": null
     })))
 }
 
@@ -722,9 +722,9 @@ fn is_valid_request(
         && emergency_access.atype == requested_access_type as i32
 }
 
-fn check_emergency_access_allowed() -> EmptyResult {
+fn check_emergency_access_enabled() -> EmptyResult {
     if !CONFIG.emergency_access_allowed() {
-        err!("Emergency access is not allowed.")
+        err!("Emergency access is not enabled.")
     }
     Ok(())
 }
@@ -746,7 +746,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) {
         for mut emer in emergency_access_list {
             // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
             let recovery_allowed_at =
-                emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
+                emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days)).unwrap();
             if recovery_allowed_at.le(&now) {
                 // Only update the access status
                 // Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
@@ -802,10 +802,10 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) {
             // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
             // Calculate the day before the recovery will become active
             let final_recovery_reminder_at =
-                emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
+                emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days - 1)).unwrap();
             // Calculate if a day has passed since the previous notification, else no notification has been sent before
             let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
-                last_notification_at + Duration::days(1)
+                last_notification_at + TimeDelta::try_days(1).unwrap()
             } else {
                 now
             };
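The chrono change in these two hunks is mechanical but worth spelling out: Duration::days, which panics on out-of-range input, gives way to TimeDelta::try_days, which returns an Option, so the overflow case becomes explicit at the call site. A small sketch:

    use chrono::{TimeDelta, Utc};

    fn main() {
        let wait_time_days: i64 = 7;

        // try_days returns None if the value would overflow, instead of
        // panicking inside chrono; the unwrap makes that choice visible.
        let delta = TimeDelta::try_days(wait_time_days).unwrap();
        let recovery_allowed_at = Utc::now().naive_utc() + delta;

        assert!(TimeDelta::try_days(i64::MAX).is_none());
        println!("recovery allowed at {recovery_allowed_at}");
    }
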
@@ -5,7 +5,7 @@ use rocket::{form::FromForm, serde::json::Json, Route};
 use serde_json::Value;
 
 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcaseVec},
+    api::{EmptyResult, JsonResult},
     auth::{AdminHeaders, Headers},
     db::{
         models::{Cipher, Event, UserOrganization},
@@ -22,7 +22,6 @@ pub fn routes() -> Vec<Route> {
 }
 
 #[derive(FromForm)]
-#[allow(non_snake_case)]
 struct EventRange {
     start: String,
     end: String,
@@ -53,9 +52,9 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
     };
 
     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }
 
@@ -85,9 +84,9 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
     };
 
     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }
 
@@ -119,13 +118,13 @@ async fn get_user_events(
     };
 
     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }
 
-fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
+fn get_continuation_token(events_json: &[Value]) -> Option<&str> {
     // When the length of the vec equals the max page_size there probably is more data
     // When it is less, then all events are loaded.
     if events_json.len() as i64 == Event::PAGE_SIZE {
@@ -145,33 +144,33 @@ pub fn main_routes() -> Vec<Route> {
|
|||||||
routes![post_events_collect,]
|
routes![post_events_collect,]
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize, Debug)]
|
#[derive(Debug, Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct EventCollection {
|
struct EventCollection {
|
||||||
// Mandatory
|
// Mandatory
|
||||||
Type: i32,
|
r#type: i32,
|
||||||
Date: String,
|
date: String,
|
||||||
|
|
||||||
// Optional
|
// Optional
|
||||||
CipherId: Option<String>,
|
cipher_id: Option<String>,
|
||||||
OrganizationId: Option<String>,
|
organization_id: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Upstream:
|
// Upstream:
|
||||||
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
|
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
|
||||||
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
|
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
|
||||||
#[post("/collect", format = "application/json", data = "<data>")]
|
#[post("/collect", format = "application/json", data = "<data>")]
|
||||||
async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers, mut conn: DbConn) -> EmptyResult {
|
||||||
if !CONFIG.org_events_enabled() {
|
if !CONFIG.org_events_enabled() {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
for event in data.iter().map(|d| &d.data) {
|
for event in data.iter() {
|
||||||
let event_date = parse_date(&event.Date);
|
let event_date = parse_date(&event.date);
|
||||||
match event.Type {
|
match event.r#type {
|
||||||
1000..=1099 => {
|
1000..=1099 => {
|
||||||
_log_user_event(
|
_log_user_event(
|
||||||
event.Type,
|
event.r#type,
|
||||||
&headers.user.uuid,
|
&headers.user.uuid,
|
||||||
headers.device.atype,
|
headers.device.atype,
|
||||||
Some(event_date),
|
Some(event_date),
|
||||||
@@ -181,9 +180,9 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
|
|||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
1600..=1699 => {
|
1600..=1699 => {
|
||||||
if let Some(org_uuid) = &event.OrganizationId {
|
if let Some(org_uuid) = &event.organization_id {
|
||||||
_log_event(
|
_log_event(
|
||||||
event.Type,
|
event.r#type,
|
||||||
org_uuid,
|
org_uuid,
|
||||||
org_uuid,
|
org_uuid,
|
||||||
&headers.user.uuid,
|
&headers.user.uuid,
|
||||||
@@ -196,11 +195,11 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
if let Some(cipher_uuid) = &event.CipherId {
|
if let Some(cipher_uuid) = &event.cipher_id {
|
||||||
if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
|
if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
|
||||||
if let Some(org_uuid) = cipher.organization_uuid {
|
if let Some(org_uuid) = cipher.organization_uuid {
|
||||||
_log_event(
|
_log_event(
|
||||||
event.Type,
|
event.r#type,
|
||||||
cipher_uuid,
|
cipher_uuid,
|
||||||
&org_uuid,
|
&org_uuid,
|
||||||
&headers.user.uuid,
|
&headers.user.uuid,
|
||||||
@@ -263,7 +262,7 @@ pub async fn log_event(
|
|||||||
event_type: i32,
|
event_type: i32,
|
||||||
source_uuid: &str,
|
source_uuid: &str,
|
||||||
org_uuid: &str,
|
org_uuid: &str,
|
||||||
act_user_uuid: String,
|
act_user_uuid: &str,
|
||||||
device_type: i32,
|
device_type: i32,
|
||||||
ip: &IpAddr,
|
ip: &IpAddr,
|
||||||
conn: &mut DbConn,
|
conn: &mut DbConn,
|
||||||
@@ -271,7 +270,7 @@ pub async fn log_event(
|
|||||||
if !CONFIG.org_events_enabled() {
|
if !CONFIG.org_events_enabled() {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
_log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
|
_log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
@@ -289,7 +288,7 @@ async fn _log_event(
|
|||||||
let mut event = Event::new(event_type, event_date);
|
let mut event = Event::new(event_type, event_date);
|
||||||
match event_type {
|
match event_type {
|
||||||
// 1000..=1099 Are user events, they need to be logged via log_user_event()
|
// 1000..=1099 Are user events, they need to be logged via log_user_event()
|
||||||
// Collection Events
|
// Cipher Events
|
||||||
1100..=1199 => {
|
1100..=1199 => {
|
||||||
event.cipher_uuid = Some(String::from(source_uuid));
|
event.cipher_uuid = Some(String::from(source_uuid));
|
||||||
}
|
}
|
||||||
|
|||||||
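These events.rs hunks show the recurring shape of the whole changeset: the custom `JsonUpcase` extractor, which tolerated PascalCase JSON keys, is dropped in favour of plain `Json<T>` plus serde's `rename_all = "camelCase"`, with the raw identifier `r#type` standing in for the reserved word `type`. A self-contained sketch of that deserialization pattern (illustrative, not the project's exact code):

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EventCollection {
    // `type` is a Rust keyword, so a raw identifier maps the JSON "type" field.
    r#type: i32,
    date: String,
    cipher_id: Option<String>,       // accepts the JSON key "cipherId"
    organization_id: Option<String>, // accepts the JSON key "organizationId"
}

fn main() {
    let json = r#"{"type": 1000, "date": "2024-01-01T00:00:00Z", "cipherId": null, "organizationId": null}"#;
    let event: EventCollection = serde_json::from_str(json).unwrap();
    println!("{event:?}");
}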
@@ -2,7 +2,7 @@ use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{EmptyResult, JsonResult, Notify, UpdateType},
     auth::Headers,
     db::{models::*, DbConn},
 };
@@ -17,9 +17,9 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

     Json(json!({
-        "Data": folders_json,
-        "Object": "list",
-        "ContinuationToken": null,
+        "data": folders_json,
+        "object": "list",
+        "continuationToken": null,
     }))
 }

@@ -38,16 +38,17 @@ async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct FolderData {
-    pub Name: String,
+    pub name: String,
+    pub id: Option<String>,
 }

 #[post("/folders", data = "<data>")]
-async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
+    let data: FolderData = data.into_inner();

-    let mut folder = Folder::new(headers.user.uuid, data.Name);
+    let mut folder = Folder::new(headers.user.uuid, data.name);

     folder.save(&mut conn).await?;
     nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;
@@ -56,25 +57,19 @@ async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn:
 }

 #[post("/folders/<uuid>", data = "<data>")]
-async fn post_folder(
-    uuid: &str,
-    data: JsonUpcase<FolderData>,
-    headers: Headers,
-    conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn post_folder(uuid: &str, data: Json<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     put_folder(uuid, data, headers, conn, nt).await
 }

 #[put("/folders/<uuid>", data = "<data>")]
 async fn put_folder(
     uuid: &str,
-    data: JsonUpcase<FolderData>,
+    data: Json<FolderData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+    let data: FolderData = data.into_inner();

     let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,
@@ -85,7 +80,7 @@ async fn put_folder(
         err!("Folder belongs to another user")
     }

-    folder.name = data.Name;
+    folder.name = data.name;

     folder.save(&mut conn).await?;
     nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;
|
|||||||
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
|
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
|
||||||
pub use events::{event_cleanup_job, log_event, log_user_event};
|
pub use events::{event_cleanup_job, log_event, log_user_event};
|
||||||
pub use sends::purge_sends;
|
pub use sends::purge_sends;
|
||||||
pub use two_factor::send_incomplete_2fa_notifications;
|
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
pub fn routes() -> Vec<Route> {
|
||||||
let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
|
let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
|
||||||
@@ -47,23 +46,22 @@ pub fn events_routes() -> Vec<Route> {
|
|||||||
//
|
//
|
||||||
// Move this somewhere else
|
// Move this somewhere else
|
||||||
//
|
//
|
||||||
use rocket::{serde::json::Json, Catcher, Route};
|
use rocket::{serde::json::Json, serde::json::Value, Catcher, Route};
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{JsonResult, JsonUpcase, Notify, UpdateType},
|
api::{JsonResult, Notify, UpdateType},
|
||||||
auth::Headers,
|
auth::Headers,
|
||||||
db::DbConn,
|
db::DbConn,
|
||||||
error::Error,
|
error::Error,
|
||||||
util::get_reqwest_client,
|
util::{get_reqwest_client, parse_experimental_client_feature_flags},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct GlobalDomain {
|
struct GlobalDomain {
|
||||||
Type: i32,
|
r#type: i32,
|
||||||
Domains: Vec<String>,
|
domains: Vec<String>,
|
||||||
Excluded: bool,
|
excluded: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
|
const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
|
||||||
@@ -83,38 +81,38 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
|
|||||||
let mut globals: Vec<GlobalDomain> = from_str(GLOBAL_DOMAINS).unwrap();
|
let mut globals: Vec<GlobalDomain> = from_str(GLOBAL_DOMAINS).unwrap();
|
||||||
|
|
||||||
for global in &mut globals {
|
for global in &mut globals {
|
||||||
global.Excluded = excluded_globals.contains(&global.Type);
|
global.excluded = excluded_globals.contains(&global.r#type);
|
||||||
}
|
}
|
||||||
|
|
||||||
if no_excluded {
|
if no_excluded {
|
||||||
globals.retain(|g| !g.Excluded);
|
globals.retain(|g| !g.excluded);
|
||||||
}
|
}
|
||||||
|
|
||||||
Json(json!({
|
Json(json!({
|
||||||
"EquivalentDomains": equivalent_domains,
|
"equivalentDomains": equivalent_domains,
|
||||||
"GlobalEquivalentDomains": globals,
|
"globalEquivalentDomains": globals,
|
||||||
"Object": "domains",
|
"object": "domains",
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize, Debug)]
|
#[derive(Debug, Deserialize)]
|
||||||
#[allow(non_snake_case)]
|
#[serde(rename_all = "camelCase")]
|
||||||
struct EquivDomainData {
|
struct EquivDomainData {
|
||||||
ExcludedGlobalEquivalentDomains: Option<Vec<i32>>,
|
excluded_global_equivalent_domains: Option<Vec<i32>>,
|
||||||
EquivalentDomains: Option<Vec<Vec<String>>>,
|
equivalent_domains: Option<Vec<Vec<String>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/settings/domains", data = "<data>")]
|
#[post("/settings/domains", data = "<data>")]
|
||||||
async fn post_eq_domains(
|
async fn post_eq_domains(
|
||||||
data: JsonUpcase<EquivDomainData>,
|
data: Json<EquivDomainData>,
|
||||||
headers: Headers,
|
headers: Headers,
|
||||||
mut conn: DbConn,
|
mut conn: DbConn,
|
||||||
nt: Notify<'_>,
|
nt: Notify<'_>,
|
||||||
) -> JsonResult {
|
) -> JsonResult {
|
||||||
let data: EquivDomainData = data.into_inner().data;
|
let data: EquivDomainData = data.into_inner();
|
||||||
|
|
||||||
let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
|
let excluded_globals = data.excluded_global_equivalent_domains.unwrap_or_default();
|
||||||
let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
|
let equivalent_domains = data.equivalent_domains.unwrap_or_default();
|
||||||
|
|
||||||
let mut user = headers.user;
|
let mut user = headers.user;
|
||||||
use serde_json::to_string;
|
use serde_json::to_string;
|
||||||
@@ -130,12 +128,7 @@ async fn post_eq_domains(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[put("/settings/domains", data = "<data>")]
|
#[put("/settings/domains", data = "<data>")]
|
||||||
async fn put_eq_domains(
|
async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
|
||||||
data: JsonUpcase<EquivDomainData>,
|
|
||||||
headers: Headers,
|
|
||||||
conn: DbConn,
|
|
||||||
nt: Notify<'_>,
|
|
||||||
) -> JsonResult {
|
|
||||||
post_eq_domains(data, headers, conn, nt).await
|
post_eq_domains(data, headers, conn, nt).await
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -159,15 +152,15 @@ async fn hibp_breach(username: &str) -> JsonResult {
|
|||||||
Ok(Json(value))
|
Ok(Json(value))
|
||||||
} else {
|
} else {
|
||||||
Ok(Json(json!([{
|
Ok(Json(json!([{
|
||||||
"Name": "HaveIBeenPwned",
|
"name": "HaveIBeenPwned",
|
||||||
"Title": "Manual HIBP Check",
|
"title": "Manual HIBP Check",
|
||||||
"Domain": "haveibeenpwned.com",
|
"domain": "haveibeenpwned.com",
|
||||||
"BreachDate": "2019-08-18T00:00:00Z",
|
"breachDate": "2019-08-18T00:00:00Z",
|
||||||
"AddedDate": "2019-08-18T00:00:00Z",
|
"addedDate": "2019-08-18T00:00:00Z",
|
||||||
"Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
|
"description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
|
||||||
"LogoPath": "vw_static/hibp.png",
|
"logoPath": "vw_static/hibp.png",
|
||||||
"PwnCount": 0,
|
"pwnCount": 0,
|
||||||
"DataClasses": [
|
"dataClasses": [
|
||||||
"Error - No API key set!"
|
"Error - No API key set!"
|
||||||
]
|
]
|
||||||
}])))
|
}])))
|
||||||
@@ -193,17 +186,22 @@ fn version() -> Json<&'static str> {
|
|||||||
#[get("/config")]
|
#[get("/config")]
|
||||||
fn config() -> Json<Value> {
|
fn config() -> Json<Value> {
|
||||||
let domain = crate::CONFIG.domain();
|
let domain = crate::CONFIG.domain();
|
||||||
|
let mut feature_states =
|
||||||
|
parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
|
||||||
|
// Force the new key rotation feature
|
||||||
|
feature_states.insert("key-rotation-improvements".to_string(), true);
|
||||||
Json(json!({
|
Json(json!({
|
||||||
// Note: The clients use this version to handle backwards compatibility concerns
|
// Note: The clients use this version to handle backwards compatibility concerns
|
||||||
// This means they expect a version that closely matches the Bitwarden server version
|
// This means they expect a version that closely matches the Bitwarden server version
|
||||||
// We should make sure that we keep this updated when we support the new server features
|
// We should make sure that we keep this updated when we support the new server features
|
||||||
// Version history:
|
// Version history:
|
||||||
// - Individual cipher key encryption: 2023.9.1
|
// - Individual cipher key encryption: 2023.9.1
|
||||||
"version": "2023.9.1",
|
"version": "2024.2.0",
|
||||||
"gitHash": option_env!("GIT_REV"),
|
"gitHash": option_env!("GIT_REV"),
|
||||||
"server": {
|
"server": {
|
||||||
"name": "Vaultwarden",
|
"name": "Vaultwarden",
|
||||||
"url": "https://github.com/dani-garcia/vaultwarden"
|
"url": "https://github.com/dani-garcia/vaultwarden",
|
||||||
|
"version": crate::VERSION
|
||||||
},
|
},
|
||||||
"environment": {
|
"environment": {
|
||||||
"vault": domain,
|
"vault": domain,
|
||||||
@@ -212,13 +210,7 @@ fn config() -> Json<Value> {
|
|||||||
"notifications": format!("{domain}/notifications"),
|
"notifications": format!("{domain}/notifications"),
|
||||||
"sso": "",
|
"sso": "",
|
||||||
},
|
},
|
||||||
"featureStates": {
|
"featureStates": feature_states,
|
||||||
// Any feature flags that we want the clients to use
|
|
||||||
// Can check the enabled ones at:
|
|
||||||
// https://vault.bitwarden.com/api/config
|
|
||||||
"autofill-v2": true,
|
|
||||||
"fido2-vault-credentials": true
|
|
||||||
},
|
|
||||||
"object": "config",
|
"object": "config",
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|||||||
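The new `/api/config` response builds its `featureStates` map from the `experimental_client_feature_flags` setting via `parse_experimental_client_feature_flags`, whose body is not part of this diff. A hypothetical stand-in, assuming the input is a comma-separated list of flag names (the input format is an assumption, not confirmed by these hunks):

use std::collections::HashMap;

// Hypothetical stand-in for the helper imported from the util module;
// the real implementation is not shown in this changeset.
fn parse_feature_flags(csv: &str) -> HashMap<String, bool> {
    csv.split(',')
        .map(str::trim)
        .filter(|f| !f.is_empty())
        .map(|f| (f.to_string(), true))
        .collect()
}

fn main() {
    let mut feature_states = parse_feature_flags("autofill-v2,fido2-vault-credentials");
    // The config() hunk force-enables the new key rotation flow regardless of config:
    feature_states.insert("key-rotation-improvements".to_string(), true);
    println!("{feature_states:?}");
}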
File diff suppressed because it is too large
@@ -1,13 +1,14 @@
 use chrono::Utc;
 use rocket::{
     request::{self, FromRequest, Outcome},
+    serde::json::Json,
     Request, Route,
 };

 use std::collections::HashSet;

 use crate::{
-    api::{EmptyResult, JsonUpcase},
+    api::EmptyResult,
     auth,
     db::{models::*, DbConn},
     mail, CONFIG,
@@ -18,43 +19,43 @@ pub fn routes() -> Vec<Route> {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportGroupData {
-    Name: String,
-    ExternalId: String,
-    MemberExternalIds: Vec<String>,
+    name: String,
+    external_id: String,
+    member_external_ids: Vec<String>,
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportUserData {
-    Email: String,
-    ExternalId: String,
-    Deleted: bool,
+    email: String,
+    external_id: String,
+    deleted: bool,
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportData {
-    Groups: Vec<OrgImportGroupData>,
-    Members: Vec<OrgImportUserData>,
-    OverwriteExisting: bool,
-    // LargeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set.
+    groups: Vec<OrgImportGroupData>,
+    members: Vec<OrgImportUserData>,
+    overwrite_existing: bool,
+    // largeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set.
 }

 #[post("/public/organization/import", data = "<data>")]
-async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
+async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
     // Most of the logic for this function can be found here
     // https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797

     let org_id = token.0;
-    let data = data.into_inner().data;
+    let data = data.into_inner();

-    for user_data in &data.Members {
-        if user_data.Deleted {
+    for user_data in &data.members {
+        if user_data.deleted {
             // If user is marked for deletion and it exists, revoke it
             if let Some(mut user_org) =
-                UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
+                UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
             {
                 // Only revoke a user if it is not the last confirmed owner
                 let revoked = if user_org.atype == UserOrgType::Owner
@@ -72,27 +73,27 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
                     user_org.revoke()
                 };

-                let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+                let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
                 if revoked || ext_modified {
                     user_org.save(&mut conn).await?;
                 }
             }
         // If user is part of the organization, restore it
         } else if let Some(mut user_org) =
-            UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
+            UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
         {
             let restored = user_org.restore();
-            let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+            let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
             if restored || ext_modified {
                 user_org.save(&mut conn).await?;
             }
         } else {
             // If user is not part of the organization
-            let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
+            let user = match User::find_by_mail(&user_data.email, &mut conn).await {
                 Some(user) => user, // exists in vaultwarden
                 None => {
                     // User does not exist yet
-                    let mut new_user = User::new(user_data.Email.clone());
+                    let mut new_user = User::new(user_data.email.clone());
                     new_user.save(&mut conn).await?;

                     if !CONFIG.mail_enabled() {
@@ -109,7 +110,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             };

             let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
-            new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
+            new_org_user.set_external_id(Some(user_data.external_id.clone()));
             new_org_user.access_all = false;
             new_org_user.atype = UserOrgType::User as i32;
             new_org_user.status = user_org_status;
@@ -123,7 +124,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
             };

             mail::send_invite(
-                &user_data.Email,
+                &user_data.email,
                 &user.uuid,
                 Some(org_id.clone()),
                 Some(new_org_user.uuid),
@@ -136,12 +137,17 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
     }

     if CONFIG.org_groups_enabled() {
-        for group_data in &data.Groups {
-            let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await {
+        for group_data in &data.groups {
+            let group_uuid = match Group::find_by_external_id_and_org(&group_data.external_id, &org_id, &mut conn).await
+            {
                 Some(group) => group.uuid,
                 None => {
-                    let mut group =
-                        Group::new(org_id.clone(), group_data.Name.clone(), false, Some(group_data.ExternalId.clone()));
+                    let mut group = Group::new(
+                        org_id.clone(),
+                        group_data.name.clone(),
+                        false,
+                        Some(group_data.external_id.clone()),
+                    );
                     group.save(&mut conn).await?;
                     group.uuid
                 }
@@ -149,7 +155,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co

             GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;

-            for ext_id in &group_data.MemberExternalIds {
+            for ext_id in &group_data.member_external_ids {
                 if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
                 {
                     let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
@@ -162,9 +168,9 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
     }

     // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
-    if data.OverwriteExisting {
+    if data.overwrite_existing {
         // Generate a HashSet to quickly verify if a member is listed or not.
-        let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
+        let sync_members: HashSet<String> = data.members.into_iter().map(|m| m.external_id).collect();
         for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
             if let Some(ref user_external_id) = user_org.external_id {
                 if !sync_members.contains(user_external_id) {
@@ -209,19 +215,15 @@ impl<'r> FromRequest<'r> for PublicToken {
             Err(_) => err_handler!("Invalid claim"),
         };
         // Check if time is between claims.nbf and claims.exp
-        let time_now = Utc::now().naive_utc().timestamp();
+        let time_now = Utc::now().timestamp();
         if time_now < claims.nbf {
             err_handler!("Token issued in the future");
         }
         if time_now > claims.exp {
             err_handler!("Token expired");
         }
-        // Check if claims.iss is host|claims.scope[0]
-        let host = match auth::Host::from_request(request).await {
-            Outcome::Success(host) => host,
-            _ => err_handler!("Error getting Host"),
-        };
-        let complete_host = format!("{}|{}", host.host, claims.scope[0]);
+        // Check if claims.iss is domain|claims.scope[0]
+        let complete_host = format!("{}|{}", CONFIG.domain_origin(), claims.scope[0]);
         if complete_host != claims.iss {
             err_handler!("Token not issued by this server");
         }
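The `PublicToken` guard above now validates JWT timing with plain Unix timestamps (`Utc::now().timestamp()` replaces the redundant `.naive_utc()` hop) and derives the expected issuer from the configured domain origin rather than the request's Host header. A condensed sketch of those checks, with the claims struct and signature simplified for illustration:

use chrono::Utc;

struct Claims {
    nbf: i64,
    exp: i64,
    iss: String,
    scope: Vec<String>,
}

fn check_token(claims: &Claims, domain_origin: &str) -> Result<(), &'static str> {
    // Check if time is between claims.nbf and claims.exp
    let time_now = Utc::now().timestamp();
    if time_now < claims.nbf {
        return Err("Token issued in the future");
    }
    if time_now > claims.exp {
        return Err("Token expired");
    }
    // Check if claims.iss is domain|claims.scope[0]
    let complete_host = format!("{}|{}", domain_origin, claims.scope[0]);
    if complete_host != claims.iss {
        return Err("Token not issued by this server");
    }
    Ok(())
}

fn main() {
    let claims = Claims {
        nbf: 0,
        exp: i64::MAX,
        iss: "https://vw.example|api.organization".into(),
        scope: vec!["api.organization".into()],
    };
    assert!(check_token(&claims, "https://vw.example").is_ok());
}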
@@ -1,6 +1,7 @@
 use std::path::Path;

-use chrono::{DateTime, Duration, Utc};
+use chrono::{DateTime, TimeDelta, Utc};
+use num_traits::ToPrimitive;
 use rocket::form::Form;
 use rocket::fs::NamedFile;
 use rocket::fs::TempFile;
@@ -8,17 +9,17 @@ use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
+    api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
     auth::{ClientIp, Headers, Host},
     db::{models::*, DbConn, DbPool},
-    util::SafeString,
+    util::{NumberOrString, SafeString},
     CONFIG,
 };

 const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available";

 // The max file size allowed by Bitwarden clients and add an extra 5% to avoid issues
-const SIZE_525_MB: u64 = 550_502_400;
+const SIZE_525_MB: i64 = 550_502_400;

 pub fn routes() -> Vec<rocket::Route> {
     routes![
@@ -47,23 +48,26 @@ pub async fn purge_sends(pool: DbPool) {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
-struct SendData {
-    Type: i32,
-    Key: String,
-    Password: Option<String>,
-    MaxAccessCount: Option<NumberOrString>,
-    ExpirationDate: Option<DateTime<Utc>>,
-    DeletionDate: DateTime<Utc>,
-    Disabled: bool,
-    HideEmail: Option<bool>,
+#[serde(rename_all = "camelCase")]
+pub struct SendData {
+    r#type: i32,
+    key: String,
+    password: Option<String>,
+    max_access_count: Option<NumberOrString>,
+    expiration_date: Option<DateTime<Utc>>,
+    deletion_date: DateTime<Utc>,
+    disabled: bool,
+    hide_email: Option<bool>,

     // Data field
-    Name: String,
-    Notes: Option<String>,
-    Text: Option<Value>,
-    File: Option<Value>,
-    FileLength: Option<NumberOrString>,
+    name: String,
+    notes: Option<String>,
+    text: Option<Value>,
+    file: Option<Value>,
+    file_length: Option<NumberOrString>,
+
+    // Used for key rotations
+    pub id: Option<String>,
 }

 /// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@@ -92,7 +96,7 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em
 /// Ref: https://bitwarden.com/help/article/policies/#send-options
 async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
     let user_uuid = &headers.user.uuid;
-    let hide_email = data.HideEmail.unwrap_or(false);
+    let hide_email = data.hide_email.unwrap_or(false);
     if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
         err!(
             "Due to an Enterprise Policy, you are not allowed to hide your email address \
@@ -103,40 +107,40 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c
 }

 fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
-    let data_val = if data.Type == SendType::Text as i32 {
-        data.Text
-    } else if data.Type == SendType::File as i32 {
-        data.File
+    let data_val = if data.r#type == SendType::Text as i32 {
+        data.text
+    } else if data.r#type == SendType::File as i32 {
+        data.file
     } else {
         err!("Invalid Send type")
     };

     let data_str = if let Some(mut d) = data_val {
-        d.as_object_mut().and_then(|o| o.remove("Response"));
+        d.as_object_mut().and_then(|o| o.remove("response"));
         serde_json::to_string(&d)?
     } else {
         err!("Send data not provided");
     };

-    if data.DeletionDate > Utc::now() + Duration::days(31) {
+    if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
         err!(
             "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
         );
     }

-    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
+    let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc());
     send.user_uuid = Some(user_uuid);
-    send.notes = data.Notes;
-    send.max_access_count = match data.MaxAccessCount {
+    send.notes = data.notes;
+    send.max_access_count = match data.max_access_count {
         Some(m) => Some(m.into_i32()?),
         _ => None,
     };
-    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
-    send.disabled = data.Disabled;
-    send.hide_email = data.HideEmail;
-    send.atype = data.Type;
+    send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
+    send.disabled = data.disabled;
+    send.hide_email = data.hide_email;
+    send.atype = data.r#type;

-    send.set_password(data.Password.as_deref());
+    send.set_password(data.password.as_deref());

     Ok(send)
 }
@@ -147,9 +151,9 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();

     Json(json!({
-        "Data": sends_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": sends_json,
+        "object": "list",
+        "continuationToken": null
     }))
 }

@@ -168,13 +172,13 @@ async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult
 }

 #[post("/sends", data = "<data>")]
-async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
+async fn post_send(data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data: SendData = data.into_inner().data;
+    let data: SendData = data.into_inner();
     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

-    if data.Type == SendType::File as i32 {
+    if data.r#type == SendType::File as i32 {
         err!("File sends should use /api/sends/file")
     }

@@ -194,7 +198,7 @@ async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbCon

 #[derive(FromForm)]
 struct UploadData<'f> {
-    model: Json<crate::util::UpCase<SendData>>,
+    model: Json<SendData>,
     data: TempFile<'f>,
 }

@@ -214,32 +218,43 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
         model,
         mut data,
     } = data.into_inner();
-    let model = model.into_inner().data;
+    let model = model.into_inner();

+    let Some(size) = data.len().to_i64() else {
+        err!("Invalid send size");
+    };
+    if size < 0 {
+        err!("Send size can't be negative")
+    }

     enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?;

-    let size_limit = match CONFIG.user_attachment_limit() {
+    let size_limit = match CONFIG.user_send_limit() {
         Some(0) => err!("File uploads are disabled"),
         Some(limit_kb) => {
-            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
+            let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
+                err!("Existing sends overflow")
+            };
+            let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
+                err!("Send size overflow");
+            };
             if left <= 0 {
-                err!("Attachment storage limit reached! Delete some attachments to free up space")
+                err!("Send storage limit reached! Delete some sends to free up space")
             }
-            std::cmp::Ord::max(left as u64, SIZE_525_MB)
+            i64::clamp(left, 0, SIZE_525_MB)
         }
         None => SIZE_525_MB,
     };

+    if size > size_limit {
+        err!("Send storage limit exceeded with this file");
+    }
+
     let mut send = create_send(model, headers.user.uuid)?;
     if send.atype != SendType::File as i32 {
         err!("Send content is not a file");
     }

-    let size = data.len();
-    if size > size_limit {
-        err!("Attachment storage limit exceeded with this file");
-    }
-
     let file_id = crate::crypto::generate_send_id();
     let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
     let file_path = folder_path.join(&file_id);
@@ -251,9 +266,9 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:

     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
-        o.insert(String::from("Id"), Value::String(file_id));
-        o.insert(String::from("Size"), Value::Number(size.into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32)));
+        o.insert(String::from("id"), Value::String(file_id));
+        o.insert(String::from("size"), Value::Number(size.into()));
+        o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(size)));
     }
     send.data = serde_json::to_string(&data_value)?;

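Both upload paths replace the old `(limit_kb * 1024) - used` arithmetic, which could wrap around with extreme configured limits, with checked operations clamped to the 525 MB client maximum. A standalone sketch of the quota logic these hunks adopt (not the project's exact code; error strings borrowed from the diff):

const SIZE_525_MB: i64 = 550_502_400;

fn remaining_quota(limit_kb: i64, already_used: i64) -> Result<i64, &'static str> {
    // Every step is checked, so a huge limit_kb yields an error instead of overflow.
    let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
        return Err("Send size overflow");
    };
    if left <= 0 {
        return Err("Send storage limit reached! Delete some sends to free up space");
    }
    Ok(i64::clamp(left, 0, SIZE_525_MB))
}

fn main() {
    assert!(remaining_quota(i64::MAX, 0).is_err()); // multiplication would overflow
    assert_eq!(remaining_quota(1024, 0).unwrap(), 1024 * 1024);
}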
@@ -273,36 +288,44 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:

 // Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
 #[post("/sends/file/v2", data = "<data>")]
-async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data = data.into_inner().data;
+    let data = data.into_inner();

-    if data.Type != SendType::File as i32 {
+    if data.r#type != SendType::File as i32 {
         err!("Send content is not a file");
     }

     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

-    let file_length = match &data.FileLength {
-        Some(m) => Some(m.into_i32()?),
-        _ => None,
+    let file_length = match &data.file_length {
+        Some(m) => m.into_i64()?,
+        _ => err!("Invalid send length"),
     };
+    if file_length < 0 {
+        err!("Send size can't be negative")
+    }

-    let size_limit = match CONFIG.user_attachment_limit() {
+    let size_limit = match CONFIG.user_send_limit() {
         Some(0) => err!("File uploads are disabled"),
         Some(limit_kb) => {
-            let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await;
+            let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else {
+                err!("Existing sends overflow")
+            };
+            let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else {
+                err!("Send size overflow");
+            };
             if left <= 0 {
-                err!("Attachment storage limit reached! Delete some attachments to free up space")
+                err!("Send storage limit reached! Delete some sends to free up space")
             }
-            std::cmp::Ord::max(left as u64, SIZE_525_MB)
+            i64::clamp(left, 0, SIZE_525_MB)
         }
         None => SIZE_525_MB,
     };

-    if file_length.is_some() && file_length.unwrap() as u64 > size_limit {
-        err!("Attachment storage limit exceeded with this file");
+    if file_length > size_limit {
+        err!("Send storage limit exceeded with this file");
     }

     let mut send = create_send(data, headers.user.uuid)?;
@@ -311,9 +334,9 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con

     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
-        o.insert(String::from("Id"), Value::String(file_id.clone()));
-        o.insert(String::from("Size"), Value::Number(file_length.unwrap().into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap())));
+        o.insert(String::from("id"), Value::String(file_id.clone()));
+        o.insert(String::from("size"), Value::Number(file_length.into()));
+        o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(file_length)));
     }
     send.data = serde_json::to_string(&data_value)?;
     send.save(&mut conn).await?;
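`file_length` arrives through `NumberOrString`, since clients may send the length either as a JSON number or as a string; the hunk above switches it from the lossy `into_i32` to `into_i64` and makes a missing length a hard error. The real helper lives in Vaultwarden's util module; the usual serde idiom it corresponds to looks roughly like this (illustrative, not the project's exact code):

use serde::Deserialize;

// An untagged enum lets serde accept either JSON form for the same field.
#[derive(Deserialize)]
#[serde(untagged)]
enum NumberOrString {
    Number(i64),
    String(String),
}

impl NumberOrString {
    fn into_i64(self) -> Result<i64, std::num::ParseIntError> {
        match self {
            NumberOrString::Number(n) => Ok(n),
            NumberOrString::String(s) => s.parse(),
        }
    }
}

fn main() {
    let n: NumberOrString = serde_json::from_str("42").unwrap();
    let s: NumberOrString = serde_json::from_str("\"42\"").unwrap();
    assert_eq!(n.into_i64().unwrap(), 42);
    assert_eq!(s.into_i64().unwrap(), 42);
}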
@@ -372,15 +395,15 @@ async fn post_send_file_v2_data(
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct SendAccessData {
-    pub Password: Option<String>,
+    pub password: Option<String>,
 }

 #[post("/sends/access/<access_id>", data = "<data>")]
 async fn post_access(
     access_id: &str,
-    data: JsonUpcase<SendAccessData>,
+    data: Json<SendAccessData>,
     mut conn: DbConn,
     ip: ClientIp,
     nt: Notify<'_>,
@@ -411,7 +434,7 @@ async fn post_access(
     }

     if send.password_hash.is_some() {
-        match data.into_inner().data.Password {
+        match data.into_inner().password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
             Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
             None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),
@@ -441,7 +464,7 @@ async fn post_access(
 async fn post_access_file(
     send_id: &str,
     file_id: &str,
-    data: JsonUpcase<SendAccessData>,
+    data: Json<SendAccessData>,
     host: Host,
     mut conn: DbConn,
     nt: Notify<'_>,
@@ -472,7 +495,7 @@ async fn post_access_file(
     }

     if send.password_hash.is_some() {
-        match data.into_inner().data.Password {
+        match data.into_inner().password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
             Some(_) => err!("Invalid password."),
             None => err_code!("Password not provided", 401),
@@ -495,9 +518,9 @@ async fn post_access_file(
     let token_claims = crate::auth::generate_send_claims(send_id, file_id);
     let token = crate::auth::encode_jwt(&token_claims);
     Ok(Json(json!({
-        "Object": "send-fileDownload",
-        "Id": file_id,
-        "Url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
+        "object": "send-fileDownload",
+        "id": file_id,
+        "url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
     })))
 }

@@ -512,16 +535,10 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Opt
 }

 #[put("/sends/<id>", data = "<data>")]
-async fn put_send(
-    id: &str,
-    data: JsonUpcase<SendData>,
-    headers: Headers,
-    mut conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn put_send(id: &str, data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data: SendData = data.into_inner().data;
+    let data: SendData = data.into_inner();
     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

     let mut send = match Send::find_by_uuid(id, &mut conn).await {
@@ -529,19 +546,38 @@ async fn put_send(
         None => err!("Send not found"),
     };

+    update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
+
+    Ok(Json(send.to_json()))
+}
+
+pub async fn update_send_from_data(
+    send: &mut Send,
+    data: SendData,
+    headers: &Headers,
+    conn: &mut DbConn,
+    nt: &Notify<'_>,
+    ut: UpdateType,
+) -> EmptyResult {
     if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
         err!("Send is not owned by user")
     }

-    if send.atype != data.Type {
+    if send.atype != data.r#type {
         err!("Sends can't change type")
     }

+    if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
+        err!(
+            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
+        );
+    }
+
     // When updating a file Send, we receive nulls in the File field, as it's immutable,
     // so we only need to update the data field in the Text case
-    if data.Type == SendType::Text as i32 {
-        let data_str = if let Some(mut d) = data.Text {
-            d.as_object_mut().and_then(|d| d.remove("Response"));
+    if data.r#type == SendType::Text as i32 {
+        let data_str = if let Some(mut d) = data.text {
+            d.as_object_mut().and_then(|d| d.remove("response"));
             serde_json::to_string(&d)?
         } else {
             err!("Send data not provided");
@@ -549,39 +585,28 @@ async fn put_send(
         send.data = data_str;
     }

-    if data.DeletionDate > Utc::now() + Duration::days(31) {
-        err!(
-            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
-        );
-    }
-    send.name = data.Name;
-    send.akey = data.Key;
-    send.deletion_date = data.DeletionDate.naive_utc();
-    send.notes = data.Notes;
-    send.max_access_count = match data.MaxAccessCount {
+    send.name = data.name;
+    send.akey = data.key;
+    send.deletion_date = data.deletion_date.naive_utc();
+    send.notes = data.notes;
+    send.max_access_count = match data.max_access_count {
         Some(m) => Some(m.into_i32()?),
         _ => None,
     };
-    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
-    send.hide_email = data.HideEmail;
-    send.disabled = data.Disabled;
+    send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
+    send.hide_email = data.hide_email;
+    send.disabled = data.disabled;

     // Only change the value if it's present
-    if let Some(password) = data.Password {
+    if let Some(password) = data.password {
         send.set_password(Some(&password));
     }

-    send.save(&mut conn).await?;
-    nt.send_send_update(
-        UpdateType::SyncSendUpdate,
-        &send,
-        &send.update_users_revision(&mut conn).await,
-        &headers.device.uuid,
-        &mut conn,
-    )
-    .await;
-
-    Ok(Json(send.to_json()))
+    send.save(conn).await?;
+    if ut != UpdateType::None {
+        nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device.uuid, conn).await;
+    }
+    Ok(())
 }

 #[delete("/sends/<id>")]
src/api/core/two_factor/authenticator.rs

@@ -3,16 +3,14 @@ use rocket::serde::json::Json;
 use rocket::Route;

 use crate::{
-    api::{
-        core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
-        NumberOrString, PasswordData,
-    },
+    api::{core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, PasswordOrOtpData},
     auth::{ClientIp, Headers},
     crypto,
     db::{
         models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
     },
+    util::NumberOrString,
 };

 pub use crate::config::CONFIG;
@@ -22,13 +20,11 @@ pub fn routes() -> Vec<Route> {
 }

 #[post("/two-factor/get-authenticator", data = "<data>")]
-async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn generate_authenticator(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

     let type_ = TwoFactorType::Authenticator as i32;
     let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await;
@@ -39,36 +35,35 @@ async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers
     };

     Ok(Json(json!({
-        "Enabled": enabled,
-        "Key": key,
-        "Object": "twoFactorAuthenticator"
+        "enabled": enabled,
+        "key": key,
+        "object": "twoFactorAuthenticator"
     })))
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EnableAuthenticatorData {
-    MasterPasswordHash: String,
-    Key: String,
-    Token: NumberOrString,
+    key: String,
+    token: NumberOrString,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 #[post("/two-factor/authenticator", data = "<data>")]
-async fn activate_authenticator(
-    data: JsonUpcase<EnableAuthenticatorData>,
-    headers: Headers,
-    mut conn: DbConn,
-) -> JsonResult {
-    let data: EnableAuthenticatorData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
-    let key = data.Key;
-    let token = data.Token.into_string();
+async fn activate_authenticator(data: Json<EnableAuthenticatorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableAuthenticatorData = data.into_inner();
+    let key = data.key;
+    let token = data.token.into_string();

     let mut user = headers.user;

-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password");
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
+    .validate(&user, true, &mut conn)
+    .await?;

     // Validate key as base32 and 20 bytes length
     let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
@@ -88,18 +83,14 @@ async fn activate_authenticator(
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Enabled": true,
-        "Key": key,
-        "Object": "twoFactorAuthenticator"
+        "enabled": true,
+        "key": key,
+        "object": "twoFactorAuthenticator"
     })))
 }

 #[put("/two-factor/authenticator", data = "<data>")]
-async fn activate_authenticator_put(
-    data: JsonUpcase<EnableAuthenticatorData>,
-    headers: Headers,
-    conn: DbConn,
-) -> JsonResult {
+async fn activate_authenticator_put(data: Json<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_authenticator(data, headers, conn).await
 }
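`PasswordOrOtpData`, which these handlers now delegate to, is defined elsewhere in this changeset (in the core API module), so only its call sites are visible here. A rough sketch of the shape those call sites imply; the struct fields come straight from the diffs, but the body is an assumption stitched together from `check_valid_password` and the `validate_protected_action_otp` helper added further down:

    // Sketch only: not the verbatim implementation from this PR.
    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    pub struct PasswordOrOtpData {
        pub master_password_hash: Option<String>,
        pub otp: Option<String>,
    }

    impl PasswordOrOtpData {
        /// `delete_if_valid` mirrors the boolean at the call sites: destructive
        /// operations pass `true` so a consumed OTP cannot be replayed.
        pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
            match (self.master_password_hash.as_deref(), self.otp.as_deref()) {
                (Some(hash), _) => {
                    if !user.check_valid_password(hash) {
                        err!("Invalid password");
                    }
                    Ok(())
                }
                (None, Some(otp)) => {
                    protected_actions::validate_protected_action_otp(otp, &user.uuid, delete_if_valid, conn).await
                }
                (None, None) => err!("No validation provided"),
            }
        }
    }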
@@ -154,8 +145,8 @@ pub async fn validate_totp_code(
         let time = (current_timestamp + step * 30i64) as u64;
         let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);

-        // Check the the given code equals the generated and if the time_step is larger then the one last used.
-        if generated == totp_code && time_step > i64::from(twofactor.last_used) {
+        // Check the given code equals the generated and if the time_step is larger then the one last used.
+        if generated == totp_code && time_step > twofactor.last_used {
             // If the step does not equals 0 the time is drifted either server or client side.
             if step != 0 {
                 warn!("TOTP Time drift detected. The step offset is {}", step);
@@ -163,10 +154,10 @@ pub async fn validate_totp_code(

             // Save the last used time step so only totp time steps higher then this one are allowed.
             // This will also save a newly created twofactor if the code is correct.
-            twofactor.last_used = time_step as i32;
+            twofactor.last_used = time_step;
             twofactor.save(conn).await?;
             return Ok(());
-        } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
+        } else if generated == totp_code && time_step <= twofactor.last_used {
             warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
             err!(
                 format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
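`totp_custom::<Sha1>` above comes from the `totp-lite` crate: a 30-second step, 6 digits, and an explicit timestamp, which is what lets the surrounding loop probe neighboring time steps. A self-contained sketch of the same window-and-replay check; the plus/minus one step window and the `last_used` bookkeeping follow the code above, while the function itself is illustrative:

    use totp_lite::{totp_custom, Sha1};

    /// Returns the accepted time step if `submitted` matches within a +/-1 step
    /// window and is newer than the last accepted step; the caller persists it.
    fn check_totp(secret: &[u8], submitted: &str, now_secs: i64, last_used: i64) -> Option<i64> {
        for step in -1i64..=1 {
            let time_step = now_secs / 30 + step;
            let time = (now_secs + step * 30) as u64;
            let generated = totp_custom::<Sha1>(30, 6, secret, time);
            // Reject codes from a step at or below the last accepted one (replay).
            if generated == submitted && time_step > last_used {
                return Some(time_step);
            }
        }
        None
    }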
src/api/core/two_factor/duo.rs

@@ -5,8 +5,8 @@ use rocket::Route;

 use crate::{
     api::{
-        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
-        PasswordData,
+        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult,
+        PasswordOrOtpData,
     },
     auth::Headers,
     crypto,
@@ -92,14 +92,13 @@ impl DuoStatus {
 const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";

 #[post("/two-factor/get-duo", data = "<data>")]
-async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
+    let user = headers.user;

-    if !headers.user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

-    let data = get_user_duo_data(&headers.user.uuid, &mut conn).await;
+    let data = get_user_duo_data(&user.uuid, &mut conn).await;

     let (enabled, data) = match data {
         DuoStatus::Global(_) => (true, Some(DuoData::secret())),
@@ -110,16 +109,16 @@ async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbC

     let json = if let Some(data) = data {
         json!({
-            "Enabled": enabled,
-            "Host": data.host,
-            "SecretKey": data.sk,
-            "IntegrationKey": data.ik,
-            "Object": "twoFactorDuo"
+            "enabled": enabled,
+            "host": data.host,
+            "secretKey": data.sk,
+            "integrationKey": data.ik,
+            "object": "twoFactorDuo"
         })
     } else {
         json!({
-            "Enabled": enabled,
-            "Object": "twoFactorDuo"
+            "enabled": enabled,
+            "object": "twoFactorDuo"
         })
     };

@@ -127,20 +126,21 @@ async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbC
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case, dead_code)]
+#[serde(rename_all = "camelCase")]
 struct EnableDuoData {
-    MasterPasswordHash: String,
-    Host: String,
-    SecretKey: String,
-    IntegrationKey: String,
+    host: String,
+    secret_key: String,
+    integration_key: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 impl From<EnableDuoData> for DuoData {
     fn from(d: EnableDuoData) -> Self {
         Self {
-            host: d.Host,
-            ik: d.IntegrationKey,
-            sk: d.SecretKey,
+            host: d.host,
+            ik: d.integration_key,
+            sk: d.secret_key,
         }
     }
 }
@@ -151,17 +151,20 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
         st.is_empty() || s == DISABLED_MESSAGE_DEFAULT
     }

-    !empty_or_default(&data.Host) && !empty_or_default(&data.SecretKey) && !empty_or_default(&data.IntegrationKey)
+    !empty_or_default(&data.host) && !empty_or_default(&data.secret_key) && !empty_or_default(&data.integration_key)
 }

 #[post("/two-factor/duo", data = "<data>")]
-async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableDuoData = data.into_inner().data;
+async fn activate_duo(data: Json<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableDuoData = data.into_inner();
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash.clone(),
+        otp: data.otp.clone(),
     }
+    .validate(&user, true, &mut conn)
+    .await?;

     let (data, data_str) = if check_duo_fields_custom(&data) {
         let data_req: DuoData = data.into();
@@ -181,16 +184,16 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Enabled": true,
-        "Host": data.host,
-        "SecretKey": data.sk,
-        "IntegrationKey": data.ik,
-        "Object": "twoFactorDuo"
+        "enabled": true,
+        "host": data.host,
+        "secretKey": data.sk,
+        "integrationKey": data.ik,
+        "object": "twoFactorDuo"
     })))
 }

 #[put("/two-factor/duo", data = "<data>")]
-async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_duo_put(data: Json<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_duo(data, headers, conn).await
 }
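Worth spelling out the convention `check_duo_fields_custom` encodes: fields left empty, or still set to `DISABLED_MESSAGE_DEFAULT`, mean "use the globally configured Duo keys". A standalone restatement of that predicate, mirroring the helper visible in the hunk above:

    const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";

    /// A field counts as "unset" if it is blank or still the placeholder text.
    fn empty_or_default(value: &str) -> bool {
        let st = value.trim();
        st.is_empty() || st == DISABLED_MESSAGE_DEFAULT
    }

    fn uses_custom_duo(host: &str, secret_key: &str, integration_key: &str) -> bool {
        !empty_or_default(host) && !empty_or_default(secret_key) && !empty_or_default(integration_key)
    }

    fn main() {
        // Untouched placeholder fields fall back to the global Duo configuration.
        assert!(!uses_custom_duo(DISABLED_MESSAGE_DEFAULT, "", ""));
        assert!(uses_custom_duo("api-xxxx.duosecurity.com", "sk", "ik"));
    }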
src/api/core/two_factor/email.rs

@@ -1,16 +1,16 @@
-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{DateTime, TimeDelta, Utc};
 use rocket::serde::json::Json;
 use rocket::Route;

 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     crypto,
     db::{
-        models::{EventType, TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType, User},
         DbConn,
     },
     error::{Error, MapResult},
@@ -22,28 +22,28 @@ pub fn routes() -> Vec<Route> {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct SendEmailLoginData {
-    Email: String,
-    MasterPasswordHash: String,
+    email: String,
+    master_password_hash: String,
 }

 /// User is trying to login and wants to use email 2FA.
 /// Does not require Bearer token
 #[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
-async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
-    let data: SendEmailLoginData = data.into_inner().data;
+async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
+    let data: SendEmailLoginData = data.into_inner();

     use crate::db::models::User;

     // Get the user
-    let user = match User::find_by_mail(&data.Email, &mut conn).await {
+    let user = match User::find_by_mail(&data.email, &mut conn).await {
         Some(user) => user,
         None => err!("Username or password is incorrect. Try again."),
     };

     // Check password
-    if !user.check_valid_password(&data.MasterPasswordHash) {
+    if !user.check_valid_password(&data.master_password_hash) {
         err!("Username or password is incorrect. Try again.")
     }

@@ -76,13 +76,11 @@ pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {

 /// When user clicks on Manage email 2FA show the user the related information
 #[post("/two-factor/get-email", data = "<data>")]
-async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_email(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;

     let (enabled, mfa_email) =
         match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await {
@@ -94,29 +92,33 @@ async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: D
     };

     Ok(Json(json!({
-        "Email": mfa_email,
-        "Enabled": enabled,
-        "Object": "twoFactorEmail"
+        "email": mfa_email,
+        "enabled": enabled,
+        "object": "twoFactorEmail"
     })))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct SendEmailData {
     /// Email where 2FA codes will be sent to, can be different than user email account.
-    Email: String,
-    MasterPasswordHash: String,
+    email: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 /// Send a verification email to the specified email address to check whether it exists/belongs to user.
 #[post("/two-factor/send-email", data = "<data>")]
-async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    let data: SendEmailData = data.into_inner().data;
+async fn send_email(data: Json<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    let data: SendEmailData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
+    .validate(&user, false, &mut conn)
+    .await?;

     if !CONFIG._enable_email_2fa() {
         err!("Email 2FA is disabled")
@@ -129,7 +131,7 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
     }

     let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
-    let twofactor_data = EmailTokenData::new(data.Email, generated_token);
+    let twofactor_data = EmailTokenData::new(data.email, generated_token);

     // Uses EmailVerificationChallenge as type to show that it's not verified yet.
     let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
@@ -141,22 +143,27 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
 }

 #[derive(Deserialize, Serialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmailData {
-    Email: String,
-    MasterPasswordHash: String,
-    Token: String,
+    email: String,
+    token: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 /// Verify email belongs to user and can be used for 2FA email codes.
 #[put("/two-factor/email", data = "<data>")]
-async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EmailData = data.into_inner().data;
+async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EmailData = data.into_inner();
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
+    // This is the last step in the verification process, delete the otp directly afterwards
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
+    .validate(&user, true, &mut conn)
+    .await?;

     let type_ = TwoFactorType::EmailVerificationChallenge as i32;
     let mut twofactor =
@@ -169,7 +176,7 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
         _ => err!("No token available"),
     };

-    if !crypto::ct_eq(issued_token, data.Token) {
+    if !crypto::ct_eq(issued_token, data.token) {
         err!("Token is invalid")
     }

@@ -183,9 +190,9 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Email": email_data.email,
-        "Enabled": "true",
-        "Object": "twoFactorEmail"
+        "email": email_data.email,
+        "enabled": "true",
+        "object": "twoFactorEmail"
     })))
 }

@@ -225,9 +232,9 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
     twofactor.data = email_data.to_json();
     twofactor.save(conn).await?;

-    let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
+    let date = DateTime::from_timestamp(email_data.token_sent, 0).expect("Email token timestamp invalid.").naive_utc();
     let max_time = CONFIG.email_expiration_time() as i64;
-    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
+    if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
         err!(
             "Token has expired",
             ErrorEvent {
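The chrono changes here track the 0.4.3x deprecations: `NaiveDateTime::from_timestamp_opt` gives way to `DateTime::from_timestamp` (which returns an `Option<DateTime<Utc>>`, hence the trailing `.naive_utc()`), and the panicking `Duration::seconds` constructor to the fallible `TimeDelta::try_seconds`. A compact standalone sketch of the new-style expiry check:

    use chrono::{DateTime, TimeDelta, Utc};

    fn token_expired(token_sent: i64, max_secs: i64) -> bool {
        // New-style construction: DateTime::from_timestamp returns Option<DateTime<Utc>>.
        let sent = DateTime::from_timestamp(token_sent, 0)
            .expect("token timestamp invalid")
            .naive_utc();
        // TimeDelta::try_seconds returns None only for out-of-range values.
        sent + TimeDelta::try_seconds(max_secs).unwrap() < Utc::now().naive_utc()
    }

    fn main() {
        let sent = Utc::now().timestamp();
        assert!(!token_expired(sent, 600)); // a token issued just now has not expired
    }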
@@ -258,14 +265,14 @@ impl EmailTokenData {
         EmailTokenData {
             email,
             last_token: Some(token),
-            token_sent: Utc::now().naive_utc().timestamp(),
+            token_sent: Utc::now().timestamp(),
             attempts: 0,
         }
     }

     pub fn set_token(&mut self, token: String) {
         self.last_token = Some(token);
-        self.token_sent = Utc::now().naive_utc().timestamp();
+        self.token_sent = Utc::now().timestamp();
     }

     pub fn reset_token(&mut self) {
@@ -290,6 +297,15 @@ impl EmailTokenData {
     }
 }

+pub async fn activate_email_2fa(user: &User, conn: &mut DbConn) -> EmptyResult {
+    if user.verified_at.is_none() {
+        err!("Auto-enabling of email 2FA failed because the users email address has not been verified!");
+    }
+    let twofactor_data = EmailTokenData::new(user.email.clone(), String::new());
+    let twofactor = TwoFactor::new(user.uuid.clone(), TwoFactorType::Email, twofactor_data.to_json());
+    twofactor.save(conn).await
+}
+
 /// Takes an email address and obscures it by replacing it with asterisks except two characters.
 pub fn obscure_email(email: &str) -> String {
     let split: Vec<&str> = email.rsplitn(2, '@').collect();
@@ -311,6 +327,14 @@ pub fn obscure_email(email: &str) -> String {
     format!("{}@{}", new_name, &domain)
 }

+pub async fn find_and_activate_email_2fa(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    if let Some(user) = User::find_by_uuid(user_uuid, conn).await {
+        activate_email_2fa(&user, conn).await
+    } else {
+        err!("User not found!");
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
src/api/core/two_factor/mod.rs

@@ -1,20 +1,26 @@
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use data_encoding::BASE32;
 use rocket::serde::json::Json;
 use rocket::Route;
 use serde_json::Value;

 use crate::{
-    api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
+    api::{
+        core::{log_event, log_user_event},
+        EmptyResult, JsonResult, PasswordOrOtpData,
+    },
     auth::{ClientHeaders, Headers},
     crypto,
     db::{models::*, DbConn, DbPool},
-    mail, CONFIG,
+    mail,
+    util::NumberOrString,
+    CONFIG,
 };

 pub mod authenticator;
 pub mod duo;
 pub mod email;
+pub mod protected_actions;
 pub mod webauthn;
 pub mod yubikey;

@@ -33,6 +39,7 @@ pub fn routes() -> Vec<Route> {
     routes.append(&mut email::routes());
     routes.append(&mut webauthn::routes());
     routes.append(&mut yubikey::routes());
+    routes.append(&mut protected_actions::routes());

     routes
 }
@@ -43,59 +50,58 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

     Json(json!({
-        "Data": twofactors_json,
-        "Object": "list",
-        "ContinuationToken": null,
+        "data": twofactors_json,
+        "object": "list",
+        "continuationToken": null,
     }))
 }

 #[post("/two-factor/get-recover", data = "<data>")]
-fn get_recover(data: JsonUpcase<PasswordData>, headers: Headers) -> JsonResult {
-    let data: PasswordData = data.into_inner().data;
+async fn get_recover(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, true, &mut conn).await?;

     Ok(Json(json!({
-        "Code": user.totp_recover,
-        "Object": "twoFactorRecover"
+        "code": user.totp_recover,
+        "object": "twoFactorRecover"
     })))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct RecoverTwoFactor {
-    MasterPasswordHash: String,
-    Email: String,
-    RecoveryCode: String,
+    master_password_hash: String,
+    email: String,
+    recovery_code: String,
 }

 #[post("/two-factor/recover", data = "<data>")]
-async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
-    let data: RecoverTwoFactor = data.into_inner().data;
+async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
+    let data: RecoverTwoFactor = data.into_inner();

     use crate::db::models::User;

     // Get the user
-    let mut user = match User::find_by_mail(&data.Email, &mut conn).await {
+    let mut user = match User::find_by_mail(&data.email, &mut conn).await {
         Some(user) => user,
         None => err!("Username or password is incorrect. Try again."),
     };

     // Check password
-    if !user.check_valid_password(&data.MasterPasswordHash) {
+    if !user.check_valid_password(&data.master_password_hash) {
         err!("Username or password is incorrect. Try again.")
     }

     // Check if recovery code is correct
-    if !user.check_valid_recovery_code(&data.RecoveryCode) {
+    if !user.check_valid_recovery_code(&data.recovery_code) {
         err!("Recovery code is incorrect. Try again.")
     }

     // Remove all twofactors from the user
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
+    enforce_2fa_policy(&user, &user.uuid, client_headers.device_type, &client_headers.ip.ip, &mut conn).await?;

     log_user_event(
         EventType::UserRecovered2fa as i32,
@@ -121,23 +127,27 @@ async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct DisableTwoFactorData {
-    MasterPasswordHash: String,
-    Type: NumberOrString,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
+    r#type: NumberOrString,
 }

 #[post("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: DisableTwoFactorData = data.into_inner().data;
-    let password_hash = data.MasterPasswordHash;
+async fn disable_twofactor(data: Json<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: DisableTwoFactorData = data.into_inner();
     let user = headers.user;

-    if !user.check_valid_password(&password_hash) {
-        err!("Invalid password");
+    // Delete directly after a valid token has been provided
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
+    .validate(&user, true, &mut conn)
+    .await?;

-    let type_ = data.Type.into_i32()?;
+    let type_ = data.r#type.into_i32()?;

     if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
         twofactor.delete(&mut conn).await?;
@@ -145,36 +155,94 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head
         .await;
     }

-    let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();
-
-    if twofactor_disabled {
-        for user_org in
-            UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, &mut conn)
-                .await
-                .into_iter()
-        {
-            if user_org.atype < UserOrgType::Admin {
-                if CONFIG.mail_enabled() {
-                    let org = Organization::find_by_uuid(&user_org.org_uuid, &mut conn).await.unwrap();
-                    mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
-                }
-                user_org.delete(&mut conn).await?;
-            }
-        }
+    if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() {
+        enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?;
     }

     Ok(Json(json!({
-        "Enabled": false,
-        "Type": type_,
-        "Object": "twoFactorProvider"
+        "enabled": false,
+        "type": type_,
+        "object": "twoFactorProvider"
     })))
 }

 #[put("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn disable_twofactor_put(data: Json<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
     disable_twofactor(data, headers, conn).await
 }

+pub async fn enforce_2fa_policy(
+    user: &User,
+    act_uuid: &str,
+    device_type: i32,
+    ip: &std::net::IpAddr,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    for member in UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn)
+        .await
+        .into_iter()
+    {
+        // Policy only applies to non-Owner/non-Admin members who have accepted joining the org
+        if member.atype < UserOrgType::Admin {
+            if CONFIG.mail_enabled() {
+                let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap();
+                mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
+            }
+            let mut member = member;
+            member.revoke();
+            member.save(conn).await?;
+
+            log_event(
+                EventType::OrganizationUserRevoked as i32,
+                &member.uuid,
+                &member.org_uuid,
+                act_uuid,
+                device_type,
+                ip,
+                conn,
+            )
+            .await;
+        }
+    }
+
+    Ok(())
+}
+
+pub async fn enforce_2fa_policy_for_org(
+    org_uuid: &str,
+    act_uuid: &str,
+    device_type: i32,
+    ip: &std::net::IpAddr,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    let org = Organization::find_by_uuid(org_uuid, conn).await.unwrap();
+    for member in UserOrganization::find_confirmed_by_org(org_uuid, conn).await.into_iter() {
+        // Don't enforce the policy for Admins and Owners.
+        if member.atype < UserOrgType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
+            if CONFIG.mail_enabled() {
+                let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap();
+                mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
+            }
+            let mut member = member;
+            member.revoke();
+            member.save(conn).await?;
+
+            log_event(
+                EventType::OrganizationUserRevoked as i32,
+                &member.uuid,
+                org_uuid,
+                act_uuid,
+                device_type,
+                ip,
+                conn,
+            )
+            .await;
+        }
+    }
+
+    Ok(())
+}
+
 pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
     debug!("Sending notifications for incomplete 2FA logins");

@@ -191,7 +259,7 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
     };

     let now = Utc::now().naive_utc();
-    let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
+    let time_limit = TimeDelta::try_minutes(CONFIG.incomplete_2fa_time_limit()).unwrap();
     let time_before = now - time_limit;
     let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
     for login in incomplete_logins {
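Note the behavioral change folded into these new helpers: a member of an org with the two-factor policy who ends up without any 2FA method is now revoked (`member.revoke()`) and an `OrganizationUserRevoked` event is logged, where the old code deleted the membership row outright. `enforce_2fa_policy_for_org` would presumably be invoked from the org policy endpoints when the policy is switched on; the handler below is a hypothetical call site, not part of this diff:

    // Hypothetical call site: enabling the TwoFactorAuthentication policy for an org.
    // How the handler obtains these values is an assumption here.
    async fn on_2fa_policy_enabled(
        org_uuid: &str,
        acting_user_uuid: &str,
        device_type: i32,
        ip: &std::net::IpAddr,
        conn: &mut DbConn,
    ) -> EmptyResult {
        // Revoke (not delete) every confirmed non-admin member lacking 2FA,
        // emailing them if mail is enabled, exactly as the helper above implements.
        enforce_2fa_policy_for_org(org_uuid, acting_user_uuid, device_type, ip, conn).await
    }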
src/api/core/two_factor/protected_actions.rs (new file, 143 lines)

@@ -0,0 +1,143 @@
+use chrono::{DateTime, TimeDelta, Utc};
+use rocket::{serde::json::Json, Route};
+
+use crate::{
+    api::EmptyResult,
+    auth::Headers,
+    crypto,
+    db::{
+        models::{TwoFactor, TwoFactorType},
+        DbConn,
+    },
+    error::{Error, MapResult},
+    mail, CONFIG,
+};
+
+pub fn routes() -> Vec<Route> {
+    routes![request_otp, verify_otp]
+}
+
+/// Data stored in the TwoFactor table in the db
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ProtectedActionData {
+    /// Token issued to validate the protected action
+    pub token: String,
+    /// UNIX timestamp of token issue.
+    pub token_sent: i64,
+    // The total amount of attempts
+    pub attempts: u8,
+}
+
+impl ProtectedActionData {
+    pub fn new(token: String) -> Self {
+        Self {
+            token,
+            token_sent: Utc::now().timestamp(),
+            attempts: 0,
+        }
+    }
+
+    pub fn to_json(&self) -> String {
+        serde_json::to_string(&self).unwrap()
+    }
+
+    pub fn from_json(string: &str) -> Result<Self, Error> {
+        let res: Result<Self, crate::serde_json::Error> = serde_json::from_str(string);
+        match res {
+            Ok(x) => Ok(x),
+            Err(_) => err!("Could not decode ProtectedActionData from string"),
+        }
+    }
+
+    pub fn add_attempt(&mut self) {
+        self.attempts += 1;
+    }
+}
+
+#[post("/accounts/request-otp")]
+async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.mail_enabled() {
+        err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
+    }
+
+    let user = headers.user;
+
+    // Only one Protected Action per user is allowed to take place, delete the previous one
+    if let Some(pa) =
+        TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::ProtectedActions as i32, &mut conn).await
+    {
+        pa.delete(&mut conn).await?;
+    }
+
+    let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
+    let pa_data = ProtectedActionData::new(generated_token);
+
+    // Uses EmailVerificationChallenge as type to show that it's not verified yet.
+    let twofactor = TwoFactor::new(user.uuid, TwoFactorType::ProtectedActions, pa_data.to_json());
+    twofactor.save(&mut conn).await?;
+
+    mail::send_protected_action_token(&user.email, &pa_data.token).await?;
+
+    Ok(())
+}
+
+#[derive(Deserialize, Serialize, Debug)]
+#[serde(rename_all = "camelCase")]
+struct ProtectedActionVerify {
+    #[serde(rename = "OTP", alias = "otp")]
+    otp: String,
+}
+
+#[post("/accounts/verify-otp", data = "<data>")]
+async fn verify_otp(data: Json<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    if !CONFIG.mail_enabled() {
+        err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
+    }
+
+    let user = headers.user;
+    let data: ProtectedActionVerify = data.into_inner();
+
+    // Delete the token after one validation attempt
+    // This endpoint only gets called for the vault export, and doesn't need a second attempt
+    validate_protected_action_otp(&data.otp, &user.uuid, true, &mut conn).await
+}
+
+pub async fn validate_protected_action_otp(
+    otp: &str,
+    user_uuid: &str,
+    delete_if_valid: bool,
+    conn: &mut DbConn,
+) -> EmptyResult {
+    let pa = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::ProtectedActions as i32, conn)
+        .await
+        .map_res("Protected action token not found, try sending the code again or restart the process")?;
+    let mut pa_data = ProtectedActionData::from_json(&pa.data)?;
+
+    pa_data.add_attempt();
+    // Delete the token after x attempts if it has been used too many times
+    // We use the 6, which should be more then enough for invalid attempts and multiple valid checks
+    if pa_data.attempts > 6 {
+        pa.delete(conn).await?;
+        err!("Token has expired")
+    }
+
+    // Check if the token has expired (Using the email 2fa expiration time)
+    let date =
+        DateTime::from_timestamp(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.").naive_utc();
+    let max_time = CONFIG.email_expiration_time() as i64;
+    if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
+        pa.delete(conn).await?;
+        err!("Token has expired")
+    }
+
+    if !crypto::ct_eq(&pa_data.token, otp) {
+        pa.save(conn).await?;
+        err!("Token is invalid")
+    }
+
+    if delete_if_valid {
+        pa.delete(conn).await?;
+    }
+
+    Ok(())
+}
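The new `protected_actions.rs` module backs the client's "verify with OTP" dialog for users who sign in without typing the master password: `POST /accounts/request-otp` mails a one-time token, `POST /accounts/verify-otp` checks it. A rough client-side sketch of the round trip; the `/api` prefix, bearer token handling, and reqwest usage are assumptions for illustration, not part of this diff:

    use serde_json::json;

    // Sketch of the two-request flow against a Vaultwarden instance.
    // `base` and `access_token` come from a normal device login (assumed here).
    async fn protected_action_flow(base: &str, access_token: &str, otp_from_email: &str) -> Result<(), reqwest::Error> {
        let client = reqwest::Client::new();

        // 1. Ask the server to email a one-time token to the account address.
        client
            .post(format!("{base}/api/accounts/request-otp"))
            .bearer_auth(access_token)
            .send()
            .await?
            .error_for_status()?;

        // 2. Submit the token; the handler accepts either "OTP" or "otp" as the key.
        client
            .post(format!("{base}/api/accounts/verify-otp"))
            .bearer_auth(access_token)
            .json(&json!({ "otp": otp_from_email }))
            .send()
            .await?
            .error_for_status()?;

        Ok(())
    }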
src/api/core/two_factor/webauthn.rs

@@ -7,7 +7,7 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     db::{
@@ -15,6 +15,7 @@ use crate::{
         DbConn,
     },
     error::Error,
+    util::NumberOrString,
     CONFIG,
 };

@@ -95,40 +96,42 @@ pub struct WebauthnRegistration {
 impl WebauthnRegistration {
     fn to_json(&self) -> Value {
         json!({
-            "Id": self.id,
-            "Name": self.name,
+            "id": self.id,
+            "name": self.name,
             "migrated": self.migrated,
         })
     }
 }

 #[post("/two-factor/get-webauthn", data = "<data>")]
-async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn get_webauthn(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     if !CONFIG.domain_set() {
         err!("`DOMAIN` environment variable is not set. Webauthn disabled")
     }

-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    let data: PasswordOrOtpData = data.into_inner();
+    let user = headers.user;

-    let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &mut conn).await?;
+    data.validate(&user, false, &mut conn).await?;
+
+    let (enabled, registrations) = get_webauthn_registrations(&user.uuid, &mut conn).await?;
     let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

     Ok(Json(json!({
-        "Enabled": enabled,
-        "Keys": registrations_json,
-        "Object": "twoFactorWebAuthn"
+        "enabled": enabled,
+        "keys": registrations_json,
+        "object": "twoFactorWebAuthn"
     })))
 }

 #[post("/two-factor/get-webauthn-challenge", data = "<data>")]
-async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
+    let user = headers.user;

-    let registrations = get_webauthn_registrations(&headers.user.uuid, &mut conn)
+    data.validate(&user, false, &mut conn).await?;
+
+    let registrations = get_webauthn_registrations(&user.uuid, &mut conn)
         .await?
         .1
         .into_iter()
@@ -136,16 +139,16 @@ async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: He
         .collect();

     let (challenge, state) = WebauthnConfig::load().generate_challenge_register_options(
-        headers.user.uuid.as_bytes().to_vec(),
-        headers.user.email,
-        headers.user.name,
+        user.uuid.as_bytes().to_vec(),
+        user.email,
+        user.name,
         Some(registrations),
         None,
         None,
     )?;

     let type_ = TwoFactorType::WebauthnRegisterChallenge;
-    TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;
+    TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?;

     let mut challenge_value = serde_json::to_value(challenge.public_key)?;
     challenge_value["status"] = "ok".into();
@@ -154,101 +157,97 @@ async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: He
 }

 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EnableWebauthnData {
-    Id: NumberOrString, // 1..5
-    Name: String,
-    MasterPasswordHash: String,
-    DeviceResponse: RegisterPublicKeyCredentialCopy,
+    id: NumberOrString, // 1..5
+    name: String,
+    device_response: RegisterPublicKeyCredentialCopy,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

-// This is copied from RegisterPublicKeyCredential to change the Response objects casing
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct RegisterPublicKeyCredentialCopy {
-    pub Id: String,
-    pub RawId: Base64UrlSafeData,
-    pub Response: AuthenticatorAttestationResponseRawCopy,
-    pub Type: String,
+    pub id: String,
+    pub raw_id: Base64UrlSafeData,
+    pub response: AuthenticatorAttestationResponseRawCopy,
+    pub r#type: String,
 }

 // This is copied from AuthenticatorAttestationResponseRaw to change clientDataJSON to clientDataJson
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct AuthenticatorAttestationResponseRawCopy {
-    pub AttestationObject: Base64UrlSafeData,
-    pub ClientDataJson: Base64UrlSafeData,
+    #[serde(rename = "AttestationObject", alias = "attestationObject")]
+    pub attestation_object: Base64UrlSafeData,
+    #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
+    pub client_data_json: Base64UrlSafeData,
 }

 impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
     fn from(r: RegisterPublicKeyCredentialCopy) -> Self {
         Self {
-            id: r.Id,
-            raw_id: r.RawId,
+            id: r.id,
+            raw_id: r.raw_id,
             response: AuthenticatorAttestationResponseRaw {
-                attestation_object: r.Response.AttestationObject,
-                client_data_json: r.Response.ClientDataJson,
+                attestation_object: r.response.attestation_object,
+                client_data_json: r.response.client_data_json,
             },
-            type_: r.Type,
+            type_: r.r#type,
         }
     }
 }

-// This is copied from PublicKeyCredential to change the Response objects casing
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct PublicKeyCredentialCopy {
-    pub Id: String,
-    pub RawId: Base64UrlSafeData,
-    pub Response: AuthenticatorAssertionResponseRawCopy,
-    pub Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
-    pub Type: String,
+    pub id: String,
+    pub raw_id: Base64UrlSafeData,
+    pub response: AuthenticatorAssertionResponseRawCopy,
+    pub extensions: Option<AuthenticationExtensionsClientOutputs>,
+    pub r#type: String,
 }

 // This is copied from AuthenticatorAssertionResponseRaw to change clientDataJSON to clientDataJson
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct AuthenticatorAssertionResponseRawCopy {
-    pub AuthenticatorData: Base64UrlSafeData,
-    pub ClientDataJson: Base64UrlSafeData,
-    pub Signature: Base64UrlSafeData,
-    pub UserHandle: Option<Base64UrlSafeData>,
-}
-
-#[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
-pub struct AuthenticationExtensionsClientOutputsCopy {
-    #[serde(default)]
-    pub Appid: bool,
+    pub authenticator_data: Base64UrlSafeData,
+    #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
+    pub client_data_json: Base64UrlSafeData,
+    pub signature: Base64UrlSafeData,
+    pub user_handle: Option<Base64UrlSafeData>,
 }

 impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
     fn from(r: PublicKeyCredentialCopy) -> Self {
         Self {
-            id: r.Id,
-            raw_id: r.RawId,
+            id: r.id,
+            raw_id: r.raw_id,
             response: AuthenticatorAssertionResponseRaw {
-                authenticator_data: r.Response.AuthenticatorData,
-                client_data_json: r.Response.ClientDataJson,
-                signature: r.Response.Signature,
-                user_handle: r.Response.UserHandle,
+                authenticator_data: r.response.authenticator_data,
+                client_data_json: r.response.client_data_json,
+                signature: r.response.signature,
+                user_handle: r.response.user_handle,
             },
-            extensions: r.Extensions.map(|e| AuthenticationExtensionsClientOutputs {
-                appid: e.Appid,
-            }),
-            type_: r.Type,
+            extensions: r.extensions,
+            type_: r.r#type,
         }
     }
 }

 #[post("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableWebauthnData = data.into_inner().data;
+async fn activate_webauthn(data: Json<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableWebauthnData = data.into_inner();
     let mut user = headers.user;

-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
+    .validate(&user, true, &mut conn)
+    .await?;

     // Retrieve and delete the saved challenge state
     let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
@@ -263,13 +262,13 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header

     // Verify the credentials with the saved state
     let (credential, _data) =
-        WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;
+        WebauthnConfig::load().register_credential(&data.device_response.into(), &state, |_| Ok(false))?;

     let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1;
     // TODO: Check for repeated ID's
     registrations.push(WebauthnRegistration {
-        id: data.Id.into_i32()?,
-        name: data.Name,
+        id: data.id.into_i32()?,
+        name: data.name,
         migrated: false,

         credential,
@@ -285,28 +284,28 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header

     let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
     Ok(Json(json!({
-        "Enabled": true,
-        "Keys": keys_json,
-        "Object": "twoFactorU2f"
+        "enabled": true,
+        "keys": keys_json,
+        "object": "twoFactorU2f"
     })))
 }

 #[put("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_webauthn_put(data: Json<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_webauthn(data, headers, conn).await
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct DeleteU2FData {
-    Id: NumberOrString,
-    MasterPasswordHash: String,
+    id: NumberOrString,
+    master_password_hash: String,
 }

 #[delete("/two-factor/webauthn", data = "<data>")]
-async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let id = data.data.Id.into_i32()?;
-    if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
+async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let id = data.id.into_i32()?;
+    if !headers.user.check_valid_password(&data.master_password_hash) {
         err!("Invalid password");
     }

@@ -347,9 +346,9 @@ async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut
     let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();

     Ok(Json(json!({
-        "Enabled": true,
-        "Keys": keys_json,
-        "Object": "twoFactorU2f"
+        "enabled": true,
+        "keys": keys_json,
+        "object": "twoFactorU2f"
     })))
 }

@@ -402,8 +401,8 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
         ),
     };

-    let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
-    let rsp: PublicKeyCredential = rsp.data.into();
+    let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?;
+    let rsp: PublicKeyCredential = rsp.into();

     let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;
|
let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;
|
||||||
|
|
||||||
|
|||||||
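The hunks above replace the old `JsonUpcase` wrapper with plain `Json` plus serde attributes. A minimal standalone sketch of that pattern (the struct and payloads here are illustrative, not the exact ones from the diff): `rename_all = "camelCase"` makes camelCase the canonical wire format, while `alias` keeps legacy PascalCase payloads deserializing.

    use serde::Deserialize;

    // Illustrative only: shows how `rename_all` plus `alias` replaces the old
    // UpCase/JsonUpcase normalization layer for a hypothetical request body.
    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct ExampleData {
        #[serde(alias = "MasterPasswordHash")]
        master_password_hash: String,
    }

    fn main() {
        // The legacy PascalCase payload still deserializes thanks to the alias...
        let legacy: ExampleData = serde_json::from_str(r#"{"MasterPasswordHash":"abc"}"#).unwrap();
        // ...and camelCase is now the canonical form.
        let current: ExampleData = serde_json::from_str(r#"{"masterPasswordHash":"abc"}"#).unwrap();
        assert_eq!(legacy.master_password_hash, current.master_password_hash);
    }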
src/api/core/two_factor/yubikey.rs
@@ -1,12 +1,12 @@
 use rocket::serde::json::Json;
 use rocket::Route;
 use serde_json::Value;
-use yubico::{config::Config, verify};
+use yubico::{config::Config, verify_async};
 
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     db::{
@@ -21,27 +21,30 @@ pub fn routes() -> Vec<Route> {
     routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]
 }
 
-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EnableYubikeyData {
-    MasterPasswordHash: String,
-    Key1: Option<String>,
-    Key2: Option<String>,
-    Key3: Option<String>,
-    Key4: Option<String>,
-    Key5: Option<String>,
-    Nfc: bool,
+    key1: Option<String>,
+    key2: Option<String>,
+    key3: Option<String>,
+    key4: Option<String>,
+    key5: Option<String>,
+    nfc: bool,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }
 
 #[derive(Deserialize, Serialize, Debug)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct YubikeyMetadata {
-    Keys: Vec<String>,
-    pub Nfc: bool,
+    #[serde(rename = "keys", alias = "Keys")]
+    keys: Vec<String>,
+    #[serde(rename = "nfc", alias = "Nfc")]
+    pub nfc: bool,
 }
 
 fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
-    let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];
+    let data_keys = [&data.key1, &data.key2, &data.key3, &data.key4, &data.key5];
 
     data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
 }
@@ -73,26 +76,21 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult {
     let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);
 
     match CONFIG.yubico_server() {
-        Some(server) => {
-            tokio::task::spawn_blocking(move || verify(otp, config.set_api_hosts(vec![server]))).await.unwrap()
-        }
-        None => tokio::task::spawn_blocking(move || verify(otp, config)).await.unwrap(),
+        Some(server) => verify_async(otp, config.set_api_hosts(vec![server])).await,
+        None => verify_async(otp, config).await,
     }
     .map_res("Failed to verify OTP")
-    .and(Ok(()))
 }
 
 #[post("/two-factor/get-yubikey", data = "<data>")]
-async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn generate_yubikey(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     // Make sure the credentials are set
     get_yubico_credentials()?;
 
-    let data: PasswordData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    data.validate(&user, false, &mut conn).await?;
 
     let user_uuid = &user.uuid;
     let yubikey_type = TwoFactorType::YubiKey as i32;
@@ -102,29 +100,32 @@ async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut
     if let Some(r) = r {
         let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
 
-        let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
+        let mut result = jsonify_yubikeys(yubikey_metadata.keys);
 
-        result["Enabled"] = Value::Bool(true);
-        result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
-        result["Object"] = Value::String("twoFactorU2f".to_owned());
+        result["enabled"] = Value::Bool(true);
+        result["nfc"] = Value::Bool(yubikey_metadata.nfc);
+        result["object"] = Value::String("twoFactorU2f".to_owned());
 
         Ok(Json(result))
     } else {
         Ok(Json(json!({
-            "Enabled": false,
-            "Object": "twoFactorU2f",
+            "enabled": false,
+            "object": "twoFactorU2f",
         })))
     }
 }
 
 #[post("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableYubikeyData = data.into_inner().data;
+async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableYubikeyData = data.into_inner();
     let mut user = headers.user;
 
-    if !user.check_valid_password(&data.MasterPasswordHash) {
-        err!("Invalid password");
-    }
+    PasswordOrOtpData {
+        master_password_hash: data.master_password_hash.clone(),
+        otp: data.otp.clone(),
+    }
+    .validate(&user, true, &mut conn)
+    .await?;
 
     // Check if we already have some data
     let mut yubikey_data =
@@ -137,8 +138,8 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
 
     if yubikeys.is_empty() {
         return Ok(Json(json!({
-            "Enabled": false,
-            "Object": "twoFactorU2f",
+            "enabled": false,
+            "object": "twoFactorU2f",
         })));
     }
 
@@ -155,8 +156,8 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
     let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect();
 
     let yubikey_metadata = YubikeyMetadata {
-        Keys: yubikey_ids,
-        Nfc: data.Nfc,
+        keys: yubikey_ids,
+        nfc: data.nfc,
     };
 
     yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();
@@ -166,17 +167,17 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
 
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
 
-    let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
+    let mut result = jsonify_yubikeys(yubikey_metadata.keys);
 
-    result["Enabled"] = Value::Bool(true);
-    result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
-    result["Object"] = Value::String("twoFactorU2f".to_owned());
+    result["enabled"] = Value::Bool(true);
+    result["nfc"] = Value::Bool(yubikey_metadata.nfc);
+    result["object"] = Value::String("twoFactorU2f".to_owned());
 
     Ok(Json(result))
 }
 
 #[put("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_yubikey_put(data: Json<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_yubikey(data, headers, conn).await
 }
 
@@ -188,14 +189,10 @@ pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> Emp
     let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata");
     let response_id = &response[..12];
 
-    if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
+    if !yubikey_metadata.keys.contains(&response_id.to_owned()) {
         err!("Given Yubikey is not registered");
     }
 
-    let result = verify_yubikey_otp(response.to_owned()).await;
-
-    match result {
-        Ok(_answer) => Ok(()),
-        Err(_e) => err!("Failed to verify Yubikey against OTP server"),
-    }
+    verify_yubikey_otp(response.to_owned()).await.map_res("Failed to verify Yubikey against OTP server")?;
+    Ok(())
 }
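The `verify_yubikey_otp` change above swaps the blocking `yubico::verify` wrapped in `tokio::task::spawn_blocking` for the crate's async entry point. A minimal sketch of the new call shape, assuming a `yubico` crate version that exposes `verify_async` as used in the diff (the credentials are placeholders):

    use yubico::{config::Config, verify_async};

    // Placeholder credentials; a real deployment reads these from configuration.
    async fn check_otp(otp: String) -> bool {
        let config = Config::default().set_client_id("client_id").set_key("secret_key");
        // The verification future is awaited directly on the async runtime;
        // no spawn_blocking hop is needed anymore.
        verify_async(otp, config).await.is_ok()
    }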
320 src/api/icons.rs
@@ -1,6 +1,6 @@
 use std::{
     net::IpAddr,
-    sync::Arc,
+    sync::{Arc, Mutex},
     time::{Duration, SystemTime},
 };
 
@@ -16,14 +16,13 @@ use rocket::{http::ContentType, response::Redirect, Route};
 use tokio::{
     fs::{create_dir_all, remove_file, symlink_metadata, File},
     io::{AsyncReadExt, AsyncWriteExt},
-    net::lookup_host,
 };
 
 use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};
 
 use crate::{
     error::Error,
-    util::{get_reqwest_client_builder, Cached},
+    util::{get_reqwest_client_builder, Cached, CustomDnsResolver, CustomResolverError},
     CONFIG,
 };
 
@@ -49,48 +48,32 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
     let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
     let pool_idle_timeout = Duration::from_secs(10);
     // Reuse the client between requests
-    let client = get_reqwest_client_builder()
+    get_reqwest_client_builder()
         .cookie_provider(Arc::clone(&cookie_store))
         .timeout(icon_download_timeout)
         .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
         .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-        .trust_dns(true)
-        .default_headers(default_headers.clone());
-
-    match client.build() {
-        Ok(client) => client,
-        Err(e) => {
-            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-            get_reqwest_client_builder()
-                .cookie_provider(cookie_store)
-                .timeout(icon_download_timeout)
-                .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
-                .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-                .trust_dns(false)
-                .default_headers(default_headers)
-                .build()
-                .expect("Failed to build client")
-        }
-    }
+        .dns_resolver(CustomDnsResolver::instance())
+        .default_headers(default_headers.clone())
+        .build()
+        .expect("Failed to build client")
 });
 
 // Build Regex only once since this takes a lot of time.
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
 
-// Special HashMap which holds the user defined Regex to speedup matching the regex.
-static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
-
-async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
+#[get("/<domain>/icon.png")]
+fn icon_external(domain: &str) -> Option<Redirect> {
     if !is_valid_domain(domain) {
         warn!("Invalid domain: {}", domain);
         return None;
     }
 
-    if check_domain_blacklist_reason(domain).await.is_some() {
+    if is_domain_blacklisted(domain) {
         return None;
     }
 
-    let url = template.replace("{}", domain);
+    let url = CONFIG._icon_service_url().replace("{}", domain);
     match CONFIG.icon_redirect_code() {
         301 => Some(Redirect::moved(url)), // legacy permanent redirect
         302 => Some(Redirect::found(url)), // legacy temporary redirect
@@ -103,11 +86,6 @@ async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
     }
 }
 
-#[get("/<domain>/icon.png")]
-async fn icon_external(domain: &str) -> Option<Redirect> {
-    icon_redirect(domain, &CONFIG._icon_service_url()).await
-}
-
 #[get("/<domain>/icon.png")]
 async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
     const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
@@ -166,153 +144,28 @@ fn is_valid_domain(domain: &str) -> bool {
     true
 }
 
-/// TODO: This is extracted from IpAddr::is_global, which is unstable:
-/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
-/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
-#[allow(clippy::nonminimal_bool)]
-#[cfg(not(feature = "unstable"))]
-fn is_global(ip: IpAddr) -> bool {
-    match ip {
-        IpAddr::V4(ip) => {
-            // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
-            // globally routable addresses in the 192.0.0.0/24 range.
-            if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
-                return true;
-            }
-            !ip.is_private()
-                && !ip.is_loopback()
-                && !ip.is_link_local()
-                && !ip.is_broadcast()
-                && !ip.is_documentation()
-                && !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
-                && !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
-                && !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
-                && !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
-                // Make sure the address is not in 0.0.0.0/8
-                && ip.octets()[0] != 0
-        }
-        IpAddr::V6(ip) => {
-            if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
-                true
-            } else {
-                !ip.is_multicast()
-                    && !ip.is_loopback()
-                    && !((ip.segments()[0] & 0xffc0) == 0xfe80)
-                    && !((ip.segments()[0] & 0xfe00) == 0xfc00)
-                    && !ip.is_unspecified()
-                    && !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
-            }
-        }
-    }
-}
-
-#[cfg(feature = "unstable")]
-fn is_global(ip: IpAddr) -> bool {
-    ip.is_global()
-}
-
-/// These are some tests to check that the implementations match
-/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
-/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
-/// Note that the is_global implementation is subject to change as new IP RFCs are created
-///
-/// To run while showing progress output:
-/// cargo test --features sqlite,unstable -- --nocapture --ignored
-#[cfg(test)]
-#[cfg(feature = "unstable")]
-mod tests {
-    use super::*;
-
-    #[test]
-    #[ignore]
-    fn test_ipv4_global() {
-        for a in 0..u8::MAX {
-            println!("Iter: {}/255", a);
-            for b in 0..u8::MAX {
-                for c in 0..u8::MAX {
-                    for d in 0..u8::MAX {
-                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
-                        assert_eq!(ip.is_global(), is_global(ip))
-                    }
-                }
-            }
-        }
-    }
-
-    #[test]
-    #[ignore]
-    fn test_ipv6_global() {
-        use ring::rand::{SecureRandom, SystemRandom};
-        let mut v = [0u8; 16];
-        let rand = SystemRandom::new();
-        for i in 0..1_000 {
-            println!("Iter: {}/1_000", i);
-            for _ in 0..10_000_000 {
-                rand.fill(&mut v).expect("Error generating random values");
-                let ip = IpAddr::V6(std::net::Ipv6Addr::new(
-                    (v[14] as u16) << 8 | v[15] as u16,
-                    (v[12] as u16) << 8 | v[13] as u16,
-                    (v[10] as u16) << 8 | v[11] as u16,
-                    (v[8] as u16) << 8 | v[9] as u16,
-                    (v[6] as u16) << 8 | v[7] as u16,
-                    (v[4] as u16) << 8 | v[5] as u16,
-                    (v[2] as u16) << 8 | v[3] as u16,
-                    (v[0] as u16) << 8 | v[1] as u16,
-                ));
-                assert_eq!(ip.is_global(), is_global(ip))
-            }
-        }
-    }
-}
-
-#[derive(Clone)]
-enum DomainBlacklistReason {
-    Regex,
-    IP,
-}
-
-use cached::proc_macro::cached;
-#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
-async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
-    // First check the blacklist regex if there is a match.
-    // This prevents the blocked domain(s) from being leaked via a DNS lookup.
-    if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
-        // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
-        let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
-            regex.is_match(domain)
-        } else {
-            // Clear the current list if the previous key doesn't exists.
-            // To prevent growing of the HashMap after someone has changed it via the admin interface.
-            if ICON_BLACKLIST_REGEX.len() >= 1 {
-                ICON_BLACKLIST_REGEX.clear();
-            }
-
-            // Generate the regex to store in too the Lazy Static HashMap.
-            let blacklist_regex = Regex::new(&blacklist).unwrap();
-            let is_match = blacklist_regex.is_match(domain);
-            ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
-
-            is_match
-        };
-
-        if is_match {
-            debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
-            return Some(DomainBlacklistReason::Regex);
-        }
-    }
-
-    if CONFIG.icon_blacklist_non_global_ips() {
-        if let Ok(s) = lookup_host((domain, 0)).await {
-            for addr in s {
-                if !is_global(addr.ip()) {
-                    debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
-                    return Some(DomainBlacklistReason::IP);
-                }
-            }
-        }
-    }
-
-    None
+pub fn is_domain_blacklisted(domain: &str) -> bool {
+    let Some(config_blacklist) = CONFIG.icon_blacklist_regex() else {
+        return false;
+    };
+
+    // Compiled domain blacklist
+    static COMPILED_BLACKLIST: Mutex<Option<(String, Regex)>> = Mutex::new(None);
+    let mut guard = COMPILED_BLACKLIST.lock().unwrap();
+
+    // If the stored regex is up to date, use it
+    if let Some((value, regex)) = &*guard {
+        if value == &config_blacklist {
+            return regex.is_match(domain);
+        }
+    }
+
+    // If we don't have a regex stored, or it's not up to date, recreate it
+    let regex = Regex::new(&config_blacklist).unwrap();
+    let is_match = regex.is_match(domain);
+    *guard = Some((config_blacklist, regex));
+
    is_match
 }
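The new `is_domain_blacklisted` replaces the per-pattern `DashMap` cache with a single `Mutex`-guarded slot holding the last compiled regex alongside its source string, so the pattern is only recompiled when the admin changes it. The same one-slot pattern in isolation (the names here are illustrative, not identifiers from the diff):

    use std::sync::Mutex;
    use regex::Regex;

    // One-slot compile cache: `PATTERN_CACHE` and `matches_pattern` are
    // illustrative names for this sketch.
    static PATTERN_CACHE: Mutex<Option<(String, Regex)>> = Mutex::new(None);

    fn matches_pattern(pattern: &str, input: &str) -> bool {
        let mut guard = PATTERN_CACHE.lock().unwrap();
        // Reuse the compiled regex while the configured pattern is unchanged.
        if let Some((cached_src, regex)) = &*guard {
            if cached_src == pattern {
                return regex.is_match(input);
            }
        }
        // Pattern changed (or first call): compile once and store it.
        let regex = Regex::new(pattern).unwrap();
        let is_match = regex.is_match(input);
        *guard = Some((pattern.to_string(), regex));
        is_match
    }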
 
 async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
@@ -342,6 +195,13 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
             Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
+            // If this error comes from the custom resolver, this means this is a blacklisted domain
+            // or non global IP, don't save the miss file in this case to avoid leaking it
+            if let Some(error) = CustomResolverError::downcast_ref(&e) {
+                warn!("{error}");
+                return None;
+            }
+
             warn!("Unable to download icon: {:?}", e);
             let miss_indicator = path + ".miss";
             save_icon(&miss_indicator, &[]).await;
@@ -491,42 +351,48 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     let ssldomain = format!("https://{domain}");
     let httpdomain = format!("http://{domain}");
 
-    // First check the domain as given during the request for both HTTPS and HTTP.
-    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
-        Ok(c) => Ok(c),
-        Err(e) => {
-            let mut sub_resp = Err(e);
-
-            // When the domain is not an IP, and has more then one dot, remove all subdomains.
-            let is_ip = domain.parse::<IpAddr>();
-            if is_ip.is_err() && domain.matches('.').count() > 1 {
-                let mut domain_parts = domain.split('.');
-                let base_domain = format!(
-                    "{base}.{tld}",
-                    tld = domain_parts.next_back().unwrap(),
-                    base = domain_parts.next_back().unwrap()
-                );
-                if is_valid_domain(&base_domain) {
-                    let sslbase = format!("https://{base_domain}");
-                    let httpbase = format!("http://{base_domain}");
-                    debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
-
-                    sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
-                }
-
-            // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
-            } else if is_ip.is_err() && domain.matches('.').count() < 2 {
-                let www_domain = format!("www.{domain}");
-                if is_valid_domain(&www_domain) {
-                    let sslwww = format!("https://{www_domain}");
-                    let httpwww = format!("http://{www_domain}");
-                    debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
-
-                    sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
-                }
-            }
-            sub_resp
-        }
+    // First check the domain as given during the request for HTTPS.
+    let resp = match get_page(&ssldomain).await {
+        Err(e) if CustomResolverError::downcast_ref(&e).is_none() => {
+            // If we get an error that is not caused by the blacklist, we retry with HTTP
+            match get_page(&httpdomain).await {
+                mut sub_resp @ Err(_) => {
+                    // When the domain is not an IP, and has more then one dot, remove all subdomains.
+                    let is_ip = domain.parse::<IpAddr>();
+                    if is_ip.is_err() && domain.matches('.').count() > 1 {
+                        let mut domain_parts = domain.split('.');
+                        let base_domain = format!(
+                            "{base}.{tld}",
+                            tld = domain_parts.next_back().unwrap(),
+                            base = domain_parts.next_back().unwrap()
+                        );
+                        if is_valid_domain(&base_domain) {
+                            let sslbase = format!("https://{base_domain}");
+                            let httpbase = format!("http://{base_domain}");
+                            debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
+
+                            sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
+                        }
+
+                    // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
+                    } else if is_ip.is_err() && domain.matches('.').count() < 2 {
+                        let www_domain = format!("www.{domain}");
+                        if is_valid_domain(&www_domain) {
+                            let sslwww = format!("https://{www_domain}");
+                            let httpwww = format!("http://{www_domain}");
+                            debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
+
+                            sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
+                        }
+                    }
+                    sub_resp
+                }
+                res => res,
+            }
+        }
+        // If we get a result or a blacklist error, just continue
+        res => res,
     };
 
     // Create the iconlist
@@ -573,21 +439,12 @@ async fn get_page(url: &str) -> Result<Response, Error> {
 }
 
 async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
-    match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
-        Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
-        Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
-        None => (),
-    }
-
     let mut client = CLIENT.get(url);
     if !referer.is_empty() {
         client = client.header("Referer", referer)
     }
 
-    match client.send().await {
-        Ok(c) => c.error_for_status().map_err(Into::into),
-        Err(e) => err_silent!(format!("{e}")),
-    }
+    Ok(client.send().await?.error_for_status()?)
 }
 
 /// Returns a Integer with the priority of the type of the icon which to prefer.
@@ -670,12 +527,6 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
 }
 
 async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
-    match check_domain_blacklist_reason(domain).await {
-        Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
-        Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
-        None => (),
-    }
-
     let icon_result = get_icon_url(domain).await?;
 
     let mut buffer = Bytes::new();
@@ -711,22 +562,19 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
             _ => debug!("Extracted icon from data:image uri is invalid"),
         };
     } else {
-        match get_page_with_referer(&icon.href, &icon_result.referer).await {
-            Ok(res) => {
-                buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
-
-                // Check if the icon type is allowed, else try an icon from the list.
-                icon_type = get_icon_type(&buffer);
-                if icon_type.is_none() {
-                    buffer.clear();
-                    debug!("Icon from {}, is not a valid image type", icon.href);
-                    continue;
-                }
-                info!("Downloaded icon from {}", icon.href);
-                break;
-            }
-            Err(e) => debug!("{:?}", e),
-        };
+        let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
+
+        buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
+
+        // Check if the icon type is allowed, else try an icon from the list.
+        icon_type = get_icon_type(&buffer);
+        if icon_type.is_none() {
+            buffer.clear();
+            debug!("Icon from {}, is not a valid image type", icon.href);
+            continue;
+        }
+        info!("Downloaded icon from {}", icon.href);
+        break;
    }
 }
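The reworked `get_icon_url` control flow above tries HTTPS first and only falls back to HTTP (and to base-domain or `www.` variants) when the failure did not come from the blacklist-aware resolver. A self-contained sketch of just that decision, with stand-in types for `get_page` and `CustomResolverError`:

    #[derive(Debug)]
    enum FetchError {
        Blacklisted, // stands in for a CustomResolverError hit
        Network,
    }

    // Stand-in for get_page(); always fails over HTTPS in this sketch so the
    // fallback path is exercised.
    async fn fetch(url: &str) -> Result<String, FetchError> {
        if url.contains("blocked.example") {
            Err(FetchError::Blacklisted)
        } else if url.starts_with("https://") {
            Err(FetchError::Network)
        } else {
            Ok(format!("fetched {url}"))
        }
    }

    // Mirrors the match above: retry over plain HTTP only when the HTTPS failure
    // was not a blacklist hit; a blacklisted domain is never retried.
    async fn fetch_with_fallback(domain: &str) -> Result<String, FetchError> {
        match fetch(&format!("https://{domain}")).await {
            Err(e) if !matches!(e, FetchError::Blacklisted) => fetch(&format!("http://{domain}")).await,
            res => res,
        }
    }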
src/api/identity.rs
@@ -9,10 +9,13 @@ use serde_json::Value;
 
 use crate::{
     api::{
-        core::accounts::{PreloginData, RegisterData, _prelogin, _register},
-        core::log_user_event,
-        core::two_factor::{duo, email, email::EmailTokenData, yubikey},
-        ApiResult, EmptyResult, JsonResult, JsonUpcase,
+        core::{
+            accounts::{PreloginData, RegisterData, _prelogin, _register},
+            log_user_event,
+            two_factor::{authenticator, duo, email, enforce_2fa_policy, webauthn, yubikey},
+        },
+        push::register_push_device,
+        ApiResult, EmptyResult, JsonResult,
     },
     auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
     db::{models::*, DbConn},
@@ -103,8 +106,13 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
 
     // Common
     let user = User::find_by_uuid(&device.user_uuid, conn).await.unwrap();
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+    // ---
+    // Disabled this variable, it was used to generate the JWT
+    // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
+    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+    // ---
+    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
     device.save(conn).await?;
 
     let result = json!({
@@ -242,7 +250,7 @@ async fn _password_login(
 
     let (mut device, new_device) = get_device(&data, conn, &user).await;
 
-    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?;
+    let twofactor_token = twofactor_auth(&user, &data, &mut device, ip, conn).await?;
 
     if CONFIG.mail_enabled() && new_device {
         if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
@@ -259,9 +267,19 @@ async fn _password_login(
         }
     }
 
+    // register push device
+    if !new_device {
+        register_push_device(&mut device, conn).await?;
+    }
+
     // Common
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+    // ---
+    // Disabled this variable, it was used to generate the JWT
+    // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
+    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+    // ---
+    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
     device.save(conn).await?;
 
     let mut result = json!({
@@ -277,7 +295,12 @@ async fn _password_login(
         "KdfIterations": user.client_kdf_iter,
         "KdfMemory": user.client_kdf_memory,
         "KdfParallelism": user.client_kdf_parallelism,
-        "ResetMasterPassword": false,// TODO: Same as above
+        "ResetMasterPassword": false, // TODO: Same as above
+        "ForcePasswordReset": false,
+        "MasterPasswordPolicy": {
+            "object": "masterPasswordPolicy",
+        },
+
         "scope": scope,
         "unofficialServer": true,
         "UserDecryptionOptions": {
@@ -374,8 +397,13 @@ async fn _user_api_key_login(
 
     // Common
     let scope_vec = vec!["api".into()];
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
-    let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+    // ---
+    // Disabled this variable, it was used to generate the JWT
+    // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
+    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+    // ---
+    // let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
+    let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
     device.save(conn).await?;
 
     info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
@@ -453,32 +481,32 @@ async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Devi
 }
 
 async fn twofactor_auth(
-    user_uuid: &str,
+    user: &User,
     data: &ConnectData,
     device: &mut Device,
     ip: &ClientIp,
     conn: &mut DbConn,
 ) -> ApiResult<Option<String>> {
-    let twofactors = TwoFactor::find_by_user(user_uuid, conn).await;
+    let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await;
 
     // No twofactor token if twofactor is disabled
     if twofactors.is_empty() {
+        enforce_2fa_policy(user, &user.uuid, device.atype, &ip.ip, conn).await?;
         return Ok(None);
     }
 
-    TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?;
+    TwoFactorIncomplete::mark_incomplete(&user.uuid, &device.uuid, &device.name, ip, conn).await?;
 
     let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
     let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
 
     let twofactor_code = match data.two_factor_token {
         Some(ref code) => code,
-        None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, "2FA token not provided"),
+        None => err_json!(_json_err_twofactor(&twofactor_ids, &user.uuid, conn).await?, "2FA token not provided"),
     };
 
     let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled);
 
-    use crate::api::core::two_factor as _tf;
     use crate::crypto::ct_eq;
 
     let selected_data = _selected_data(selected_twofactor);
@@ -486,17 +514,15 @@ async fn twofactor_auth(
 
     match TwoFactorType::from_i32(selected_id) {
         Some(TwoFactorType::Authenticator) => {
-            _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn).await?
+            authenticator::validate_totp_code_str(&user.uuid, twofactor_code, &selected_data?, ip, conn).await?
         }
-        Some(TwoFactorType::Webauthn) => {
-            _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await?
-        }
-        Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
+        Some(TwoFactorType::Webauthn) => webauthn::validate_webauthn_login(&user.uuid, twofactor_code, conn).await?,
+        Some(TwoFactorType::YubiKey) => yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?,
         Some(TwoFactorType::Duo) => {
-            _tf::duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
+            duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await?
        }
         Some(TwoFactorType::Email) => {
-            _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn).await?
+            email::validate_email_code_str(&user.uuid, twofactor_code, &selected_data?, conn).await?
         }
 
         Some(TwoFactorType::Remember) => {
@@ -506,7 +532,7 @@ async fn twofactor_auth(
             }
             _ => {
                 err_json!(
-                    _json_err_twofactor(&twofactor_ids, user_uuid, conn).await?,
+                    _json_err_twofactor(&twofactor_ids, &user.uuid, conn).await?,
                     "2FA Remember token not provided"
                 )
             }
@@ -520,7 +546,7 @@ async fn twofactor_auth(
         ),
     }
 
-    TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?;
+    TwoFactorIncomplete::mark_complete(&user.uuid, &device.uuid, conn).await?;
 
     if !CONFIG.disable_2fa_remember() && remember == 1 {
         Ok(Some(device.refresh_twofactor_remember()))
@@ -535,13 +561,14 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
 }
 
 async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn) -> ApiResult<Value> {
-    use crate::api::core::two_factor;
-
     let mut result = json!({
         "error" : "invalid_grant",
         "error_description" : "Two factor required.",
-        "TwoFactorProviders" : providers,
-        "TwoFactorProviders2" : {} // { "0" : null }
+        "TwoFactorProviders" : providers.iter().map(ToString::to_string).collect::<Vec<String>>(),
+        "TwoFactorProviders2" : {}, // { "0" : null }
+        "MasterPasswordPolicy": {
+            "Object": "masterPasswordPolicy"
+        }
     });
 
     for provider in providers {
@@ -551,7 +578,7 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
             Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
 
             Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
-                let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?;
+                let request = webauthn::generate_webauthn_login(user_uuid, conn).await?;
                 result["TwoFactorProviders2"][provider.to_string()] = request.0;
             }
 
@@ -578,13 +605,11 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
                 let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
 
                 result["TwoFactorProviders2"][provider.to_string()] = json!({
-                    "Nfc": yubikey_metadata.Nfc,
+                    "Nfc": yubikey_metadata.nfc,
                 })
             }
 
             Some(tf_type @ TwoFactorType::Email) => {
-                use crate::api::core::two_factor as _tf;
-
                 let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await {
                     Some(tf) => tf,
                     None => err!("No twofactor email registered"),
@@ -592,10 +617,10 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
 
                 // Send email immediately if email is the only 2FA option
                 if providers.len() == 1 {
-                    _tf::email::send_token(user_uuid, conn).await?
+                    email::send_token(user_uuid, conn).await?
                 }
 
-                let email_data = EmailTokenData::from_json(&twofactor.data)?;
+                let email_data = email::EmailTokenData::from_json(&twofactor.data)?;
                 result["TwoFactorProviders2"][provider.to_string()] = json!({
                     "Email": email::obscure_email(&email_data.email),
                 })
@@ -609,19 +634,18 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
 }
 
 #[post("/accounts/prelogin", data = "<data>")]
-async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
+async fn prelogin(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
     _prelogin(data, conn).await
 }
 
 #[post("/accounts/register", data = "<data>")]
-async fn identity_register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
+async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
     _register(data, conn).await
 }
 
 // https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
 // https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
 #[derive(Debug, Clone, Default, FromForm)]
-#[allow(non_snake_case)]
 struct ConnectData {
     #[field(name = uncased("grant_type"))]
     #[field(name = uncased("granttype"))]
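One behavioral detail in `_json_err_twofactor` above: provider IDs are now serialized as strings, and a `MasterPasswordPolicy` stub is always included. An illustrative reconstruction of the resulting body (the provider values are examples; 0 is TOTP and 7 is WebAuthn in the Bitwarden numbering):

    use serde_json::json;

    fn main() {
        let providers = [0i32, 7]; // example: TOTP + WebAuthn enabled
        let body = json!({
            "error": "invalid_grant",
            "error_description": "Two factor required.",
            // Stringified, matching `providers.iter().map(ToString::to_string)` above
            "TwoFactorProviders": providers.iter().map(ToString::to_string).collect::<Vec<String>>(),
            "TwoFactorProviders2": {},
            "MasterPasswordPolicy": { "Object": "masterPasswordPolicy" }
        });
        assert_eq!(body["TwoFactorProviders"][0], "0");
        println!("{body}");
    }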
src/api/mod.rs
@@ -20,10 +20,10 @@ pub use crate::api::{
     core::two_factor::send_incomplete_2fa_notifications,
     core::{emergency_notification_reminder_job, emergency_request_timeout_job},
     core::{event_cleanup_job, events_routes as core_events_routes},
-    icons::routes as icons_routes,
+    icons::{is_domain_blacklisted, routes as icons_routes},
     identity::routes as identity_routes,
     notifications::routes as notifications_routes,
-    notifications::{start_notification_server, AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS},
+    notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
     push::{
         push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device,
         unregister_push_device,
@@ -32,47 +32,39 @@ pub use crate::api::{
     web::routes as web_routes,
     web::static_files,
 };
-use crate::util;
+use crate::db::{models::User, DbConn};
 
 // Type aliases for API methods results
 type ApiResult<T> = Result<T, crate::error::Error>;
 pub type JsonResult = ApiResult<Json<Value>>;
 pub type EmptyResult = ApiResult<()>;
 
-type JsonUpcase<T> = Json<util::UpCase<T>>;
-type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
-type JsonVec<T> = Json<Vec<T>>;
-
 // Common structs representing JSON data received
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
-struct PasswordData {
-    MasterPasswordHash: String,
+#[serde(rename_all = "camelCase")]
+struct PasswordOrOtpData {
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }
 
-#[derive(Deserialize, Debug, Clone)]
-#[serde(untagged)]
-enum NumberOrString {
-    Number(i32),
-    String(String),
-}
-
-impl NumberOrString {
-    fn into_string(self) -> String {
-        match self {
-            NumberOrString::Number(n) => n.to_string(),
-            NumberOrString::String(s) => s,
-        }
-    }
-
-    #[allow(clippy::wrong_self_convention)]
-    fn into_i32(&self) -> ApiResult<i32> {
-        use std::num::ParseIntError as PIE;
-        match self {
-            NumberOrString::Number(n) => Ok(*n),
-            NumberOrString::String(s) => {
-                s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
-            }
-        }
-    }
-}
+impl PasswordOrOtpData {
+    /// Tokens used via this struct can be used multiple times during the process
+    /// First for the validation to continue, after that to enable or validate the following actions
+    /// This is different per caller, so it can be adjusted to delete the token or not
+    pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
+        use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;
+
+        match (self.master_password_hash.as_deref(), self.otp.as_deref()) {
+            (Some(pw_hash), None) => {
+                if !user.check_valid_password(pw_hash) {
+                    err!("Invalid password");
+                }
+            }
+            (None, Some(otp)) => {
+                validate_protected_action_otp(otp, &user.uuid, delete_if_valid, conn).await?;
+            }
+            _ => err!("No validation provided"),
+        }
+        Ok(())
+    }
+}
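For reference, the call-site pattern for the new `PasswordOrOtpData::validate` is the one visible in the hunks above (e.g. `activate_yubikey`):

    // Exactly one of master_password_hash / otp must be supplied by the client.
    // `true` = consume a protected-action OTP once it validates; `false` keeps it.
    PasswordOrOtpData {
        master_password_hash: data.master_password_hash.clone(),
        otp: data.otp.clone(),
    }
    .validate(&user, true, &mut conn)
    .await?;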
@@ -1,23 +1,11 @@
|
|||||||
use std::{
|
use std::{net::IpAddr, sync::Arc, time::Duration};
|
||||||
net::{IpAddr, SocketAddr},
|
|
||||||
sync::Arc,
|
|
||||||
time::Duration,
|
|
||||||
};
|
|
||||||
|
|
||||||
use chrono::{NaiveDateTime, Utc};
|
use chrono::{NaiveDateTime, Utc};
|
||||||
use rmpv::Value;
|
use rmpv::Value;
|
||||||
use rocket::{
|
use rocket::{futures::StreamExt, Route};
|
||||||
futures::{SinkExt, StreamExt},
|
use tokio::sync::mpsc::Sender;
|
||||||
Route,
|
|
||||||
};
|
use rocket_ws::{Message, WebSocket};
|
||||||
use tokio::{
|
|
||||||
net::{TcpListener, TcpStream},
|
|
||||||
sync::mpsc::Sender,
|
|
||||||
};
|
|
||||||
use tokio_tungstenite::{
|
|
||||||
accept_hdr_async,
|
|
||||||
tungstenite::{handshake, Message},
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
auth::{ClientIp, WsAccessTokenHeader},
|
auth::{ClientIp, WsAccessTokenHeader},
|
||||||
@@ -30,7 +18,7 @@ use crate::{
|
|||||||
|
|
||||||
use once_cell::sync::Lazy;
|
use once_cell::sync::Lazy;
|
||||||
|
|
||||||
static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
|
pub static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
|
||||||
Arc::new(WebSocketUsers {
|
Arc::new(WebSocketUsers {
|
||||||
map: Arc::new(dashmap::DashMap::new()),
|
map: Arc::new(dashmap::DashMap::new()),
|
||||||
})
|
})
|
||||||
@@ -47,8 +35,15 @@ use super::{
|
|||||||
push_send_update, push_user_update,
|
push_send_update, push_user_update,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static NOTIFICATIONS_DISABLED: Lazy<bool> = Lazy::new(|| !CONFIG.enable_websocket() && !CONFIG.push_enabled());
|
||||||
|
|
||||||
pub fn routes() -> Vec<Route> {
|
pub fn routes() -> Vec<Route> {
|
||||||
routes![websockets_hub, anonymous_websockets_hub]
|
if CONFIG.enable_websocket() {
|
||||||
|
routes![websockets_hub, anonymous_websockets_hub]
|
||||||
|
} else {
|
||||||
|
info!("WebSocket are disabled, realtime sync functionality will not work!");
|
||||||
|
routes![]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(FromForm, Debug)]
|
#[derive(FromForm, Debug)]
|
||||||
@@ -108,7 +103,7 @@ impl Drop for WSAnonymousEntryMapGuard {
 
 #[get("/hub?<data..>")]
 fn websockets_hub<'r>(
-    ws: rocket_ws::WebSocket,
+    ws: WebSocket,
     data: WsAccessToken,
     ip: ClientIp,
     header_token: WsAccessTokenHeader,
@@ -164,6 +159,11 @@ fn websockets_hub<'r>(
                                 continue;
                             }
                         }
+
+                        // Prevent sending anything back when a `Close` Message is received.
+                        // Just break the loop
+                        Message::Close(_) => break,
+
                         // Just echo anything else the client sends
                         _ => yield message,
                     }
@@ -187,11 +187,7 @@ fn websockets_hub<'r>(
 }
 
 #[get("/anonymous-hub?<token..>")]
-fn anonymous_websockets_hub<'r>(
-    ws: rocket_ws::WebSocket,
-    token: String,
-    ip: ClientIp,
-) -> Result<rocket_ws::Stream!['r], Error> {
+fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result<rocket_ws::Stream!['r], Error> {
     let addr = ip.ip;
     info!("Accepting Anonymous Rocket WS connection from {addr}");
 
@@ -230,6 +226,11 @@ fn anonymous_websockets_hub<'r>(
                                 continue;
                             }
                         }
+
+                        // Prevent sending anything back when a `Close` Message is received.
+                        // Just break the loop
+                        Message::Close(_) => break,
+
                         // Just echo anything else the client sends
                         _ => yield message,
                     }
@@ -287,8 +288,8 @@ fn serialize(val: Value) -> Vec<u8> {
 }
 
 fn serialize_date(date: NaiveDateTime) -> Value {
-    let seconds: i64 = date.timestamp();
-    let nanos: i64 = date.timestamp_subsec_nanos().into();
+    let seconds: i64 = date.and_utc().timestamp();
+    let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
     let timestamp = nanos << 34 | seconds;
 
     let bs = timestamp.to_be_bytes();
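The `nanos << 34 | seconds` line packs the value in the MessagePack timestamp-extension layout: a 64-bit big-endian word with 30 bits of nanoseconds above 34 bits of seconds. A standalone round-trip sketch of just that packing (using u64 to avoid overflow; the diff itself keeps i64 because chrono returns i64):

    fn pack_timestamp(seconds: u64, nanos: u64) -> [u8; 8] {
        // 30-bit nanoseconds in the high bits, 34-bit seconds in the low bits
        ((nanos << 34) | seconds).to_be_bytes()
    }

    fn unpack_timestamp(bs: [u8; 8]) -> (u64, u64) {
        let ts = u64::from_be_bytes(bs);
        let seconds = ts & ((1 << 34) - 1); // low 34 bits
        let nanos = ts >> 34;               // high 30 bits
        (seconds, nanos)
    }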
@@ -339,13 +340,19 @@ impl WebSocketUsers {
 
     // NOTE: The last modified date needs to be updated before calling these methods
     pub async fn send_user_update(&self, ut: UpdateType, user: &User) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
             ut,
             None,
         );
 
-        self.send_update(&user.uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(&user.uuid, &data).await;
+        }
 
         if CONFIG.push_enabled() {
             push_user_update(ut, user);
@@ -353,13 +360,19 @@ impl WebSocketUsers {
     }
 
     pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
             UpdateType::LogOut,
             acting_device_uuid.clone(),
         );
 
-        self.send_update(&user.uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(&user.uuid, &data).await;
+        }
 
         if CONFIG.push_enabled() {
             push_logout(user, acting_device_uuid);
@@ -373,6 +386,10 @@ impl WebSocketUsers {
         acting_device_uuid: &String,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![
                 ("Id".into(), folder.uuid.clone().into()),
@@ -383,7 +400,9 @@ impl WebSocketUsers {
             Some(acting_device_uuid.into()),
         );
 
-        self.send_update(&folder.user_uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(&folder.user_uuid, &data).await;
+        }
 
         if CONFIG.push_enabled() {
             push_folder_update(ut, folder, acting_device_uuid, conn).await;
@@ -399,6 +418,10 @@ impl WebSocketUsers {
         collection_uuids: Option<Vec<String>>,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let org_uuid = convert_option(cipher.organization_uuid.clone());
         // Depending if there are collections provided or not, we need to have different values for the following variables.
         // The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change.
@@ -424,8 +447,10 @@ impl WebSocketUsers {
             Some(acting_device_uuid.into()),
         );
 
-        for uuid in user_uuids {
-            self.send_update(uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            for uuid in user_uuids {
+                self.send_update(uuid, &data).await;
+            }
         }
 
         if CONFIG.push_enabled() && user_uuids.len() == 1 {
@@ -441,6 +466,10 @@ impl WebSocketUsers {
         acting_device_uuid: &String,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let user_uuid = convert_option(send.user_uuid.clone());
 
         let data = create_update(
@@ -453,8 +482,10 @@ impl WebSocketUsers {
             None,
         );
 
-        for uuid in user_uuids {
-            self.send_update(uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            for uuid in user_uuids {
+                self.send_update(uuid, &data).await;
+            }
         }
         if CONFIG.push_enabled() && user_uuids.len() == 1 {
             push_send_update(ut, send, acting_device_uuid, conn).await;
@@ -468,12 +499,18 @@ impl WebSocketUsers {
         acting_device_uuid: &String,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())],
             UpdateType::AuthRequest,
             Some(acting_device_uuid.to_string()),
         );
-        self.send_update(user_uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(user_uuid, &data).await;
+        }
 
         if CONFIG.push_enabled() {
             push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await;
@@ -487,12 +524,18 @@ impl WebSocketUsers {
         approving_device_uuid: String,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
             UpdateType::AuthRequestResponse,
             approving_device_uuid.clone().into(),
         );
-        self.send_update(auth_response_uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(auth_response_uuid, &data).await;
+        }
 
         if CONFIG.push_enabled() {
             push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn)
@@ -516,6 +559,9 @@ impl AnonymousWebSocketSubscriptions {
     }
 
     pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) {
+        if !CONFIG.enable_websocket() {
+            return;
+        }
         let data = create_anonymous_update(
             vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
             UpdateType::AuthRequestResponse,
@@ -610,127 +656,3 @@ pub enum UpdateType {
 
 pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>;
 pub type AnonymousNotify<'a> = &'a rocket::State<Arc<AnonymousWebSocketSubscriptions>>;
-
-pub fn start_notification_server() -> Arc<WebSocketUsers> {
-    let users = Arc::clone(&WS_USERS);
-    if CONFIG.websocket_enabled() {
-        let users2 = Arc::<WebSocketUsers>::clone(&users);
-        tokio::spawn(async move {
-            let addr = (CONFIG.websocket_address(), CONFIG.websocket_port());
-            info!("Starting WebSockets server on {}:{}", addr.0, addr.1);
-            let listener = TcpListener::bind(addr).await.expect("Can't listen on websocket port");
-
-            let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>();
-            CONFIG.set_ws_shutdown_handle(shutdown_tx);
-
-            loop {
-                tokio::select! {
-                    Ok((stream, addr)) = listener.accept() => {
-                        tokio::spawn(handle_connection(stream, Arc::<WebSocketUsers>::clone(&users2), addr));
-                    }
-
-                    _ = &mut shutdown_rx => {
-                        break;
-                    }
-                }
-            }
-
-            info!("Shutting down WebSockets server!")
-        });
-    }
-
-    users
-}
-
-async fn handle_connection(stream: TcpStream, users: Arc<WebSocketUsers>, addr: SocketAddr) -> Result<(), Error> {
-    let mut user_uuid: Option<String> = None;
-
-    info!("Accepting WS connection from {addr}");
-
-    // Accept connection, do initial handshake, validate auth token and get the user ID
-    use handshake::server::{Request, Response};
-    let mut stream = accept_hdr_async(stream, |req: &Request, res: Response| {
-        if let Some(token) = get_request_token(req) {
-            if let Ok(claims) = crate::auth::decode_login(&token) {
-                user_uuid = Some(claims.sub);
-                return Ok(res);
-            }
-        }
-        Err(Response::builder().status(401).body(None).unwrap())
-    })
-    .await?;
-
-    let user_uuid = user_uuid.expect("User UUID should be set after the handshake");
-
-    let (mut rx, guard) = {
-        // Add a channel to send messages to this client to the map
-        let entry_uuid = uuid::Uuid::new_v4();
-        let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
-        users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx));
-
-        // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
-        (rx, WSEntryMapGuard::new(users, user_uuid, entry_uuid, addr.ip()))
-    };
-
-    let _guard = guard;
-    let mut interval = tokio::time::interval(Duration::from_secs(15));
-    loop {
-        tokio::select! {
-            res = stream.next() => {
-                match res {
-                    Some(Ok(message)) => {
-                        match message {
-                            // Respond to any pings
-                            Message::Ping(ping) => stream.send(Message::Pong(ping)).await?,
-                            Message::Pong(_) => {/* Ignored */},
-
-                            // We should receive an initial message with the protocol and version, and we will reply to it
-                            Message::Text(ref message) => {
-                                let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);
-
-                                if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
-                                    stream.send(Message::binary(INITIAL_RESPONSE)).await?;
-                                    continue;
-                                }
-                            }
-                            // Just echo anything else the client sends
-                            _ => stream.send(message).await?,
-                        }
-                    }
-                    _ => break,
-                }
-            }
-
-            res = rx.recv() => {
-                match res {
-                    Some(res) => stream.send(res).await?,
-                    None => break,
-                }
-            }
-
-            _ = interval.tick() => stream.send(Message::Ping(create_ping())).await?
-        }
-    }
-
-    Ok(())
-}
-
-fn get_request_token(req: &handshake::server::Request) -> Option<String> {
-    const ACCESS_TOKEN_KEY: &str = "access_token=";
-
-    if let Some(Ok(auth)) = req.headers().get("Authorization").map(|a| a.to_str()) {
-        if let Some(token_part) = auth.strip_prefix("Bearer ") {
-            return Some(token_part.to_owned());
-        }
-    }
-
-    if let Some(params) = req.uri().query() {
-        let params_iter = params.split('&').take(1);
-        for val in params_iter {
-            if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
-                return Some(stripped.to_owned());
-            }
-        }
-    }
-    None
-}
src/api/push.rs

@@ -50,7 +50,11 @@ async fn get_auth_push_token() -> ApiResult<String> {
         ("client_secret", &client_secret),
     ];
 
-    let res = match get_reqwest_client().post("https://identity.bitwarden.com/connect/token").form(&params).send().await
+    let res = match get_reqwest_client()
+        .post(&format!("{}/connect/token", CONFIG.push_identity_uri()))
+        .form(&params)
+        .send()
+        .await
     {
         Ok(r) => r,
         Err(e) => err!(format!("Error getting push token from bitwarden server: {e}")),
@@ -72,24 +76,35 @@ async fn get_auth_push_token() -> ApiResult<String> {
     Ok(push_token.access_token.clone())
 }
 
-pub async fn register_push_device(user_uuid: String, device: Device) -> EmptyResult {
-    if !CONFIG.push_enabled() {
+pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbConn) -> EmptyResult {
+    if !CONFIG.push_enabled() || !device.is_push_device() || device.is_registered() {
         return Ok(());
     }
-    let auth_push_token = get_auth_push_token().await?;
+
+    if device.push_token.is_none() {
+        warn!("Skipping the registration of the device {} because the push_token field is empty.", device.uuid);
+        warn!("To get rid of this message you need to clear the app data and reconnect the device.");
+        return Ok(());
+    }
+
+    debug!("Registering Device {}", device.uuid);
+
+    // generate a random push_uuid so we know the device is registered
+    device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
 
     //Needed to register a device for push to bitwarden :
     let data = json!({
-        "userId": user_uuid,
+        "userId": device.user_uuid,
         "deviceId": device.push_uuid,
         "identifier": device.uuid,
         "type": device.atype,
         "pushToken": device.push_token
     });
 
+    let auth_push_token = get_auth_push_token().await?;
     let auth_header = format!("Bearer {}", &auth_push_token);
 
-    get_reqwest_client()
+    if let Err(e) = get_reqwest_client()
         .post(CONFIG.push_relay_uri() + "/push/register")
         .header(CONTENT_TYPE, "application/json")
         .header(ACCEPT, "application/json")
@@ -97,12 +112,20 @@ pub async fn register_push_device(user_uuid: String, device: Device) -> EmptyResult {
         .json(&data)
         .send()
         .await?
-        .error_for_status()?;
+        .error_for_status()
+    {
+        err!(format!("An error occurred while proceeding registration of a device: {e}"));
+    }
+
+    if let Err(e) = device.save(conn).await {
+        err!(format!("An error occurred while trying to save the (registered) device push uuid: {e}"));
+    }
+
     Ok(())
 }
 
-pub async fn unregister_push_device(uuid: String) -> EmptyResult {
-    if !CONFIG.push_enabled() {
+pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult {
+    if !CONFIG.push_enabled() || push_uuid.is_none() {
        return Ok(());
     }
     let auth_push_token = get_auth_push_token().await?;
@@ -110,7 +133,7 @@ pub async fn unregister_push_device(uuid: String) -> EmptyResult {
     let auth_header = format!("Bearer {}", &auth_push_token);
 
     match get_reqwest_client()
-        .delete(CONFIG.push_relay_uri() + "/push/" + &uuid)
+        .delete(CONFIG.push_relay_uri() + "/push/" + &push_uuid.unwrap())
         .header(AUTHORIZATION, auth_header)
         .send()
         .await
src/api/web.rs

@@ -170,11 +170,11 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Error> {
         }
         "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
         "bootstrap.bundle.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap.bundle.js"))),
-        "jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))),
+        "jdenticon-3.3.0.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon-3.3.0.js"))),
         "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
         "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
-        "jquery-3.7.0.slim.js" => {
-            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.0.slim.js")))
+        "jquery-3.7.1.slim.js" => {
+            Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.1.slim.js")))
         }
         _ => err!(format!("Static file not found: {filename}")),
     }
138 src/auth.rs

@@ -1,10 +1,11 @@
 // JWT Handling
 //
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use num_traits::FromPrimitive;
-use once_cell::sync::Lazy;
+use once_cell::sync::{Lazy, OnceCell};
 
-use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
+use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
+use openssl::rsa::Rsa;
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;
 
@@ -12,7 +13,7 @@ use crate::{error::Error, CONFIG};
 
 const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
 
-pub static DEFAULT_VALIDITY: Lazy<Duration> = Lazy::new(|| Duration::hours(2));
+pub static DEFAULT_VALIDITY: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_hours(2).unwrap());
 static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));
 
 pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin()));
@@ -26,23 +27,46 @@ static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.domain_origin()));
 static JWT_ORG_API_KEY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin()));
 static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin()));
 
-static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| {
-    let key =
-        std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key. \n{e}"));
-    EncodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{e}"))
-});
-static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
-    let key = std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key. \n{e}"));
-    DecodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{e}"))
-});
+static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
+static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();
 
-pub fn load_keys() {
-    Lazy::force(&PRIVATE_RSA_KEY);
-    Lazy::force(&PUBLIC_RSA_KEY);
+pub fn initialize_keys() -> Result<(), crate::error::Error> {
+    let mut priv_key_buffer = Vec::with_capacity(2048);
+
+    let priv_key = {
+        let mut priv_key_file =
+            File::options().create(true).truncate(false).read(true).write(true).open(CONFIG.private_rsa_key())?;
+
+        #[allow(clippy::verbose_file_reads)]
+        let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
+
+        if bytes_read > 0 {
+            Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
+        } else {
+            // Only create the key if the file doesn't exist or is empty
+            let rsa_key = openssl::rsa::Rsa::generate(2048)?;
+            priv_key_buffer = rsa_key.private_key_to_pem()?;
+            priv_key_file.write_all(&priv_key_buffer)?;
+            info!("Private key created correctly.");
+            rsa_key
+        }
+    };
+
+    let pub_key_buffer = priv_key.public_key_to_pem()?;
+
+    let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
+    let dec: DecodingKey = DecodingKey::from_rsa_pem(&pub_key_buffer)?;
+    if PRIVATE_RSA_KEY.set(enc).is_err() {
+        err!("PRIVATE_RSA_KEY must only be initialized once")
+    }
+    if PUBLIC_RSA_KEY.set(dec).is_err() {
+        err!("PUBLIC_RSA_KEY must only be initialized once")
+    }
+    Ok(())
 }
 
 pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
-    match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) {
+    match jsonwebtoken::encode(&JWT_HEADER, claims, PRIVATE_RSA_KEY.wait()) {
         Ok(token) => token,
         Err(e) => panic!("Error encoding jwt {e}"),
     }
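Moving from `Lazy` to `OnceCell` turns key loading from an implicit "panic on first JWT use" into an explicit, fallible startup step, while `wait()` lets readers block until initialization has happened. The pattern in isolation; the names in this sketch are illustrative, not from the diff:

    use once_cell::sync::OnceCell;

    static KEY: OnceCell<Vec<u8>> = OnceCell::new();

    // Run once at startup; set() fails if called twice, which surfaces
    // double-initialization bugs instead of silently overwriting.
    fn initialize() -> Result<(), &'static str> {
        KEY.set(vec![1, 2, 3]).map_err(|_| "KEY must only be initialized once")
    }

    // wait() blocks until initialize() has run, so callers never observe
    // a half-initialized key (it must eventually be set, or this hangs).
    fn use_key() -> usize {
        KEY.wait().len()
    }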
@@ -56,7 +80,7 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Error> {
     validation.set_issuer(&[issuer]);
 
     let token = token.replace(char::is_whitespace, "");
-    match jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation) {
+    match jsonwebtoken::decode(&token, PUBLIC_RSA_KEY.wait(), &validation) {
         Ok(d) => Ok(d.claims),
         Err(err) => match *err.kind() {
             ErrorKind::InvalidToken => err!("Token is invalid"),
@@ -119,10 +143,16 @@ pub struct LoginJwtClaims {
     pub email: String,
     pub email_verified: bool,
 
-    pub orgowner: Vec<String>,
-    pub orgadmin: Vec<String>,
-    pub orguser: Vec<String>,
-    pub orgmanager: Vec<String>,
+    // ---
+    // Disabled these keys to be added to the JWT since they could cause the JWT to get too large
+    // Also These key/value pairs are not used anywhere by either Vaultwarden or Bitwarden Clients
+    // Because these might get used in the future, and they are added by the Bitwarden Server, lets keep it, but then commented out
+    // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+    // ---
+    // pub orgowner: Vec<String>,
+    // pub orgadmin: Vec<String>,
+    // pub orguser: Vec<String>,
+    // pub orgmanager: Vec<String>,
 
     // user security_stamp
     pub sstamp: String,
@@ -158,11 +188,11 @@ pub fn generate_invite_claims(
     user_org_id: Option<String>,
     invited_by_email: Option<String>,
 ) -> InviteJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     InviteJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_INVITE_ISSUER.to_string(),
         sub: uuid,
         email,
@@ -196,11 +226,11 @@ pub fn generate_emergency_access_invite_claims(
     grantor_name: String,
     grantor_email: String,
 ) -> EmergencyAccessInviteJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     EmergencyAccessInviteJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
         sub: uuid,
         email,
@@ -227,10 +257,10 @@ pub struct OrgApiKeyLoginJwtClaims {
 }
 
 pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     OrgApiKeyLoginJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(1)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(),
         iss: JWT_ORG_API_KEY_ISSUER.to_string(),
         sub: uuid,
         client_id: format!("organization.{org_id}"),
@@ -254,10 +284,10 @@ pub struct FileDownloadClaims {
 }
 
 pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     FileDownloadClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::minutes(5)).timestamp(),
+        exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(),
         iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(),
         sub: uuid,
         file_id,
@@ -277,42 +307,42 @@ pub struct BasicJwtClaims {
 }
 
 pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_DELETE_ISSUER.to_string(),
         sub: uuid,
     }
 }
 
 pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
         sub: uuid,
     }
 }
 
 pub fn generate_admin_claims() -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::minutes(CONFIG.admin_session_lifetime())).timestamp(),
+        exp: (time_now + TimeDelta::try_minutes(CONFIG.admin_session_lifetime()).unwrap()).timestamp(),
         iss: JWT_ADMIN_ISSUER.to_string(),
         sub: "admin_panel".to_string(),
     }
 }
 
 pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::minutes(2)).timestamp(),
+        exp: (time_now + TimeDelta::try_minutes(2).unwrap()).timestamp(),
         iss: JWT_SEND_ISSUER.to_string(),
         sub: format!("{send_id}/{file_id}"),
     }
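Every `Duration::hours(x)` call becomes `TimeDelta::try_hours(x).unwrap()` because newer chrono releases expose checked constructors that return `Option` instead of panicking inside the library. The `.unwrap()` looks safe here since all the arguments are small constants or bounded config values; a quick illustration of the new shape:

    use chrono::TimeDelta;

    // In-range values yield Some(TimeDelta)
    assert!(TimeDelta::try_hours(2).is_some());

    // Out-of-range values now return None instead of panicking inside chrono
    assert!(TimeDelta::try_hours(i64::MAX).is_none());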
@@ -361,10 +391,8 @@ impl<'r> FromRequest<'r> for Host {
 
         let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
             host
-        } else if let Some(host) = headers.get_one("Host") {
-            host
         } else {
-            ""
+            headers.get_one("Host").unwrap_or_default()
         };
 
         format!("{protocol}://{host}")
@@ -377,7 +405,6 @@ impl<'r> FromRequest<'r> for Host {
 }
 
 pub struct ClientHeaders {
-    pub host: String,
     pub device_type: i32,
     pub ip: ClientIp,
 }
@@ -387,7 +414,6 @@ impl<'r> FromRequest<'r> for ClientHeaders {
     type Error = &'static str;
 
     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
-        let host = try_outcome!(Host::from_request(request).await).host;
         let ip = match ClientIp::from_request(request).await {
             Outcome::Success(ip) => ip,
             _ => err_handler!("Error getting Client IP"),
@@ -397,7 +423,6 @@ impl<'r> FromRequest<'r> for ClientHeaders {
             request.headers().get_one("device-type").map(|d| d.parse().unwrap_or(14)).unwrap_or_else(|| 14);
 
         Outcome::Success(ClientHeaders {
-            host,
             device_type,
             ip,
         })
@@ -469,7 +494,7 @@ impl<'r> FromRequest<'r> for Headers {
                     // Check if the stamp exception has expired first.
                     // Then, check if the current route matches any of the allowed routes.
                     // After that check the stamp in exception matches the one in the claims.
-                    if Utc::now().naive_utc().timestamp() > stamp_exception.expire {
+                    if Utc::now().timestamp() > stamp_exception.expire {
                         // If the stamp exception has been expired remove it from the database.
                         // This prevents checking this stamp exception for new requests.
                         let mut user = user;
@@ -503,7 +528,6 @@ pub struct OrgHeaders {
     pub user: User,
     pub org_user_type: UserOrgType,
     pub org_user: UserOrganization,
-    pub org_id: String,
     pub ip: ClientIp,
 }
 
@@ -566,7 +590,6 @@ impl<'r> FromRequest<'r> for OrgHeaders {
                         }
                     },
                     org_user,
-                    org_id: String::from(org_id),
                     ip: headers.ip,
                 })
             }
@@ -643,7 +666,6 @@ pub struct ManagerHeaders {
     pub host: String,
     pub device: Device,
     pub user: User,
-    pub org_user_type: UserOrgType,
     pub ip: ClientIp,
 }
 
@@ -661,7 +683,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
                 _ => err_handler!("Error getting DB"),
             };
 
-            if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
+            if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
                 err_handler!("The current user isn't a manager for this collection")
             }
         }
@@ -672,7 +694,6 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
                 host: headers.host,
                 device: headers.device,
                 user: headers.user,
-                org_user_type: headers.org_user_type,
                 ip: headers.ip,
             })
         } else {
@@ -699,7 +720,6 @@ pub struct ManagerHeadersLoose {
     pub device: Device,
     pub user: User,
     pub org_user: UserOrganization,
-    pub org_user_type: UserOrgType,
     pub ip: ClientIp,
 }
 
@@ -715,7 +735,6 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {
                 device: headers.device,
                 user: headers.user,
                 org_user: headers.org_user,
-                org_user_type: headers.org_user_type,
                 ip: headers.ip,
             })
         } else {
@@ -734,10 +753,6 @@ impl From<ManagerHeadersLoose> for Headers {
         }
     }
 }
-async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
-    org_user.has_full_access()
-        || Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
-}
 
 impl ManagerHeaders {
     pub async fn from_loose(
@@ -749,7 +764,7 @@ impl ManagerHeaders {
             if uuid::Uuid::parse_str(col_id).is_err() {
                 err!("Collection Id is malformed!");
             }
-            if !can_access_collection(&h.org_user, col_id, conn).await {
+            if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
                 err!("You don't have access to all collections!");
             }
         }
@@ -758,14 +773,12 @@ impl ManagerHeaders {
             host: h.host,
             device: h.device,
             user: h.user,
-            org_user_type: h.org_user_type,
             ip: h.ip,
         })
     }
 }
 
 pub struct OwnerHeaders {
-    pub host: String,
     pub device: Device,
     pub user: User,
     pub ip: ClientIp,
@@ -779,7 +792,6 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
         let headers = try_outcome!(OrgHeaders::from_request(request).await);
         if headers.org_user_type == UserOrgType::Owner {
             Outcome::Success(Self {
-                host: headers.host,
                 device: headers.device,
                 user: headers.user,
                 ip: headers.ip,
@@ -793,7 +805,11 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
 //
 // Client IP address detection
 //
-use std::net::IpAddr;
+use std::{
+    fs::File,
+    io::{Read, Write},
+    net::IpAddr,
+};
 
 pub struct ClientIp {
     pub ip: IpAddr,
145 src/config.rs

@@ -9,7 +9,7 @@ use reqwest::Url;
 use crate::{
     db::DbConnType,
     error::Error,
-    util::{get_env, get_env_bool},
+    util::{get_env, get_env_bool, parse_experimental_client_feature_flags},
 };
 
 static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
@@ -39,7 +39,6 @@ macro_rules! make_config {
 
     struct Inner {
         rocket_shutdown_handle: Option<rocket::Shutdown>,
-        ws_shutdown_handle: Option<tokio::sync::oneshot::Sender<()>>,
 
         templates: Handlebars<'static>,
         config: ConfigItems,
@@ -361,7 +360,7 @@ make_config! {
     /// Sends folder
     sends_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "sends");
     /// Temp folder |> Used for storing temporary file uploads
     tmp_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "tmp");
     /// Templates folder
     templates_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "templates");
     /// Session JWT key
@@ -371,17 +370,15 @@ make_config! {
     },
     ws {
         /// Enable websocket notifications
-        websocket_enabled: bool, false, def, false;
-        /// Websocket address
-        websocket_address: String, false, def, "0.0.0.0".to_string();
-        /// Websocket port
-        websocket_port: u16, false, def, 3012;
+        enable_websocket: bool, false, def, true;
     },
     push {
         /// Enable push notifications
         push_enabled: bool, false, def, false;
-        /// Push relay base uri
+        /// Push relay uri
         push_relay_uri: String, false, def, "https://push.bitwarden.com".to_string();
+        /// Push identity uri
+        push_identity_uri: String, false, def, "https://identity.bitwarden.com".to_string();
         /// Installation id |> The installation id from https://bitwarden.com/host
         push_installation_id: Pass, false, def, String::new();
         /// Installation key |> The installation key from https://bitwarden.com/host
@@ -440,6 +437,8 @@ make_config! {
     user_attachment_limit: i64, true, option;
     /// Per-organization attachment storage limit (KB) |> Max kilobytes of attachment storage allowed per org. When this limit is reached, org members will not be allowed to upload further attachments for ciphers owned by that org.
     org_attachment_limit: i64, true, option;
+    /// Per-user send storage limit (KB) |> Max kilobytes of sends storage allowed per user. When this limit is reached, the user will not be allowed to upload further sends.
+    user_send_limit: i64, true, option;
 
     /// Trash auto-delete days |> Number of days to wait before auto-deleting a trashed item.
     /// If unset, trashed items are not auto-deleted. This setting applies globally, so make
@@ -478,7 +477,7 @@ make_config! {
     /// Invitation token expiration time (in hours) |> The number of hours after which an organization invite token, emergency access invite token,
     /// email verification token and deletion request token will expire (must be at least 1)
     invitation_expiration_hours: u32, false, def, 120;
-    /// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
+    /// Enable emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
     emergency_access_allowed: bool, true, def, true;
     /// Allow email change |> Controls whether users can change their email. This setting applies globally to all users.
     email_change_allowed: bool, true, def, true;
@@ -547,6 +546,9 @@ make_config! {
     /// TOTP codes of the previous and next 30 seconds will be invalid.
     authenticator_disable_time_drift: bool, true, def, false;
 
+    /// Customize the enabled feature flags on the clients |> This is a comma separated list of feature flags to enable.
+    experimental_client_feature_flags: String, false, def, "fido2-vault-credentials".to_string();
+
     /// Require new device emails |> When a user logs in an email is required to be sent.
     /// If sending the email fails the login attempt will fail.
     require_device_email: bool, true, def, false;
@@ -684,6 +686,10 @@ make_config! {
     email_expiration_time: u64, true, def, 600;
     /// Maximum attempts |> Maximum attempts before an email token is reset and a new email will need to be sent
     email_attempts_limit: u64, true, def, 3;
+    /// Automatically enforce at login |> Setup email 2FA provider regardless of any organization policy
+    email_2fa_enforce_on_verified_invite: bool, true, def, false;
+    /// Auto-enable 2FA (Know the risks!) |> Automatically setup email 2FA as fallback provider when needed
+    email_2fa_auto_fallback: bool, true, def, false;
 },
 }
 
@@ -751,6 +757,57 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         )
     }
 
+    if cfg.push_enabled {
+        let push_relay_uri = cfg.push_relay_uri.to_lowercase();
+        if !push_relay_uri.starts_with("https://") {
+            err!("`PUSH_RELAY_URI` must start with 'https://'.")
+        }
+
+        if Url::parse(&push_relay_uri).is_err() {
+            err!("Invalid URL format for `PUSH_RELAY_URI`.");
+        }
+
+        let push_identity_uri = cfg.push_identity_uri.to_lowercase();
+        if !push_identity_uri.starts_with("https://") {
+            err!("`PUSH_IDENTITY_URI` must start with 'https://'.")
+        }
+
+        if Url::parse(&push_identity_uri).is_err() {
+            err!("Invalid URL format for `PUSH_IDENTITY_URI`.");
+        }
+    }
+
+    // TODO: deal with deprecated flags so they can be removed from this list, cf. #4263
+    const KNOWN_FLAGS: &[&str] =
+        &["autofill-overlay", "autofill-v2", "browser-fileless-import", "fido2-vault-credentials"];
+    let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags);
+    let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect();
+    if !invalid_flags.is_empty() {
+        err!(format!("Unrecognized experimental client feature flags: {invalid_flags:?}.\n\n\
+                      Please ensure all feature flags are spelled correctly and that they are supported in this version.\n\
+                      Supported flags: {KNOWN_FLAGS:?}"));
+    }
+
+    const MAX_FILESIZE_KB: i64 = i64::MAX >> 10;
+
+    if let Some(limit) = cfg.user_attachment_limit {
+        if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
+            err!("`USER_ATTACHMENT_LIMIT` is out of bounds");
+        }
+    }
+
+    if let Some(limit) = cfg.org_attachment_limit {
+        if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
+            err!("`ORG_ATTACHMENT_LIMIT` is out of bounds");
+        }
+    }
+
+    if let Some(limit) = cfg.user_send_limit {
+        if !(0i64..=MAX_FILESIZE_KB).contains(&limit) {
+            err!("`USER_SEND_LIMIT` is out of bounds");
+        }
+    }
+
     if cfg._enable_duo
         && (cfg.duo_host.is_some() || cfg.duo_ikey.is_some() || cfg.duo_skey.is_some())
         && !(cfg.duo_host.is_some() && cfg.duo_ikey.is_some() && cfg.duo_skey.is_some())
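The new flag validation calls `parse_experimental_client_feature_flags` from util.rs, whose body is not part of this diff; judging by the config doc string it presumably splits the comma-separated list into per-flag entries keyed by name. A rough sketch of that assumed shape (an assumption for illustration, not the actual util.rs code):

    use std::collections::HashMap;

    // Assumed behavior: "flag-a, flag-b" -> {"flag-a": true, "flag-b": true}
    fn parse_experimental_client_feature_flags(csv: &str) -> HashMap<String, bool> {
        csv.split(',')
            .map(|flag| (flag.trim().to_string(), true))
            .filter(|(flag, _)| !flag.is_empty())
            .collect()
    }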
@@ -835,6 +892,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
|
|||||||
err!("To enable email 2FA, a mail transport must be configured")
|
err!("To enable email 2FA, a mail transport must be configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !cfg._enable_email_2fa && cfg.email_2fa_enforce_on_verified_invite {
|
||||||
|
err!("To enforce email 2FA on verified invitations, email 2fa has to be enabled!");
|
||||||
|
}
|
||||||
|
if !cfg._enable_email_2fa && cfg.email_2fa_auto_fallback {
|
||||||
|
err!("To use email 2FA as automatic fallback, email 2fa has to be enabled!");
|
||||||
|
}
|
||||||
|
|
||||||
// Check if the icon blacklist regex is valid
|
// Check if the icon blacklist regex is valid
|
||||||
if let Some(ref r) = cfg.icon_blacklist_regex {
|
if let Some(ref r) = cfg.icon_blacklist_regex {
|
||||||
let validate_regex = regex::Regex::new(r);
|
let validate_regex = regex::Regex::new(r);
|
||||||
@@ -1013,7 +1077,6 @@ impl Config {
|
|||||||
Ok(Config {
|
Ok(Config {
|
||||||
inner: RwLock::new(Inner {
|
inner: RwLock::new(Inner {
|
||||||
rocket_shutdown_handle: None,
|
rocket_shutdown_handle: None,
|
||||||
ws_shutdown_handle: None,
|
|
||||||
templates: load_templates(&config.templates_folder),
|
templates: load_templates(&config.templates_folder),
|
||||||
config,
|
config,
|
||||||
_env,
|
_env,
|
||||||
@@ -1106,7 +1169,7 @@ impl Config {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn delete_user_config(&self) -> Result<(), Error> {
|
pub fn delete_user_config(&self) -> Result<(), Error> {
|
||||||
crate::util::delete_file(&CONFIG_FILE)?;
|
std::fs::remove_file(&*CONFIG_FILE)?;
|
||||||
|
|
||||||
// Empty user config
|
// Empty user config
|
||||||
let usr = ConfigBuilder::default();
|
let usr = ConfigBuilder::default();
|
||||||
@@ -1131,9 +1194,6 @@ impl Config {
     pub fn private_rsa_key(&self) -> String {
         format!("{}.pem", CONFIG.rsa_key_filename())
     }
-    pub fn public_rsa_key(&self) -> String {
-        format!("{}.pub.pem", CONFIG.rsa_key_filename())
-    }
     pub fn mail_enabled(&self) -> bool {
         let inner = &self.inner.read().unwrap().config;
         inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
@@ -1182,16 +1242,8 @@ impl Config {
         self.inner.write().unwrap().rocket_shutdown_handle = Some(handle);
     }
 
-    pub fn set_ws_shutdown_handle(&self, handle: tokio::sync::oneshot::Sender<()>) {
-        self.inner.write().unwrap().ws_shutdown_handle = Some(handle);
-    }
-
     pub fn shutdown(&self) {
         if let Ok(mut c) = self.inner.write() {
-            if let Some(handle) = c.ws_shutdown_handle.take() {
-                handle.send(()).ok();
-            }
-
             if let Some(handle) = c.rocket_shutdown_handle.take() {
                 handle.notify();
             }
@@ -1199,7 +1251,10 @@ impl Config {
     }
 }
 
-use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, Renderable};
+use handlebars::{
+    Context, DirectorySourceOptions, Handlebars, Helper, HelperResult, Output, RenderContext, RenderErrorReason,
+    Renderable,
+};
 
 fn load_templates<P>(path: P) -> Handlebars<'static>
 where
@@ -1243,17 +1298,18 @@ where
     reg!("email/invite_accepted", ".html");
     reg!("email/invite_confirmed", ".html");
     reg!("email/new_device_logged_in", ".html");
+    reg!("email/protected_action", ".html");
     reg!("email/pw_hint_none", ".html");
     reg!("email/pw_hint_some", ".html");
     reg!("email/send_2fa_removed_from_org", ".html");
-    reg!("email/send_single_org_removed_from_org", ".html");
-    reg!("email/send_org_invite", ".html");
     reg!("email/send_emergency_access_invite", ".html");
+    reg!("email/send_org_invite", ".html");
+    reg!("email/send_single_org_removed_from_org", ".html");
+    reg!("email/smtp_test", ".html");
     reg!("email/twofactor_email", ".html");
     reg!("email/verify_email", ".html");
-    reg!("email/welcome", ".html");
     reg!("email/welcome_must_verify", ".html");
-    reg!("email/smtp_test", ".html");
+    reg!("email/welcome", ".html");
 
     reg!("admin/base");
     reg!("admin/login");
@@ -1267,19 +1323,27 @@ where
     // And then load user templates to overwrite the defaults
     // Use .hbs extension for the files
     // Templates get registered with their relative name
-    hb.register_templates_directory(".hbs", path).unwrap();
+    hb.register_templates_directory(
+        path,
+        DirectorySourceOptions {
+            tpl_extension: ".hbs".to_owned(),
+            ..Default::default()
+        },
+    )
+    .unwrap();
 
     hb
 }
 
 fn case_helper<'reg, 'rc>(
-    h: &Helper<'reg, 'rc>,
+    h: &Helper<'rc>,
     r: &'reg Handlebars<'_>,
     ctx: &'rc Context,
     rc: &mut RenderContext<'reg, 'rc>,
     out: &mut dyn Output,
 ) -> HelperResult {
-    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?;
+    let param =
+        h.param(0).ok_or_else(|| RenderErrorReason::Other(String::from("Param not found for helper \"case\"")))?;
     let value = param.value().clone();
 
     if h.params().iter().skip(1).any(|x| x.value() == &value) {
@@ -1290,17 +1354,21 @@ fn case_helper<'reg, 'rc>(
 }
 
 fn js_escape_helper<'reg, 'rc>(
-    h: &Helper<'reg, 'rc>,
+    h: &Helper<'rc>,
     _r: &'reg Handlebars<'_>,
     _ctx: &'rc Context,
     _rc: &mut RenderContext<'reg, 'rc>,
     out: &mut dyn Output,
 ) -> HelperResult {
-    let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"jsesc\""))?;
+    let param =
+        h.param(0).ok_or_else(|| RenderErrorReason::Other(String::from("Param not found for helper \"jsesc\"")))?;
 
     let no_quote = h.param(1).is_some();
 
-    let value = param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"jsesc\" is not a String"))?;
+    let value = param
+        .value()
+        .as_str()
+        .ok_or_else(|| RenderErrorReason::Other(String::from("Param for helper \"jsesc\" is not a String")))?;
 
     let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
     if !no_quote {
@@ -1312,15 +1380,18 @@ fn js_escape_helper<'reg, 'rc>(
 }
 
 fn to_json<'reg, 'rc>(
-    h: &Helper<'reg, 'rc>,
+    h: &Helper<'rc>,
     _r: &'reg Handlebars<'_>,
     _ctx: &'rc Context,
     _rc: &mut RenderContext<'reg, 'rc>,
     out: &mut dyn Output,
 ) -> HelperResult {
-    let param = h.param(0).ok_or_else(|| RenderError::new("Expected 1 parameter for \"to_json\""))?.value();
+    let param = h
+        .param(0)
+        .ok_or_else(|| RenderErrorReason::Other(String::from("Expected 1 parameter for \"to_json\"")))?
+        .value();
     let json = serde_json::to_string(param)
-        .map_err(|e| RenderError::new(format!("Can't serialize parameter to JSON: {e}")))?;
+        .map_err(|e| RenderErrorReason::Other(format!("Can't serialize parameter to JSON: {e}")))?;
     out.write(&json)?;
     Ok(())
 }
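The helper hunks above track the handlebars 5.x API: `RenderError::new` is replaced by the `RenderErrorReason` enum (which converts into a `RenderError` via `?`), `Helper` drops the `'reg` lifetime parameter, and `register_templates_directory` now takes a `DirectorySourceOptions` struct instead of a bare extension string. A hedged sketch of the new error style; the helper name and message here are illustrative, not from the diff:

    use handlebars::{Helper, HelperResult, Output, RenderErrorReason};

    // Illustrative only: fail with RenderErrorReason when the parameter is
    // missing, the same shape the updated helpers above use.
    fn first_param_or_err(h: &Helper<'_>, out: &mut dyn Output) -> HelperResult {
        let param = h
            .param(0)
            .ok_or_else(|| RenderErrorReason::Other(String::from("Param not found")))?;
        out.write(param.value().as_str().unwrap_or_default())?;
        Ok(())
    }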
diff --git a/src/db/mod.rs b/src/db/mod.rs
@@ -7,7 +7,6 @@ use diesel::{
 
 use rocket::{
     http::Status,
-    outcome::IntoOutcome,
     request::{FromRequest, Outcome},
     Request,
 };
@@ -390,13 +389,13 @@ pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
 pub async fn get_sql_server_version(conn: &mut DbConn) -> String {
     db_run! {@raw conn:
         postgresql, mysql {
-            sql_function!{
+            define_sql_function!{
                 fn version() -> diesel::sql_types::Text;
             }
             diesel::select(version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
         }
        sqlite {
-            sql_function!{
+            define_sql_function!{
                 fn sqlite_version() -> diesel::sql_types::Text;
             }
             diesel::select(sqlite_version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
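`sql_function!` was deprecated and renamed in Diesel 2.2; `define_sql_function!` generates the same callable binding for a server-side SQL function. A minimal sketch of the pattern outside the `db_run!` macro (connection type chosen only for illustration):

    use diesel::prelude::*;

    // Sketch: bind the database's version() function, then SELECT it.
    diesel::define_sql_function! {
        fn version() -> diesel::sql_types::Text;
    }

    fn server_version(conn: &mut diesel::pg::PgConnection) -> String {
        diesel::select(version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
    }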
@@ -413,8 +412,11 @@ impl<'r> FromRequest<'r> for DbConn {
 
     async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
         match request.rocket().state::<DbPool>() {
-            Some(p) => p.get().await.map_err(|_| ()).into_outcome(Status::ServiceUnavailable),
-            None => Outcome::Failure((Status::InternalServerError, ())),
+            Some(p) => match p.get().await {
+                Ok(dbconn) => Outcome::Success(dbconn),
+                _ => Outcome::Error((Status::ServiceUnavailable, ())),
+            },
+            None => Outcome::Error((Status::InternalServerError, ())),
         }
     }
 }
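Rocket 0.5 renamed `Outcome::Failure` to `Outcome::Error`, and the `IntoOutcome` sugar no longer fits this use, hence the explicit `match` above. The useful property of the new shape is that it distinguishes "pool exists but is exhausted" (503) from "pool was never attached to the server" (500). An illustrative, generic reduction of the same logic:

    use rocket::{http::Status, outcome::Outcome};

    // Sketch: map an optional pool lookup plus a fallible checkout into a
    // request-guard outcome, as the guard above does.
    fn to_outcome<T, E>(pool: Option<Result<T, E>>) -> Outcome<T, (Status, ()), Status> {
        match pool {
            Some(Ok(conn)) => Outcome::Success(conn),
            Some(Err(_)) => Outcome::Error((Status::ServiceUnavailable, ())),
            None => Outcome::Error((Status::InternalServerError, ())),
        }
    }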
diff --git a/src/db/models/attachment.rs b/src/db/models/attachment.rs
@@ -1,5 +1,6 @@
 use std::io::ErrorKind;
 
+use bigdecimal::{BigDecimal, ToPrimitive};
 use serde_json::Value;
 
 use crate::CONFIG;
@@ -13,14 +14,14 @@ db_object! {
         pub id: String,
         pub cipher_uuid: String,
         pub file_name: String, // encrypted
-        pub file_size: i32,
+        pub file_size: i64,
         pub akey: Option<String>,
     }
 }
 
 /// Local methods
 impl Attachment {
-    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i32, akey: Option<String>) -> Self {
+    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i64, akey: Option<String>) -> Self {
         Self {
             id,
             cipher_uuid,
@@ -41,13 +42,13 @@ impl Attachment {
 
     pub fn to_json(&self, host: &str) -> Value {
         json!({
-            "Id": self.id,
-            "Url": self.get_url(host),
-            "FileName": self.file_name,
-            "Size": self.file_size.to_string(),
-            "SizeName": crate::util::get_display_size(self.file_size),
-            "Key": self.akey,
-            "Object": "attachment"
+            "id": self.id,
+            "url": self.get_url(host),
+            "fileName": self.file_name,
+            "size": self.file_size.to_string(),
+            "sizeName": crate::util::get_display_size(self.file_size),
+            "key": self.akey,
+            "object": "attachment"
         })
     }
 }
@@ -94,7 +95,7 @@ impl Attachment {
 
     pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
-            crate::util::retry(
+            let _: () = crate::util::retry(
                 || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
                 10,
             )
@@ -102,7 +103,7 @@ impl Attachment {
 
         let file_path = &self.get_file_path();
 
-        match crate::util::delete_file(file_path) {
+        match std::fs::remove_file(file_path) {
             // Ignore "file not found" errors. This can happen when the
             // upstream caller has already cleaned up the file as part of
             // its own error handling.
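The switch from the old `crate::util::delete_file` helper to plain `std::fs::remove_file` leans on the `ErrorKind` import added at the top of this file to keep deletions idempotent, as the comment in the hunk describes. A small self-contained sketch of that pattern:

    use std::io::ErrorKind;

    // Remove a file, treating "already gone" as success; other I/O errors propagate.
    fn remove_if_exists(path: &str) -> std::io::Result<()> {
        match std::fs::remove_file(path) {
            Ok(()) => Ok(()),
            Err(e) if e.kind() == ErrorKind::NotFound => Ok(()),
            Err(e) => Err(e),
        }
    }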
@@ -145,13 +146,18 @@ impl Attachment {
 
     pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
-            let result: Option<i64> = attachments::table
+            let result: Option<BigDecimal> = attachments::table
                 .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                 .filter(ciphers::user_uuid.eq(user_uuid))
                 .select(diesel::dsl::sum(attachments::file_size))
                 .first(conn)
                 .expect("Error loading user attachment total size");
-            result.unwrap_or(0)
+
+            match result.map(|r| r.to_i64()) {
+                Some(Some(r)) => r,
+                Some(None) => i64::MAX,
+                None => 0
+            }
         }}
     }
@@ -168,13 +174,18 @@ impl Attachment {
 
     pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
-            let result: Option<i64> = attachments::table
+            let result: Option<BigDecimal> = attachments::table
                 .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
                 .filter(ciphers::organization_uuid.eq(org_uuid))
                 .select(diesel::dsl::sum(attachments::file_size))
                 .first(conn)
                 .expect("Error loading user attachment total size");
-            result.unwrap_or(0)
+
+            match result.map(|r| r.to_i64()) {
+                Some(Some(r)) => r,
+                Some(None) => i64::MAX,
+                None => 0
+            }
         }}
     }
 
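The `Option<i64>` to `Option<BigDecimal>` change in the two hunks above reflects how SQL `SUM` is typed: on MySQL and PostgreSQL, Diesel maps the sum of a `BigInt` column to a numeric/decimal type (a `BigDecimal` with the `bigdecimal` feature), since the database-side total can exceed 64 bits. The `to_i64()` conversion then clamps an overflowing total to `i64::MAX` instead of panicking. The conversion step in isolation:

    use bigdecimal::{BigDecimal, ToPrimitive};

    // Collapse Option<BigDecimal> (a SQL SUM result) into an i64 total:
    // no rows -> 0, representable total -> the value, overflow -> i64::MAX.
    fn sum_to_i64(result: Option<BigDecimal>) -> i64 {
        match result.map(|r| r.to_i64()) {
            Some(Some(total)) => total,
            Some(None) => i64::MAX,
            None => 0,
        }
    }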
diff --git a/src/db/models/auth_request.rs b/src/db/models/auth_request.rs
@@ -140,7 +140,7 @@ impl AuthRequest {
     }
 
     pub async fn purge_expired_auth_requests(conn: &mut DbConn) {
-        let expiry_time = Utc::now().naive_utc() - chrono::Duration::minutes(5); //after 5 minutes, clients reject the request
+        let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap(); //after 5 minutes, clients reject the request
         for auth_request in Self::find_created_before(&expiry_time, conn).await {
             auth_request.delete(conn).await.ok();
         }
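In recent chrono releases, `Duration` is an alias for `TimeDelta`, and the panicking constructors are deprecated in favor of fallible `try_*` variants, hence `TimeDelta::try_minutes(5).unwrap()` above (and `try_days` in the cipher hunks below). Sketch:

    use chrono::{NaiveDateTime, TimeDelta, Utc};

    // try_minutes returns None only if the value overflows TimeDelta's range,
    // so unwrap() is safe for small constants like this one.
    fn five_minutes_ago() -> NaiveDateTime {
        Utc::now().naive_utc() - TimeDelta::try_minutes(5).unwrap()
    }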
diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs
@@ -1,5 +1,6 @@
+use crate::util::LowerCase;
 use crate::CONFIG;
-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{NaiveDateTime, TimeDelta, Utc};
 use serde_json::Value;
 
 use super::{
@@ -81,7 +82,7 @@ impl Cipher {
     pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
         let mut validation_errors = serde_json::Map::new();
         for (index, cipher) in cipher_data.iter().enumerate() {
-            if let Some(note) = &cipher.Notes {
+            if let Some(note) = &cipher.notes {
                 if note.len() > 10_000 {
                     validation_errors.insert(
                         format!("Ciphers[{index}].Notes"),
@@ -135,10 +136,6 @@ impl Cipher {
             }
         }
 
-        let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
-        let password_history_json =
-            self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
-
         // We don't need these values at all for Organizational syncs
         // Skip any other database calls if this is the case and just return false.
         let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
@@ -153,32 +150,60 @@ impl Cipher {
             (false, false)
         };
 
+        let fields_json: Vec<_> = self
+            .fields
+            .as_ref()
+            .and_then(|s| {
+                serde_json::from_str::<Vec<LowerCase<Value>>>(s)
+                    .inspect_err(|e| warn!("Error parsing fields {:?}", e))
+                    .ok()
+            })
+            .map(|d| d.into_iter().map(|d| d.data).collect())
+            .unwrap_or_default();
+        let password_history_json: Vec<_> = self
+            .password_history
+            .as_ref()
+            .and_then(|s| {
+                serde_json::from_str::<Vec<LowerCase<Value>>>(s)
+                    .inspect_err(|e| warn!("Error parsing password history {:?}", e))
+                    .ok()
+            })
+            .map(|d| d.into_iter().map(|d| d.data).collect())
+            .unwrap_or_default();
+
         // Get the type_data or a default to an empty json object '{}'.
         // If not passing an empty object, mobile clients will crash.
-        let mut type_data_json: Value =
-            serde_json::from_str(&self.data).unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
+        let mut type_data_json = serde_json::from_str::<LowerCase<Value>>(&self.data)
+            .map(|d| d.data)
+            .unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
 
         // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
         // Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
         if self.atype == 1 {
-            if type_data_json["Uris"].is_array() {
-                let uri = type_data_json["Uris"][0]["Uri"].clone();
-                type_data_json["Uri"] = uri;
+            if type_data_json["uris"].is_array() {
+                let uri = type_data_json["uris"][0]["uri"].clone();
+                type_data_json["uri"] = uri;
             } else {
                 // Upstream always has an Uri key/value
-                type_data_json["Uri"] = Value::Null;
+                type_data_json["uri"] = Value::Null;
             }
         }
 
+        // Fix secure note issues when data is `{}`
+        // This breaks at least the native mobile clients
+        if self.atype == 2 && (self.data.eq("{}") || self.data.to_ascii_lowercase().eq("{\"type\":null}")) {
+            type_data_json = json!({"type": 0});
+        }
+
         // Clone the type_data and add some default value.
         let mut data_json = type_data_json.clone();
 
         // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
         // data_json should always contain the following keys with every atype
-        data_json["Fields"] = fields_json.clone();
-        data_json["Name"] = json!(self.name);
-        data_json["Notes"] = json!(self.notes);
-        data_json["PasswordHistory"] = password_history_json.clone();
+        data_json["fields"] = Value::Array(fields_json.clone());
+        data_json["name"] = json!(self.name);
+        data_json["notes"] = json!(self.notes);
+        data_json["passwordHistory"] = Value::Array(password_history_json.clone());
 
         let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
             if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
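The `LowerCase<Value>` deserialization above normalizes legacy PascalCase keys (`Uris`, `Uri`, and friends) that older Vaultwarden versions stored in the `data` column, so the rest of this function can deal with camelCase only. The wrapper itself lives in `crate::util` (per the import hunk) and is not shown in this diff; conceptually it behaves like this hedged sketch, where the function name and exact recursion are assumptions:

    use serde_json::{Map, Value};

    // Assumed behavior: lowercase the first character of every object key,
    // recursively, so "Uris" -> "uris" and "Uri" -> "uri".
    fn lowercase_keys(value: Value) -> Value {
        match value {
            Value::Object(map) => Value::Object(
                map.into_iter()
                    .map(|(k, v)| {
                        let mut chars = k.chars();
                        let key = match chars.next() {
                            Some(first) => first.to_lowercase().collect::<String>() + chars.as_str(),
                            None => k,
                        };
                        (key, lowercase_keys(v))
                    })
                    .collect::<Map<String, Value>>(),
            ),
            Value::Array(items) => Value::Array(items.into_iter().map(lowercase_keys).collect()),
            other => other,
        }
    }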
@@ -187,7 +212,7 @@ impl Cipher {
                 Cow::from(Vec::with_capacity(0))
             }
         } else {
-            Cow::from(self.get_collections(user_uuid.to_string(), conn).await)
+            Cow::from(self.get_admin_collections(user_uuid.to_string(), conn).await)
         };
 
         // There are three types of cipher response models in upstream
@@ -198,48 +223,48 @@ impl Cipher {
         //
         // Ref: https://github.com/bitwarden/server/blob/master/src/Core/Models/Api/Response/CipherResponseModel.cs
         let mut json_object = json!({
-            "Object": "cipherDetails",
-            "Id": self.uuid,
-            "Type": self.atype,
-            "CreationDate": format_date(&self.created_at),
-            "RevisionDate": format_date(&self.updated_at),
-            "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
-            "Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
-            "OrganizationId": self.organization_uuid,
-            "Key": self.key,
-            "Attachments": attachments_json,
+            "object": "cipherDetails",
+            "id": self.uuid,
+            "type": self.atype,
+            "creationDate": format_date(&self.created_at),
+            "revisionDate": format_date(&self.updated_at),
+            "deletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
+            "reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
+            "organizationId": self.organization_uuid,
+            "key": self.key,
+            "attachments": attachments_json,
             // We have UseTotp set to true by default within the Organization model.
             // This variable together with UsersGetPremium is used to show or hide the TOTP counter.
-            "OrganizationUseTotp": true,
+            "organizationUseTotp": true,
 
             // This field is specific to the cipherDetails type.
-            "CollectionIds": collection_ids,
+            "collectionIds": collection_ids,
 
-            "Name": self.name,
-            "Notes": self.notes,
-            "Fields": fields_json,
+            "name": self.name,
+            "notes": self.notes,
+            "fields": fields_json,
 
-            "Data": data_json,
+            "data": data_json,
 
-            "PasswordHistory": password_history_json,
+            "passwordHistory": password_history_json,
 
             // All Cipher types are included by default as null, but only the matching one will be populated
-            "Login": null,
-            "SecureNote": null,
-            "Card": null,
-            "Identity": null,
+            "login": null,
+            "secureNote": null,
+            "card": null,
+            "identity": null,
         });
 
         // These values are only needed for user/default syncs
         // Not during an organizational sync like `get_org_details`
         // Skip adding these fields in that case
         if sync_type == CipherSyncType::User {
-            json_object["FolderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
+            json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
                 cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string())
             } else {
                 self.get_folder_uuid(user_uuid, conn).await
             });
-            json_object["Favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
+            json_object["favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
                 cipher_sync_data.cipher_favorites.contains(&self.uuid)
             } else {
                 self.is_favorite(user_uuid, conn).await
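These key renames (and the matching ones in `Attachment::to_json` above and `Collection::to_json` further down) align the JSON responses with what current Bitwarden clients expect: camelCase field names instead of the PascalCase the old C# server emitted. Had these models been serialized with serde rather than hand-built `json!` maps, the equivalent would be a single rename rule; a hedged illustration only, not how the code above works:

    use serde::Serialize;

    // Illustrative only: the same camelCase effect via serde.
    #[derive(Serialize)]
    #[serde(rename_all = "camelCase")]
    struct CipherSummary {
        organization_id: Option<String>, // serializes as "organizationId"
        creation_date: String,           // serializes as "creationDate"
    }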
@@ -247,15 +272,15 @@ impl Cipher {
             // These values are true by default, but can be false if the
             // cipher belongs to a collection or group where the org owner has enabled
             // the "Read Only" or "Hide Passwords" restrictions for the user.
-            json_object["Edit"] = json!(!read_only);
-            json_object["ViewPassword"] = json!(!hide_passwords);
+            json_object["edit"] = json!(!read_only);
+            json_object["viewPassword"] = json!(!hide_passwords);
         }
 
         let key = match self.atype {
-            1 => "Login",
-            2 => "SecureNote",
-            3 => "Card",
-            4 => "Identity",
+            1 => "login",
+            2 => "secureNote",
+            3 => "card",
+            4 => "identity",
             _ => panic!("Wrong type"),
         };
 
@@ -273,7 +298,16 @@ impl Cipher {
             None => {
                 // Belongs to Organization, need to update affected users
                 if let Some(ref org_uuid) = self.organization_uuid {
-                    for user_org in UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await.iter() {
+                    // users having access to the collection
+                    let mut collection_users =
+                        UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await;
+                    if CONFIG.org_groups_enabled() {
+                        // members of a group having access to the collection
+                        let group_users =
+                            UserOrganization::find_by_cipher_and_org_with_group(&self.uuid, org_uuid, conn).await;
+                        collection_users.extend(group_users);
+                    }
+                    for user_org in collection_users {
                         User::update_uuid_revision(&user_org.user_uuid, conn).await;
                         user_uuids.push(user_org.user_uuid.clone())
                     }
@@ -352,7 +386,7 @@ impl Cipher {
     pub async fn purge_trash(conn: &mut DbConn) {
         if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
             let now = Utc::now().naive_utc();
-            let dt = now - Duration::days(auto_delete_days);
+            let dt = now - TimeDelta::try_days(auto_delete_days).unwrap();
             for cipher in Self::find_deleted_before(&dt, conn).await {
                 cipher.delete(conn).await.ok();
             }
@@ -417,9 +451,12 @@ impl Cipher {
         cipher_sync_data: Option<&CipherSyncData>,
         conn: &mut DbConn,
     ) -> bool {
+        if !CONFIG.org_groups_enabled() {
+            return false;
+        }
         if let Some(ref org_uuid) = self.organization_uuid {
             if let Some(cipher_sync_data) = cipher_sync_data {
-                return cipher_sync_data.user_group_full_access_for_organizations.get(org_uuid).is_some();
+                return cipher_sync_data.user_group_full_access_for_organizations.contains(org_uuid);
             } else {
                 return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await;
             }
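Two small things happen in the hunk above: an early `return false` skips the group lookups entirely when group support is disabled in the config, and `.contains(org_uuid)` replaces the roundabout `.get(org_uuid).is_some()` as the direct set-membership test. Reduced to its skeleton (names are illustrative):

    // Guard-clause sketch: when groups are disabled, skip the potentially
    // expensive group lookup entirely and report "no group access".
    fn group_access(groups_enabled: bool, in_full_access_group: impl FnOnce() -> bool) -> bool {
        if !groups_enabled {
            return false;
        }
        in_full_access_group()
    }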
@@ -512,6 +549,9 @@ impl Cipher {
     }
 
     async fn get_group_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> {
+        if !CONFIG.org_groups_enabled() {
+            return Vec::new();
+        }
         db_run! {conn: {
             ciphers::table
                 .filter(ciphers::uuid.eq(&self.uuid))
@@ -593,50 +633,84 @@ impl Cipher {
     // result, those ciphers will not appear in "My Vault" for the org
     // owner/admin, but they can still be accessed via the org vault view.
     pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
-        db_run! {conn: {
-            let mut query = ciphers::table
-                .left_join(ciphers_collections::table.on(
-                    ciphers::uuid.eq(ciphers_collections::cipher_uuid)
-                ))
-                .left_join(users_organizations::table.on(
-                    ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
-                        .and(users_organizations::user_uuid.eq(user_uuid))
-                        .and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
-                ))
-                .left_join(users_collections::table.on(
-                    ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
-                        // Ensure that users_collections::user_uuid is NULL for unconfirmed users.
-                        .and(users_organizations::user_uuid.eq(users_collections::user_uuid))
-                ))
-                .left_join(groups_users::table.on(
-                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
-                ))
-                .left_join(groups::table.on(
-                    groups::uuid.eq(groups_users::groups_uuid)
-                ))
-                .left_join(collections_groups::table.on(
-                    collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
-                        collections_groups::groups_uuid.eq(groups::uuid)
-                    )
-                ))
-                .filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
-                .or_filter(users_organizations::access_all.eq(true)) // access_all in org
-                .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
-                .or_filter(groups::access_all.eq(true)) // Access via groups
-                .or_filter(collections_groups::collections_uuid.is_not_null()) // Access via groups
-                .into_boxed();
-
-            if !visible_only {
-                query = query.or_filter(
-                    users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
-                );
-            }
-
-            query
-                .select(ciphers::all_columns)
-                .distinct()
-                .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
-        }}
+        if CONFIG.org_groups_enabled() {
+            db_run! {conn: {
+                let mut query = ciphers::table
+                    .left_join(ciphers_collections::table.on(
+                        ciphers::uuid.eq(ciphers_collections::cipher_uuid)
+                    ))
+                    .left_join(users_organizations::table.on(
+                        ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
+                            .and(users_organizations::user_uuid.eq(user_uuid))
+                            .and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                    ))
+                    .left_join(users_collections::table.on(
+                        ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
+                            // Ensure that users_collections::user_uuid is NULL for unconfirmed users.
+                            .and(users_organizations::user_uuid.eq(users_collections::user_uuid))
+                    ))
+                    .left_join(groups_users::table.on(
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                    ))
+                    .left_join(groups::table.on(
+                        groups::uuid.eq(groups_users::groups_uuid)
+                    ))
+                    .left_join(collections_groups::table.on(
+                        collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
+                            collections_groups::groups_uuid.eq(groups::uuid)
+                        )
+                    ))
+                    .filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
+                    .or_filter(users_organizations::access_all.eq(true)) // access_all in org
+                    .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
+                    .or_filter(groups::access_all.eq(true)) // Access via groups
+                    .or_filter(collections_groups::collections_uuid.is_not_null()) // Access via groups
+                    .into_boxed();
+
+                if !visible_only {
+                    query = query.or_filter(
+                        users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
+                    );
+                }
+
+                query
+                    .select(ciphers::all_columns)
+                    .distinct()
+                    .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+            }}
+        } else {
+            db_run! {conn: {
+                let mut query = ciphers::table
+                    .left_join(ciphers_collections::table.on(
+                        ciphers::uuid.eq(ciphers_collections::cipher_uuid)
+                    ))
+                    .left_join(users_organizations::table.on(
+                        ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
+                            .and(users_organizations::user_uuid.eq(user_uuid))
+                            .and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                    ))
+                    .left_join(users_collections::table.on(
+                        ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
+                            // Ensure that users_collections::user_uuid is NULL for unconfirmed users.
+                            .and(users_organizations::user_uuid.eq(users_collections::user_uuid))
+                    ))
+                    .filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
+                    .or_filter(users_organizations::access_all.eq(true)) // access_all in org
+                    .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
+                    .into_boxed();
+
+                if !visible_only {
+                    query = query.or_filter(
+                        users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
+                    );
+                }
+
+                query
+                    .select(ciphers::all_columns)
+                    .distinct()
+                    .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+            }}
+        }
     }
 
     // Find all ciphers visible to the specified user.
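`into_boxed()` is what lets both branches above share the trailing `visible_only` logic: boxing erases the query's concrete type so extra filters can be appended conditionally before execution. A reduced sketch of the pattern; the schema and connection type here are illustrative, not the project's real ones:

    use diesel::prelude::*;

    diesel::table! {
        ciphers (uuid) {
            uuid -> Text,
            user_uuid -> Nullable<Text>,
        }
    }

    // Build a base query, box it, then append filters conditionally.
    fn load_uuids(conn: &mut diesel::sqlite::SqliteConnection, owner: Option<&str>) -> Vec<String> {
        let mut query = ciphers::table.into_boxed();
        if let Some(owner) = owner {
            query = query.filter(ciphers::user_uuid.eq(owner));
        }
        query.select(ciphers::uuid).load::<String>(conn).unwrap_or_default()
    }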
@@ -705,30 +779,123 @@ impl Cipher {
     }
 
     pub async fn get_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
-        db_run! {conn: {
-            ciphers_collections::table
-                .inner_join(collections::table.on(
-                    collections::uuid.eq(ciphers_collections::collection_uuid)
-                ))
-                .inner_join(users_organizations::table.on(
-                    users_organizations::org_uuid.eq(collections::org_uuid).and(
-                        users_organizations::user_uuid.eq(user_id.clone())
-                    )
-                ))
-                .left_join(users_collections::table.on(
-                    users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
-                        users_collections::user_uuid.eq(user_id.clone())
-                    )
-                ))
-                .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
-                .filter(users_collections::user_uuid.eq(user_id).or( // User has access to collection
-                    users_organizations::access_all.eq(true).or( // User has access all
-                        users_organizations::atype.le(UserOrgType::Admin as i32) // User is admin or owner
-                    )
-                ))
-                .select(ciphers_collections::collection_uuid)
-                .load::<String>(conn).unwrap_or_default()
-        }}
+        if CONFIG.org_groups_enabled() {
+            db_run! {conn: {
+                ciphers_collections::table
+                    .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                    .inner_join(collections::table.on(
+                        collections::uuid.eq(ciphers_collections::collection_uuid)
+                    ))
+                    .left_join(users_organizations::table.on(
+                        users_organizations::org_uuid.eq(collections::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(users_collections::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(groups_users::table.on(
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                    ))
+                    .left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
+                    .left_join(collections_groups::table.on(
+                        collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(collections_groups::groups_uuid.eq(groups::uuid))
+                    ))
+                    .filter(users_organizations::access_all.eq(true) // User has access all
+                        .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                            .and(users_collections::read_only.eq(false)))
+                        .or(groups::access_all.eq(true)) // Access via groups
+                        .or(collections_groups::collections_uuid.is_not_null() // Access via groups
+                            .and(collections_groups::read_only.eq(false)))
+                    )
+                    .select(ciphers_collections::collection_uuid)
+                    .load::<String>(conn).unwrap_or_default()
+            }}
+        } else {
+            db_run! {conn: {
+                ciphers_collections::table
+                    .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                    .inner_join(collections::table.on(
+                        collections::uuid.eq(ciphers_collections::collection_uuid)
+                    ))
+                    .inner_join(users_organizations::table.on(
+                        users_organizations::org_uuid.eq(collections::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(users_collections::user_uuid.eq(user_id.clone()))
+                    ))
+                    .filter(users_organizations::access_all.eq(true) // User has access all
+                        .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                            .and(users_collections::read_only.eq(false)))
+                    )
+                    .select(ciphers_collections::collection_uuid)
+                    .load::<String>(conn).unwrap_or_default()
+            }}
+        }
+    }
+
+    pub async fn get_admin_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
+        if CONFIG.org_groups_enabled() {
+            db_run! {conn: {
+                ciphers_collections::table
+                    .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                    .inner_join(collections::table.on(
+                        collections::uuid.eq(ciphers_collections::collection_uuid)
+                    ))
+                    .left_join(users_organizations::table.on(
+                        users_organizations::org_uuid.eq(collections::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(users_collections::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(groups_users::table.on(
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                    ))
+                    .left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
+                    .left_join(collections_groups::table.on(
+                        collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(collections_groups::groups_uuid.eq(groups::uuid))
+                    ))
+                    .filter(users_organizations::access_all.eq(true) // User has access all
+                        .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                            .and(users_collections::read_only.eq(false)))
+                        .or(groups::access_all.eq(true)) // Access via groups
+                        .or(collections_groups::collections_uuid.is_not_null() // Access via groups
+                            .and(collections_groups::read_only.eq(false)))
+                        .or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
+                    )
+                    .select(ciphers_collections::collection_uuid)
+                    .load::<String>(conn).unwrap_or_default()
+            }}
+        } else {
+            db_run! {conn: {
+                ciphers_collections::table
+                    .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                    .inner_join(collections::table.on(
+                        collections::uuid.eq(ciphers_collections::collection_uuid)
+                    ))
+                    .inner_join(users_organizations::table.on(
+                        users_organizations::org_uuid.eq(collections::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_id.clone()))
+                    ))
+                    .left_join(users_collections::table.on(
+                        users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                            .and(users_collections::user_uuid.eq(user_id.clone()))
+                    ))
+                    .filter(users_organizations::access_all.eq(true) // User has access all
+                        .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                            .and(users_collections::read_only.eq(false)))
+                        .or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
+                    )
+                    .select(ciphers_collections::collection_uuid)
+                    .load::<String>(conn).unwrap_or_default()
+            }}
+        }
+    }
 
     /// Return a Vec with (cipher_uuid, collection_uuid)
diff --git a/src/db/models/collection.rs b/src/db/models/collection.rs
@@ -1,6 +1,7 @@
 use serde_json::Value;
 
-use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
+use crate::CONFIG;
 
 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -48,11 +49,11 @@ impl Collection {
 
     pub fn to_json(&self) -> Value {
         json!({
-            "ExternalId": self.external_id,
-            "Id": self.uuid,
-            "OrganizationId": self.org_uuid,
-            "Name": self.name,
-            "Object": "collection",
+            "externalId": self.external_id,
+            "id": self.uuid,
+            "organizationId": self.org_uuid,
+            "name": self.name,
+            "object": "collection",
         })
     }
 
@@ -96,11 +97,20 @@ impl Collection {
         };
 
         let mut json_object = self.to_json();
-        json_object["Object"] = json!("collectionDetails");
-        json_object["ReadOnly"] = json!(read_only);
-        json_object["HidePasswords"] = json!(hide_passwords);
+        json_object["object"] = json!("collectionDetails");
+        json_object["readOnly"] = json!(read_only);
+        json_object["hidePasswords"] = json!(hide_passwords);
         json_object
     }
+
+    pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
+        org_user.has_status(UserOrgStatus::Confirmed)
+            && (org_user.has_full_access()
+                || CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
+                || (CONFIG.org_groups_enabled()
+                    && (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
+                        || GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
+    }
 }
 
 use crate::db::DbConn;
@@ -181,58 +191,74 @@ impl Collection {
     }
 
     pub async fn find_by_user_uuid(user_uuid: String, conn: &mut DbConn) -> Vec<Self> {
-        db_run! { conn: {
-            collections::table
-            .left_join(users_collections::table.on(
-                users_collections::collection_uuid.eq(collections::uuid).and(
-                    users_collections::user_uuid.eq(user_uuid.clone())
-                )
-            ))
-            .left_join(users_organizations::table.on(
-                collections::org_uuid.eq(users_organizations::org_uuid).and(
-                    users_organizations::user_uuid.eq(user_uuid.clone())
-                )
-            ))
-            .left_join(groups_users::table.on(
-                groups_users::users_organizations_uuid.eq(users_organizations::uuid)
-            ))
-            .left_join(groups::table.on(
-                groups::uuid.eq(groups_users::groups_uuid)
-            ))
-            .left_join(collections_groups::table.on(
-                collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
-                    collections_groups::collections_uuid.eq(collections::uuid)
-                )
-            ))
-            .filter(
-                users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
-            )
-            .filter(
-                users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
-                    users_organizations::access_all.eq(true) // access_all in Organization
-                ).or(
-                    groups::access_all.eq(true) // access_all in groups
-                ).or( // access via groups
-                    groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
-                        collections_groups::collections_uuid.is_not_null()
-                    )
-                )
-            )
-            .select(collections::all_columns)
-            .distinct()
-            .load::<CollectionDb>(conn).expect("Error loading collections").from_db()
-        }}
-    }
-
-    // Check if a user has access to a specific collection
-    // FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
-    // For now this is a good solution without making to much changes.
-    pub async fn has_access_by_collection_and_user_uuid(
-        collection_uuid: &str,
-        user_uuid: &str,
-        conn: &mut DbConn,
-    ) -> bool {
-        Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
+        if CONFIG.org_groups_enabled() {
+            db_run! { conn: {
+                collections::table
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid).and(
+                        users_collections::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .left_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid).and(
+                        users_organizations::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .left_join(groups_users::table.on(
+                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                ))
+                .left_join(groups::table.on(
+                    groups::uuid.eq(groups_users::groups_uuid)
+                ))
+                .left_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
+                        collections_groups::collections_uuid.eq(collections::uuid)
+                    )
+                ))
+                .filter(
+                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                )
+                .filter(
+                    users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
+                        users_organizations::access_all.eq(true) // access_all in Organization
+                    ).or(
+                        groups::access_all.eq(true) // access_all in groups
+                    ).or( // access via groups
+                        groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
+                            collections_groups::collections_uuid.is_not_null()
+                        )
+                    )
+                )
+                .select(collections::all_columns)
+                .distinct()
+                .load::<CollectionDb>(conn).expect("Error loading collections").from_db()
+            }}
+        } else {
+            db_run! { conn: {
+                collections::table
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid).and(
+                        users_collections::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .left_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid).and(
+                        users_organizations::user_uuid.eq(user_uuid.clone())
+                    )
+                ))
+                .filter(
+                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                )
+                .filter(
+                    users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
+                        users_organizations::access_all.eq(true) // access_all in Organization
+                    )
+                )
+                .select(collections::all_columns)
+                .distinct()
+                .load::<CollectionDb>(conn).expect("Error loading collections").from_db()
+            }}
+        }
     }
 
     pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
@@ -277,91 +303,132 @@ impl Collection {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option<Self> {
|
pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option<Self> {
|
||||||
db_run! { conn: {
|
if CONFIG.org_groups_enabled() {
|
||||||
collections::table
|
db_run! { conn: {
|
||||||
.left_join(users_collections::table.on(
|
collections::table
|
||||||
users_collections::collection_uuid.eq(collections::uuid).and(
|
.left_join(users_collections::table.on(
|
||||||
users_collections::user_uuid.eq(user_uuid.clone())
|
users_collections::collection_uuid.eq(collections::uuid).and(
|
||||||
)
|
users_collections::user_uuid.eq(user_uuid.clone())
|
||||||
))
|
|
||||||
.left_join(users_organizations::table.on(
|
|
||||||
collections::org_uuid.eq(users_organizations::org_uuid).and(
|
|
||||||
users_organizations::user_uuid.eq(user_uuid)
|
|
||||||
)
|
|
||||||
))
|
|
||||||
.left_join(groups_users::table.on(
|
|
||||||
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
|
|
||||||
))
|
|
||||||
.left_join(groups::table.on(
|
|
||||||
groups::uuid.eq(groups_users::groups_uuid)
|
|
||||||
))
|
|
||||||
.left_join(collections_groups::table.on(
|
|
||||||
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
|
|
||||||
collections_groups::collections_uuid.eq(collections::uuid)
|
|
||||||
)
|
|
||||||
))
|
|
||||||
.filter(collections::uuid.eq(uuid))
|
|
||||||
.filter(
|
|
||||||
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
|
|
||||||
users_organizations::access_all.eq(true).or( // access_all in Organization
|
|
||||||
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
|
||||||
)).or(
|
|
||||||
groups::access_all.eq(true) // access_all in groups
|
|
||||||
).or( // access via groups
|
|
||||||
groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
|
|
||||||
collections_groups::collections_uuid.is_not_null()
|
|
||||||
)
|
)
|
||||||
)
|
))
|
||||||
).select(collections::all_columns)
|
.left_join(users_organizations::table.on(
|
||||||
.first::<CollectionDb>(conn).ok()
|
collections::org_uuid.eq(users_organizations::org_uuid).and(
|
||||||
.from_db()
|
users_organizations::user_uuid.eq(user_uuid)
|
||||||
}}
|
)
|
||||||
|
))
|
||||||
|
.left_join(groups_users::table.on(
|
||||||
|
groups_users::users_organizations_uuid.eq(users_organizations::uuid)
|
||||||
|
))
|
||||||
|
.left_join(groups::table.on(
|
||||||
|
groups::uuid.eq(groups_users::groups_uuid)
|
||||||
|
))
|
||||||
|
.left_join(collections_groups::table.on(
|
||||||
|
collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
|
||||||
|
collections_groups::collections_uuid.eq(collections::uuid)
|
||||||
|
)
|
||||||
|
))
|
||||||
|
.filter(collections::uuid.eq(uuid))
|
||||||
|
.filter(
|
||||||
|
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
|
||||||
|
users_organizations::access_all.eq(true).or( // access_all in Organization
|
||||||
|
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
||||||
|
)).or(
|
||||||
|
groups::access_all.eq(true) // access_all in groups
|
||||||
|
).or( // access via groups
|
||||||
|
groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
|
||||||
|
collections_groups::collections_uuid.is_not_null()
|
||||||
|
)
|
||||||
|
)
|
||||||
|
).select(collections::all_columns)
|
||||||
|
.first::<CollectionDb>(conn).ok()
|
||||||
|
.from_db()
|
||||||
|
}}
|
||||||
|
} else {
|
||||||
|
db_run! { conn: {
|
||||||
|
collections::table
|
||||||
|
.left_join(users_collections::table.on(
|
||||||
|
users_collections::collection_uuid.eq(collections::uuid).and(
|
||||||
|
users_collections::user_uuid.eq(user_uuid.clone())
|
||||||
|
)
|
||||||
|
))
|
||||||
|
.left_join(users_organizations::table.on(
|
||||||
|
collections::org_uuid.eq(users_organizations::org_uuid).and(
|
||||||
|
users_organizations::user_uuid.eq(user_uuid)
|
||||||
|
)
|
||||||
|
))
|
||||||
|
.filter(collections::uuid.eq(uuid))
|
||||||
|
.filter(
|
||||||
|
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
|
||||||
|
users_organizations::access_all.eq(true).or( // access_all in Organization
|
||||||
|
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
|
||||||
|
))
|
||||||
|
).select(collections::all_columns)
|
||||||
|
.first::<CollectionDb>(conn).ok()
|
||||||
|
.from_db()
|
||||||
|
}}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
     pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
         let user_uuid = user_uuid.to_string();
-        db_run! { conn: {
-            collections::table
-            .left_join(users_collections::table.on(
-                users_collections::collection_uuid.eq(collections::uuid).and(
-                    users_collections::user_uuid.eq(user_uuid.clone())
-                )
-            ))
-            .left_join(users_organizations::table.on(
-                collections::org_uuid.eq(users_organizations::org_uuid).and(
-                    users_organizations::user_uuid.eq(user_uuid)
-                )
-            ))
-            .left_join(groups_users::table.on(
-                groups_users::users_organizations_uuid.eq(users_organizations::uuid)
-            ))
-            .left_join(groups::table.on(
-                groups::uuid.eq(groups_users::groups_uuid)
-            ))
-            .left_join(collections_groups::table.on(
-                collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
-                    collections_groups::collections_uuid.eq(collections::uuid)
-                )
-            ))
-            .filter(collections::uuid.eq(&self.uuid))
-            .filter(
-                users_collections::collection_uuid.eq(&self.uuid).and(users_collections::read_only.eq(false)).or( // Directly accessed collection
-                    users_organizations::access_all.eq(true).or( // access_all in Organization
-                        users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
-                )).or(
-                    groups::access_all.eq(true) // access_all in groups
-                ).or( // access via groups
-                    groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
-                        collections_groups::collections_uuid.is_not_null().and(
-                            collections_groups::read_only.eq(false))
-                    )
-                )
-            )
-            .count()
-            .first::<i64>(conn)
-            .ok()
-            .unwrap_or(0) != 0
-        }}
+        if CONFIG.org_groups_enabled() {
+            db_run! { conn: {
+                collections::table
+                .filter(collections::uuid.eq(&self.uuid))
+                .inner_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid)
+                        .and(users_organizations::user_uuid.eq(user_uuid.clone()))
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid)
+                        .and(users_collections::user_uuid.eq(user_uuid))
+                ))
+                .left_join(groups_users::table.on(
+                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                ))
+                .left_join(groups::table.on(
+                    groups::uuid.eq(groups_users::groups_uuid)
+                ))
+                .left_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid)
+                        .and(collections_groups::collections_uuid.eq(collections::uuid))
+                ))
+                .filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                    .or(users_organizations::access_all.eq(true)) // access_all via membership
+                    .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
+                        .and(users_collections::read_only.eq(false)))
+                    .or(groups::access_all.eq(true)) // access_all via group
+                    .or(collections_groups::collections_uuid.is_not_null() // write access given via group
+                        .and(collections_groups::read_only.eq(false)))
+                )
+                .count()
+                .first::<i64>(conn)
+                .ok()
+                .unwrap_or(0) != 0
+            }}
+        } else {
+            db_run! { conn: {
+                collections::table
+                .filter(collections::uuid.eq(&self.uuid))
+                .inner_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid)
+                        .and(users_organizations::user_uuid.eq(user_uuid.clone()))
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid)
+                        .and(users_collections::user_uuid.eq(user_uuid))
+                ))
+                .filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                    .or(users_organizations::access_all.eq(true)) // access_all via membership
+                    .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
+                        .and(users_collections::read_only.eq(false)))
+                )
+                .count()
+                .first::<i64>(conn)
+                .ok()
+                .unwrap_or(0) != 0
+            }}
+        }
     }
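Write access follows the same shape as read access but additionally requires read_only == false on whichever row granted the access. A hedged usage sketch — the handler context and variable names are invented, err! is the crate's early-return error macro:

// Hypothetical route guard; `collection`, `user_id` and `conn` are assumed
// to already be in scope in an API handler.
if !collection.is_writable_by_user(&user_id, &mut conn).await {
    err!("Collection is not writable by the current user");
}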
     pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {

@@ -581,7 +648,7 @@ impl CollectionUser {
         db_run! { conn: {
             for user in collectionusers {
-                diesel::delete(users_collections::table.filter(
+                let _: () = diesel::delete(users_collections::table.filter(
                     users_collections::user_uuid.eq(user_uuid)
                         .and(users_collections::collection_uuid.eq(user.collection_uuid))
                 ))
@@ -591,6 +658,10 @@ impl CollectionUser {
             Ok(())
         }}
     }
+
+    pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
+        Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
+    }
 }
 /// Database methods
@@ -59,12 +59,7 @@ impl Device {
         self.twofactor_remember = None;
     }

-    pub fn refresh_tokens(
-        &mut self,
-        user: &super::User,
-        orgs: Vec<super::UserOrganization>,
-        scope: Vec<String>,
-    ) -> (String, i64) {
+    pub fn refresh_tokens(&mut self, user: &super::User, scope: Vec<String>) -> (String, i64) {
         // If there is no refresh token, we create one
         if self.refresh_token.is_empty() {
             use data_encoding::BASE64URL;
@@ -72,13 +67,20 @@ impl Device {
         }

         // Update the expiration of the device and the last update date
-        let time_now = Utc::now().naive_utc();
-        self.updated_at = time_now;
+        let time_now = Utc::now();
+        self.updated_at = time_now.naive_utc();

-        let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
-        let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
-        let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
-        let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();
+        // ---
+        // Disabled these keys from being added to the JWT since they could cause the JWT to get too large
+        // Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden clients
+        // Because these might get used in the future, and they are added by the Bitwarden server, let's keep them, but commented out
+        // ---
+        // fn arg: orgs: Vec<super::UserOrganization>,
+        // ---
+        // let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect();
+        // let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect();
+        // let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect();
+        // let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect();

         // Create the JWT claims struct, to send to the client
         use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
@@ -93,11 +95,16 @@ impl Device {
             email: user.email.clone(),
             email_verified: !CONFIG.mail_enabled() || user.verified_at.is_some(),

-            orgowner,
-            orgadmin,
-            orguser,
-            orgmanager,
+            // ---
+            // Disabled these keys from being added to the JWT since they could cause the JWT to get too large
+            // Also, these key/value pairs are not used anywhere by either Vaultwarden or Bitwarden clients
+            // Because these might get used in the future, and they are added by the Bitwarden server, let's keep them, but commented out
+            // See: https://github.com/dani-garcia/vaultwarden/issues/4156
+            // ---
+            // orgowner,
+            // orgadmin,
+            // orguser,
+            // orgmanager,
             sstamp: user.security_stamp.clone(),
             device: self.uuid.clone(),
             scope,
@@ -106,6 +113,14 @@ impl Device {

         (encode_jwt(&claims), DEFAULT_VALIDITY.num_seconds())
     }
+
+    pub fn is_push_device(&self) -> bool {
+        matches!(DeviceType::from_i32(self.atype), DeviceType::Android | DeviceType::Ios)
+    }
+
+    pub fn is_registered(&self) -> bool {
+        self.push_uuid.is_some()
+    }
 }
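The org-role vectors were dropped from the claims because the JWT travels in the Authorization header of every request, so its size grows with each organization a user belongs to; large memberships could push requests over proxy header limits (see the issue referenced above). A back-of-the-envelope sketch of the per-organization overhead — the 36-byte figure is the textual UUID length, and the 4/3 factor approximates base64 expansion of the signed payload:

// Rough cost of keeping the org arrays in the token: each membership adds
// one 36-char uuid plus JSON quoting/commas, inflated by base64url encoding.
fn extra_token_bytes(org_count: usize) -> usize {
    org_count * (36 + 3) * 4 / 3
}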
 use crate::db::DbConn;

@@ -203,6 +218,7 @@ impl Device {
             .from_db()
         }}
     }

     pub async fn find_push_devices_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             devices::table
@@ -58,11 +58,11 @@ impl EmergencyAccess {

     pub fn to_json(&self) -> Value {
         json!({
-            "Id": self.uuid,
-            "Status": self.status,
-            "Type": self.atype,
-            "WaitTimeDays": self.wait_time_days,
-            "Object": "emergencyAccess",
+            "id": self.uuid,
+            "status": self.status,
+            "type": self.atype,
+            "waitTimeDays": self.wait_time_days,
+            "object": "emergencyAccess",
         })
     }

@@ -70,36 +70,43 @@ impl EmergencyAccess {
         let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found.");

         json!({
-            "Id": self.uuid,
-            "Status": self.status,
-            "Type": self.atype,
-            "WaitTimeDays": self.wait_time_days,
-            "GrantorId": grantor_user.uuid,
-            "Email": grantor_user.email,
-            "Name": grantor_user.name,
-            "Object": "emergencyAccessGrantorDetails",
+            "id": self.uuid,
+            "status": self.status,
+            "type": self.atype,
+            "waitTimeDays": self.wait_time_days,
+            "grantorId": grantor_user.uuid,
+            "email": grantor_user.email,
+            "name": grantor_user.name,
+            "object": "emergencyAccessGrantorDetails",
         })
     }

-    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
+    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
         let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
-            Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
+            User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
         } else if let Some(email) = self.email.as_deref() {
-            Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
+            match User::find_by_mail(email, conn).await {
+                Some(user) => user,
+                None => {
+                    // remove outstanding invitations which should not exist
+                    let _ = Self::delete_all_by_grantee_email(email, conn).await;
+                    return None;
+                }
+            }
         } else {
-            None
+            return None;
         };

-        json!({
-            "Id": self.uuid,
-            "Status": self.status,
-            "Type": self.atype,
-            "WaitTimeDays": self.wait_time_days,
-            "GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
-            "Email": grantee_user.as_ref().map_or("", |u| &u.email),
-            "Name": grantee_user.as_ref().map_or("", |u| &u.name),
-            "Object": "emergencyAccessGranteeDetails",
-        })
+        Some(json!({
+            "id": self.uuid,
+            "status": self.status,
+            "type": self.atype,
+            "waitTimeDays": self.wait_time_days,
+            "granteeId": grantee_user.uuid,
+            "email": grantee_user.email,
+            "name": grantee_user.name,
+            "object": "emergencyAccessGranteeDetails",
+        }))
     }
 }
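Since to_json_grantee_details now returns Option<Value>, callers can drop stale invitations silently instead of panicking on a missing user. A plausible call site, sketched with an assumed loop shape rather than copied from the API layer:

// Hypothetical assembly of the grantee list for the grantor's view.
let mut grantees = Vec::new();
for ea in EmergencyAccess::find_all_by_grantor_uuid(&user_uuid, &mut conn).await {
    // None means the invited user no longer exists; the call has already
    // cleaned up the dangling invitation as shown above.
    if let Some(json) = ea.to_json_grantee_details(&mut conn).await {
        grantees.push(json);
    }
}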
@@ -174,7 +181,7 @@ impl EmergencyAccess {
         // Update the grantee so that it will refresh its status.
         User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await;
         self.status = status;
-        self.updated_at = date.to_owned();
+        date.clone_into(&mut self.updated_at);

         db_run! {conn: {
             crate::util::retry(|| {
@@ -192,7 +199,7 @@ impl EmergencyAccess {
         conn: &mut DbConn,
     ) -> EmptyResult {
         self.last_notification_at = Some(date.to_owned());
-        self.updated_at = date.to_owned();
+        date.clone_into(&mut self.updated_at);

         db_run! {conn: {
             crate::util::retry(|| {
@@ -214,6 +221,13 @@ impl EmergencyAccess {
         Ok(())
     }

+    pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
+        for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
+            ea.delete(conn).await?;
+        }
+        Ok(())
+    }
+
     pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.grantor_uuid, conn).await;

@@ -224,15 +238,6 @@ impl EmergencyAccess {
         }}
     }

-    pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
-        db_run! { conn: {
-            emergency_access::table
-                .filter(emergency_access::uuid.eq(uuid))
-                .first::<EmergencyAccessDb>(conn)
-                .ok().from_db()
-        }}
-    }
-
     pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
         grantor_uuid: &str,
         grantee_uuid: &str,
@@ -267,6 +272,26 @@ impl EmergencyAccess {
         }}
     }

+    pub async fn find_by_uuid_and_grantee_uuid(uuid: &str, grantee_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::uuid.eq(uuid))
+                .filter(emergency_access::grantee_uuid.eq(grantee_uuid))
+                .first::<EmergencyAccessDb>(conn)
+                .ok().from_db()
+        }}
+    }
+
+    pub async fn find_by_uuid_and_grantee_email(uuid: &str, grantee_email: &str, conn: &mut DbConn) -> Option<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::uuid.eq(uuid))
+                .filter(emergency_access::email.eq(grantee_email))
+                .first::<EmergencyAccessDb>(conn)
+                .ok().from_db()
+        }}
+    }
+
     pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             emergency_access::table
@@ -285,6 +310,15 @@ impl EmergencyAccess {
         }}
     }

+    pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::email.eq(grantee_email))
+                .filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
+                .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
+        }}
+    }
+
     pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             emergency_access::table
@@ -292,6 +326,21 @@ impl EmergencyAccess {
             .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
         }}
     }

+    pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
+        if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
+            err!("User email does not match invite.");
+        }
+
+        if self.status == EmergencyAccessStatus::Accepted as i32 {
+            err!("Emergency contact already accepted.");
+        }
+
+        self.status = EmergencyAccessStatus::Accepted as i32;
+        self.grantee_uuid = Some(String::from(grantee_uuid));
+        self.email = None;
+        self.save(conn).await
+    }
 }

 // endregion
@@ -3,7 +3,7 @@ use serde_json::Value;

 use crate::{api::EmptyResult, error::MapResult, CONFIG};

-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{NaiveDateTime, TimeDelta, Utc};

 // https://bitwarden.com/help/event-logs/

@@ -316,7 +316,7 @@ impl Event {

     pub async fn clean_events(conn: &mut DbConn) -> EmptyResult {
         if let Some(days_to_retain) = CONFIG.events_days_retain() {
-            let dt = Utc::now().naive_utc() - Duration::days(days_to_retain);
+            let dt = Utc::now().naive_utc() - TimeDelta::try_days(days_to_retain).unwrap();
             db_run! { conn: {
                 diesel::delete(event::table.filter(event::event_date.lt(dt)))
                     .execute(conn)
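TimeDelta::try_days (chrono 0.4.34+, where TimeDelta supersedes the old Duration alias) returns None instead of panicking when the day count overflows the representable range, which is why the call sites unwrap explicitly. A small standalone sketch of the cutoff computation, assuming that chrono version:

use chrono::{NaiveDateTime, TimeDelta, Utc};

// Events older than the cutoff get deleted; try_days only returns None for
// values around ±10^11 days, so unwrap() cannot fire for a sane setting.
fn retention_cutoff(days_to_retain: i64) -> NaiveDateTime {
    Utc::now().naive_utc() - TimeDelta::try_days(days_to_retain).unwrap()
}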
@@ -43,10 +43,10 @@ impl Folder {
         use crate::util::format_date;

         json!({
-            "Id": self.uuid,
-            "RevisionDate": format_date(&self.updated_at),
-            "Name": self.name,
-            "Object": "folder",
+            "id": self.uuid,
+            "revisionDate": format_date(&self.updated_at),
+            "name": self.name,
+            "object": "folder",
         })
     }
 }
@@ -58,14 +58,14 @@ impl Group {
         use crate::util::format_date;

         json!({
-            "Id": self.uuid,
-            "OrganizationId": self.organizations_uuid,
-            "Name": self.name,
-            "AccessAll": self.access_all,
-            "ExternalId": self.external_id,
-            "CreationDate": format_date(&self.creation_date),
-            "RevisionDate": format_date(&self.revision_date),
-            "Object": "group"
+            "id": self.uuid,
+            "organizationId": self.organizations_uuid,
+            "name": self.name,
+            "accessAll": self.access_all,
+            "externalId": self.external_id,
+            "creationDate": format_date(&self.creation_date),
+            "revisionDate": format_date(&self.revision_date),
+            "object": "group"
         })
     }

@@ -75,21 +75,21 @@ impl Group {
             .iter()
             .map(|entry| {
                 json!({
-                    "Id": entry.collections_uuid,
-                    "ReadOnly": entry.read_only,
-                    "HidePasswords": entry.hide_passwords
+                    "id": entry.collections_uuid,
+                    "readOnly": entry.read_only,
+                    "hidePasswords": entry.hide_passwords
                 })
             })
             .collect();

         json!({
-            "Id": self.uuid,
-            "OrganizationId": self.organizations_uuid,
-            "Name": self.name,
-            "AccessAll": self.access_all,
-            "ExternalId": self.external_id,
-            "Collections": collections_groups,
-            "Object": "groupDetails"
+            "id": self.uuid,
+            "organizationId": self.organizations_uuid,
+            "name": self.name,
+            "accessAll": self.access_all,
+            "externalId": self.external_id,
+            "collections": collections_groups,
+            "object": "groupDetails"
         })
     }

@@ -203,10 +203,11 @@ impl Group {
         }}
     }

-    pub async fn find_by_external_id(id: &str, conn: &mut DbConn) -> Option<Self> {
+    pub async fn find_by_external_id_and_org(external_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
         db_run! { conn: {
             groups::table
-                .filter(groups::external_id.eq(id))
+                .filter(groups::external_id.eq(external_id))
+                .filter(groups::organizations_uuid.eq(org_uuid))
                 .first::<GroupDb>(conn)
                 .ok()
                 .from_db()
@@ -486,6 +487,39 @@ impl GroupUser {
         }}
     }

+    pub async fn has_access_to_collection_by_member(
+        collection_uuid: &str,
+        member_uuid: &str,
+        conn: &mut DbConn,
+    ) -> bool {
+        db_run! { conn: {
+            groups_users::table
+                .inner_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid)
+                ))
+                .filter(collections_groups::collections_uuid.eq(collection_uuid))
+                .filter(groups_users::users_organizations_uuid.eq(member_uuid))
+                .count()
+                .first::<i64>(conn)
+                .unwrap_or(0) != 0
+        }}
+    }
+
+    pub async fn has_full_access_by_member(org_uuid: &str, member_uuid: &str, conn: &mut DbConn) -> bool {
+        db_run! { conn: {
+            groups_users::table
+                .inner_join(groups::table.on(
+                    groups::uuid.eq(groups_users::groups_uuid)
+                ))
+                .filter(groups::organizations_uuid.eq(org_uuid))
+                .filter(groups::access_all.eq(true))
+                .filter(groups_users::users_organizations_uuid.eq(member_uuid))
+                .count()
+                .first::<i64>(conn)
+                .unwrap_or(0) != 0
+        }}
+    }
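The two new helpers answer "does this member reach this collection through any group" without loading the group rows into memory. A hedged usage sketch — the variable names are invented, and member_uuid refers to the users_organizations row uuid:

// Hypothetical permission check combining the membership flag with the
// group-based helpers added above.
let has_access = member.access_all
    || GroupUser::has_full_access_by_member(&org_uuid, &member.uuid, &mut conn).await
    || GroupUser::has_access_to_collection_by_member(&collection_uuid, &member.uuid, &mut conn).await;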
     pub async fn update_user_revision(&self, conn: &mut DbConn) {
         match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await {
             Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
@@ -4,7 +4,6 @@ use serde_json::Value;
 use crate::api::EmptyResult;
 use crate::db::DbConn;
 use crate::error::MapResult;
-use crate::util::UpCase;

 use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};

@@ -39,16 +38,18 @@ pub enum OrgPolicyType {

 // https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct SendOptionsPolicyData {
-    pub DisableHideEmail: bool,
+    #[serde(rename = "disableHideEmail", alias = "DisableHideEmail")]
+    pub disable_hide_email: bool,
 }

 // https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct ResetPasswordDataModel {
-    pub AutoEnrollEnabled: bool,
+    #[serde(rename = "autoEnrollEnabled", alias = "AutoEnrollEnabled")]
+    pub auto_enroll_enabled: bool,
 }
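The rename/alias pair keeps deserialization backward compatible: policies written by older versions stored PascalCase keys, while new writes use camelCase, and both must parse. A self-contained sketch of that behaviour:

use serde::Deserialize;

#[derive(Deserialize)]
struct SendOptionsPolicyData {
    #[serde(rename = "disableHideEmail", alias = "DisableHideEmail")]
    disable_hide_email: bool,
}

fn main() {
    // Both the new camelCase key and the legacy PascalCase key deserialize
    // into the same field.
    let new_style: SendOptionsPolicyData =
        serde_json::from_str(r#"{"disableHideEmail": true}"#).unwrap();
    let old_style: SendOptionsPolicyData =
        serde_json::from_str(r#"{"DisableHideEmail": true}"#).unwrap();
    assert!(new_style.disable_hide_email && old_style.disable_hide_email);
}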
 pub type OrgPolicyResult = Result<(), OrgPolicyErr>;

@@ -78,12 +79,12 @@ impl OrgPolicy {
     pub fn to_json(&self) -> Value {
         let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
         json!({
-            "Id": self.uuid,
-            "OrganizationId": self.org_uuid,
-            "Type": self.atype,
-            "Data": data_json,
-            "Enabled": self.enabled,
-            "Object": "policy",
+            "id": self.uuid,
+            "organizationId": self.org_uuid,
+            "type": self.atype,
+            "data": data_json,
+            "enabled": self.enabled,
+            "object": "policy",
         })
     }
 }
@@ -114,7 +115,7 @@ impl OrgPolicy {
         // We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
         // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
         // not support multiple constraints on ON CONFLICT clauses.
-        diesel::delete(
+        let _: () = diesel::delete(
             org_policies::table
                 .filter(org_policies::org_uuid.eq(&self.org_uuid))
                 .filter(org_policies::atype.eq(&self.atype)),
@@ -307,9 +308,9 @@ impl OrgPolicy {

     pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool {
         match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
-            Some(policy) => match serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data) {
+            Some(policy) => match serde_json::from_str::<ResetPasswordDataModel>(&policy.data) {
                 Ok(opts) => {
-                    return policy.enabled && opts.data.AutoEnrollEnabled;
+                    return policy.enabled && opts.auto_enroll_enabled;
                 }
                 _ => error!("Failed to deserialize ResetPasswordDataModel: {}", policy.data),
             },
@@ -327,9 +328,9 @@ impl OrgPolicy {
         {
             if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
                 if user.atype < UserOrgType::Admin {
-                    match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) {
+                    match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
                         Ok(opts) => {
-                            if opts.data.DisableHideEmail {
+                            if opts.disable_hide_email {
                                 return true;
                             }
                         }
@@ -340,4 +341,11 @@ impl OrgPolicy {
         }
         false
     }

+    pub async fn is_enabled_by_org(org_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> bool {
+        if let Some(policy) = OrgPolicy::find_by_org_and_type(org_uuid, policy_type, conn).await {
+            return policy.enabled;
+        }
+        false
+    }
 }
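is_enabled_by_org collapses the common "is this policy active for the org" question into one call. A plausible call site, with invented surrounding names:

// Hypothetical guard before an org-wide reset-password action; err! is the
// crate's early-return macro.
if !OrgPolicy::is_enabled_by_org(&org_uuid, OrgPolicyType::ResetPassword, &mut conn).await {
    err!("Reset password policy is not enabled for this organization");
}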
@@ -153,39 +153,39 @@ impl Organization {
     // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/Organizations/OrganizationResponseModel.cs
     pub fn to_json(&self) -> Value {
         json!({
-            "Id": self.uuid,
-            "Identifier": null, // not supported by us
-            "Name": self.name,
-            "Seats": 10, // The value doesn't matter, we don't check server-side
-            // "MaxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
-            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
-            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
-            "Use2fa": true,
-            "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
-            "UseEvents": CONFIG.org_events_enabled(),
-            "UseGroups": CONFIG.org_groups_enabled(),
-            "UseTotp": true,
-            "UsePolicies": true,
-            // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
-            "UseSso": false, // Not supported
-            // "UseKeyConnector": false, // Not supported
-            "SelfHost": true,
-            "UseApi": true,
-            "HasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
-            "UseResetPassword": CONFIG.mail_enabled(),
-
-            "BusinessName": null,
-            "BusinessAddress1": null,
-            "BusinessAddress2": null,
-            "BusinessAddress3": null,
-            "BusinessCountry": null,
-            "BusinessTaxNumber": null,
-
-            "BillingEmail": self.billing_email,
-            "Plan": "TeamsAnnually",
-            "PlanType": 5, // TeamsAnnually plan
-            "UsersGetPremium": true,
-            "Object": "organization",
+            "id": self.uuid,
+            "identifier": null, // not supported by us
+            "name": self.name,
+            "seats": 10, // The value doesn't matter, we don't check server-side
+            // "maxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
+            "maxCollections": 10, // The value doesn't matter, we don't check server-side
+            "maxStorageGb": 10, // The value doesn't matter, we don't check server-side
+            "use2fa": true,
+            "useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
+            "useEvents": CONFIG.org_events_enabled(),
+            "useGroups": CONFIG.org_groups_enabled(),
+            "useTotp": true,
+            "usePolicies": true,
+            // "useScim": false, // Not supported (Not AGPLv3 Licensed)
+            "useSso": false, // Not supported
+            // "useKeyConnector": false, // Not supported
+            "selfHost": true,
+            "useApi": true,
+            "hasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
+            "useResetPassword": CONFIG.mail_enabled(),
+
+            "businessName": null,
+            "businessAddress1": null,
+            "businessAddress2": null,
+            "businessAddress3": null,
+            "businessCountry": null,
+            "businessTaxNumber": null,
+
+            "billingEmail": self.billing_email,
+            "plan": "TeamsAnnually",
+            "planType": 5, // TeamsAnnually plan
+            "usersGetPremium": true,
+            "object": "organization",
         })
     }
 }
@@ -214,7 +214,7 @@ impl UserOrganization {
     }

     pub fn restore(&mut self) -> bool {
-        if self.status < UserOrgStatus::Accepted as i32 {
+        if self.status < UserOrgStatus::Invited as i32 {
             self.status += ACTIVATE_REVOKE_DIFF;
             return true;
         }
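The one-line restore() change fixes a subtle bug: revoking subtracts ACTIVATE_REVOKE_DIFF from the status, so every revoked member sits below zero, but the old "< Accepted" comparison also matched an active Invited (0) member and would wrongly shift it upward. A compact model of the arithmetic — the enum values match the usual UserOrgStatus layout, and the constant's exact value is assumed here, not shown in this hunk:

// Status model: revoking subtracts ACTIVATE_REVOKE_DIFF, restoring adds it back.
const ACTIVATE_REVOKE_DIFF: i32 = 128; // assumed value for illustration
const INVITED: i32 = 0;  // UserOrgStatus::Invited
const ACCEPTED: i32 = 1; // UserOrgStatus::Accepted

fn restore(status: &mut i32) -> bool {
    // Old check was `*status < ACCEPTED`, which is also true for an active
    // Invited (0) member and would wrongly bump it to 128.
    if *status < INVITED { // only revoked members have negative status
        *status += ACTIVATE_REVOKE_DIFF;
        return true;
    }
    false
}

fn main() {
    let mut invited = INVITED;
    assert!(!restore(&mut invited)); // active invite stays untouched
    let mut revoked_invited = INVITED - ACTIVATE_REVOKE_DIFF;
    assert!(restore(&mut revoked_invited));
    assert_eq!(revoked_invited, INVITED);
}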
@@ -316,6 +316,7 @@ impl Organization {
         UserOrganization::delete_all_by_organization(&self.uuid, conn).await?;
         OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?;
         Group::delete_all_by_organization(&self.uuid, conn).await?;
+        OrganizationApiKey::delete_all_by_organization(&self.uuid, conn).await?;

         db_run! { conn: {
             diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
@@ -344,65 +345,81 @@ impl UserOrganization {
     pub async fn to_json(&self, conn: &mut DbConn) -> Value {
         let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();

+        let permissions = json!({
+            // TODO: Add support for Custom User Roles
+            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
+            "accessEventLogs": false,
+            "accessImportExport": false,
+            "accessReports": false,
+            "createNewCollections": false,
+            "editAnyCollection": false,
+            "deleteAnyCollection": false,
+            "editAssignedCollections": false,
+            "deleteAssignedCollections": false,
+            "manageGroups": false,
+            "managePolicies": false,
+            "manageSso": false, // Not supported
+            "manageUsers": false,
+            "manageResetPassword": false,
+            "manageScim": false // Not supported (Not AGPLv3 Licensed)
+        });
+
         // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
         json!({
-            "Id": self.org_uuid,
-            "Identifier": null, // Not supported
-            "Name": org.name,
-            "Seats": 10, // The value doesn't matter, we don't check server-side
-            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
-            "UsersGetPremium": true,
-            "Use2fa": true,
-            "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
-            "UseEvents": CONFIG.org_events_enabled(),
-            "UseGroups": CONFIG.org_groups_enabled(),
-            "UseTotp": true,
-            // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
-            "UsePolicies": true,
-            "UseApi": true,
-            "SelfHost": true,
-            "HasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
-            "ResetPasswordEnrolled": self.reset_password_key.is_some(),
-            "UseResetPassword": CONFIG.mail_enabled(),
-            "SsoBound": false, // Not supported
-            "UseSso": false, // Not supported
-            "ProviderId": null,
-            "ProviderName": null,
-            // "KeyConnectorEnabled": false,
-            // "KeyConnectorUrl": null,
-
-            // TODO: Add support for Custom User Roles
-            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
-            // "Permissions": {
-            //     "AccessEventLogs": false,
-            //     "AccessImportExport": false,
-            //     "AccessReports": false,
-            //     "ManageAllCollections": false,
-            //     "CreateNewCollections": false,
-            //     "EditAnyCollection": false,
-            //     "DeleteAnyCollection": false,
-            //     "ManageAssignedCollections": false,
-            //     "editAssignedCollections": false,
-            //     "deleteAssignedCollections": false,
-            //     "ManageCiphers": false,
-            //     "ManageGroups": false,
-            //     "ManagePolicies": false,
-            //     "ManageResetPassword": false,
-            //     "ManageSso": false, // Not supported
-            //     "ManageUsers": false,
-            //     "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
-            // },
-
-            "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
+            "id": self.org_uuid,
+            "identifier": null, // Not supported
+            "name": org.name,
+            "seats": 10, // The value doesn't matter, we don't check server-side
+            "maxCollections": 10, // The value doesn't matter, we don't check server-side
+            "usersGetPremium": true,
+            "use2fa": true,
+            "useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
+            "useEvents": CONFIG.org_events_enabled(),
+            "useGroups": CONFIG.org_groups_enabled(),
+            "useTotp": true,
+            "useScim": false, // Not supported (Not AGPLv3 Licensed)
+            "usePolicies": true,
+            "useApi": true,
+            "selfHost": true,
+            "hasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
+            "resetPasswordEnrolled": self.reset_password_key.is_some(),
+            "useResetPassword": CONFIG.mail_enabled(),
+            "ssoBound": false, // Not supported
+            "useSso": false, // Not supported
+            "useKeyConnector": false,
+            "useSecretsManager": false,
+            "usePasswordManager": true,
+            "useCustomPermissions": false,
+            "useActivateAutofillPolicy": false,
+
+            "providerId": null,
+            "providerName": null,
+            "providerType": null,
+            "familySponsorshipFriendlyName": null,
+            "familySponsorshipAvailable": false,
+            "planProductType": 0,
+            "keyConnectorEnabled": false,
+            "keyConnectorUrl": null,
+            "familySponsorshipLastSyncDate": null,
+            "familySponsorshipValidUntil": null,
+            "familySponsorshipToDelete": null,
+            "accessSecretsManager": false,
+            "limitCollectionCreationDeletion": true,
+            "allowAdminAccessToAllCollectionItems": true,
+            "flexibleCollections": false,
+
+            "permissions": permissions,
+
+            "maxStorageGb": 10, // The value doesn't matter, we don't check server-side

             // These are per user
-            "UserId": self.user_uuid,
-            "Key": self.akey,
-            "Status": self.status,
-            "Type": self.atype,
-            "Enabled": true,
+            "userId": self.user_uuid,
+            "key": self.akey,
+            "status": self.status,
+            "type": self.atype,
+            "enabled": true,

-            "Object": "profileOrganization",
+            "object": "profileOrganization",
         })
     }
@@ -438,9 +455,9 @@ impl UserOrganization {
             .iter()
             .map(|cu| {
                 json!({
-                    "Id": cu.collection_uuid,
-                    "ReadOnly": cu.read_only,
-                    "HidePasswords": cu.hide_passwords,
+                    "id": cu.collection_uuid,
+                    "readOnly": cu.read_only,
+                    "hidePasswords": cu.hide_passwords,
                 })
             })
             .collect()
@@ -449,29 +466,29 @@ impl UserOrganization {
         };

         json!({
-            "Id": self.uuid,
-            "UserId": self.user_uuid,
-            "Name": user.name,
-            "Email": user.email,
-            "ExternalId": self.external_id,
-            "Groups": groups,
-            "Collections": collections,
-
-            "Status": status,
-            "Type": self.atype,
-            "AccessAll": self.access_all,
-            "TwoFactorEnabled": twofactor_enabled,
-            "ResetPasswordEnrolled": self.reset_password_key.is_some(),
-
-            "Object": "organizationUserUserDetails",
+            "id": self.uuid,
+            "userId": self.user_uuid,
+            "name": user.name,
+            "email": user.email,
+            "externalId": self.external_id,
+            "groups": groups,
+            "collections": collections,
+
+            "status": status,
+            "type": self.atype,
+            "accessAll": self.access_all,
+            "twoFactorEnabled": twofactor_enabled,
+            "resetPasswordEnrolled": self.reset_password_key.is_some(),
+
+            "object": "organizationUserUserDetails",
         })
     }

     pub fn to_json_user_access_restrictions(&self, col_user: &CollectionUser) -> Value {
         json!({
-            "Id": self.uuid,
-            "ReadOnly": col_user.read_only,
-            "HidePasswords": col_user.hide_passwords,
+            "id": self.uuid,
+            "readOnly": col_user.read_only,
+            "hidePasswords": col_user.hide_passwords,
         })
     }

@@ -485,9 +502,9 @@ impl UserOrganization {
             .iter()
             .map(|c| {
                 json!({
-                    "Id": c.collection_uuid,
-                    "ReadOnly": c.read_only,
-                    "HidePasswords": c.hide_passwords,
+                    "id": c.collection_uuid,
+                    "readOnly": c.read_only,
+                    "hidePasswords": c.hide_passwords,
                 })
             })
             .collect()
@@ -502,15 +519,15 @@ impl UserOrganization {
         };

         json!({
-            "Id": self.uuid,
-            "UserId": self.user_uuid,
-
-            "Status": status,
-            "Type": self.atype,
-            "AccessAll": self.access_all,
-            "Collections": coll_uuids,
-
-            "Object": "organizationUserDetails",
+            "id": self.uuid,
+            "userId": self.user_uuid,
+
+            "status": status,
+            "type": self.atype,
+            "accessAll": self.access_all,
+            "collections": coll_uuids,
+
+            "object": "organizationUserDetails",
         })
     }
     pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
@@ -648,8 +665,7 @@ impl UserOrganization {
         db_run! { conn: {
             users_organizations::table
                 .filter(users_organizations::user_uuid.eq(user_uuid))
-                .filter(users_organizations::status.eq(UserOrgStatus::Accepted as i32))
-                .or_filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                .filter(users_organizations::status.eq(UserOrgStatus::Accepted as i32).or(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)))
                 .count()
                 .first::<i64>(conn)
                 .unwrap_or(0)
@@ -665,6 +681,16 @@ impl UserOrganization {
         }}
     }

+    pub async fn find_confirmed_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                .load::<UserOrganizationDb>(conn)
+                .unwrap_or_default().from_db()
+        }}
+    }
+
     pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
         db_run! { conn: {
             users_organizations::table
@@ -769,6 +795,32 @@ impl UserOrganization {
         }}
     }

+    pub async fn find_by_cipher_and_org_with_group(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .inner_join(groups_users::table.on(
+                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                ))
+                .left_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid)
+                ))
+                .left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
+                .left_join(ciphers_collections::table.on(
+                    ciphers_collections::collection_uuid.eq(collections_groups::collections_uuid).and(ciphers_collections::cipher_uuid.eq(&cipher_uuid))
+                ))
+                .filter(
+                    groups::access_all.eq(true).or( // AccessAll via groups
+                        ciphers_collections::cipher_uuid.eq(&cipher_uuid) // ..or access to collection via group
+                    )
+                )
+                .select(users_organizations::all_columns)
+                .distinct()
+                .load::<UserOrganizationDb>(conn).expect("Error loading user organizations with groups").from_db()
+        }}
+    }
+
     pub async fn user_has_ge_admin_access_to_cipher(user_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> bool {
         db_run! { conn: {
             users_organizations::table
@@ -852,6 +904,14 @@ impl OrganizationApiKey {
                 .ok().from_db()
         }}
     }

+    pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+        db_run! { conn: {
+            diesel::delete(organization_api_key::table.filter(organization_api_key::org_uuid.eq(org_uuid)))
+                .execute(conn)
+                .map_res("Error removing organization api key from organization")
+        }}
+    }
 }

 #[cfg(test)]
@@ -1,6 +1,8 @@
 use chrono::{NaiveDateTime, Utc};
 use serde_json::Value;

+use crate::util::LowerCase;
+
 use super::User;

 db_object! {
@@ -122,48 +124,58 @@ impl Send {
         use data_encoding::BASE64URL_NOPAD;
         use uuid::Uuid;

-        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
+        let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
+
+        // Mobile clients expect size to be a string instead of a number
+        if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
+            data["size"] = Value::String(size.to_string());
+        }

         json!({
-            "Id": self.uuid,
-            "AccessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
-            "Type": self.atype,
-
-            "Name": self.name,
-            "Notes": self.notes,
-            "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
-            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
-
-            "Key": self.akey,
-            "MaxAccessCount": self.max_access_count,
-            "AccessCount": self.access_count,
-            "Password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
-            "Disabled": self.disabled,
-            "HideEmail": self.hide_email,
-
-            "RevisionDate": format_date(&self.revision_date),
-            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
-            "DeletionDate": format_date(&self.deletion_date),
-            "Object": "send",
+            "id": self.uuid,
+            "accessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
+            "type": self.atype,
+
+            "name": self.name,
+            "notes": self.notes,
+            "text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
+            "file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
+
+            "key": self.akey,
+            "maxAccessCount": self.max_access_count,
+            "accessCount": self.access_count,
+            "password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
+            "disabled": self.disabled,
+            "hideEmail": self.hide_email,
+
+            "revisionDate": format_date(&self.revision_date),
+            "expirationDate": self.expiration_date.as_ref().map(format_date),
+            "deletionDate": format_date(&self.deletion_date),
+            "object": "send",
         })
     }

     pub async fn to_json_access(&self, conn: &mut DbConn) -> Value {
         use crate::util::format_date;

-        let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
+        let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
+
+        // Mobile clients expect size to be a string instead of a number
+        if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
+            data["size"] = Value::String(size.to_string());
+        }

         json!({
-            "Id": self.uuid,
-            "Type": self.atype,
-
-            "Name": self.name,
-            "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
-            "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
-
-            "ExpirationDate": self.expiration_date.as_ref().map(format_date),
-            "CreatorIdentifier": self.creator_identifier(conn).await,
-            "Object": "send-access",
+            "id": self.uuid,
+            "type": self.atype,
+
+            "name": self.name,
+            "text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
+            "file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
+
+            "expirationDate": self.expiration_date.as_ref().map(format_date),
+            "creatorIdentifier": self.creator_identifier(conn).await,
+            "object": "send-access",
         })
     }
 }
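The LowerCase<Value> wrapper normalizes legacy PascalCase keys in the stored data blob, and the size patch-up exists because mobile clients parse "size" as a string. A minimal standalone sketch of that coercion:

use serde_json::{json, Value};

// Mirrors the patch-up above: numeric sizes become strings, and values
// that are already strings pass through untouched.
fn normalize_size(mut data: Value) -> Value {
    if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
        data["size"] = Value::String(size.to_string());
    }
    data
}

fn main() {
    assert_eq!(normalize_size(json!({"size": 42}))["size"], json!("42"));
    assert_eq!(normalize_size(json!({"size": "42"}))["size"], json!("42"));
}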
@@ -172,6 +184,7 @@ use crate::db::DbConn;

 use crate::api::EmptyResult;
 use crate::error::MapResult;
+use crate::util::NumberOrString;

 impl Send {
     pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
@@ -286,6 +299,29 @@ impl Send {
         }}
     }

+    pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> {
+        let sends = Self::find_by_user(user_uuid, conn).await;
+
+        #[derive(serde::Deserialize)]
+        struct FileData {
+            #[serde(rename = "size", alias = "Size")]
+            size: NumberOrString,
+        }
+
+        let mut total: i64 = 0;
+        for send in sends {
+            if send.atype == SendType::File as i32 {
+                if let Ok(size) =
+                    serde_json::from_str::<FileData>(&send.data).map_err(Into::into).and_then(|d| d.size.into_i64())
+                {
+                    total = total.checked_add(size)?;
+                };
+            }
+        }
+
+        Some(total)
+    }
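size_by_user returns None when the running total overflows (checked_add propagates through the ? operator), so callers can treat None as "over any conceivable quota". A hedged quota-check sketch; attachment_limit and upload_size are invented names, not configuration values from the diff:

// Hypothetical upload guard; an overflowed total counts as over-quota.
let used = Send::size_by_user(&user_uuid, &mut conn).await.unwrap_or(i64::MAX);
if used.saturating_add(upload_size) > attachment_limit {
    err!("Send storage limit reached");
}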
     pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! {conn: {
             sends::table
@@ -12,7 +12,7 @@ db_object! {
         pub atype: i32,
         pub enabled: bool,
         pub data: String,
-        pub last_used: i32,
+        pub last_used: i64,
     }
 }

@@ -34,6 +34,9 @@ pub enum TwoFactorType {
     EmailVerificationChallenge = 1002,
     WebauthnRegisterChallenge = 1003,
     WebauthnLoginChallenge = 1004,
+
+    // Special type for Protected Actions verification via email
+    ProtectedActions = 2000,
 }

 /// Local methods
@@ -51,17 +54,17 @@ impl TwoFactor {

     pub fn to_json(&self) -> Value {
         json!({
-            "Enabled": self.enabled,
-            "Key": "", // This key and value vary
-            "Object": "twoFactorAuthenticator" // This value varies
+            "enabled": self.enabled,
+            "key": "", // This key and value vary
+            "object": "twoFactorAuthenticator" // This value varies
         })
     }

     pub fn to_json_provider(&self) -> Value {
         json!({
-            "Enabled": self.enabled,
-            "Type": self.atype,
-            "Object": "twoFactorProvider"
+            "enabled": self.enabled,
+            "type": self.atype,
+            "object": "twoFactorProvider"
         })
     }
 }
@@ -92,7 +95,7 @@ impl TwoFactor {
         // We need to make sure we're not going to violate the unique constraint on user_uuid and atype.
         // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
         // not support multiple constraints on ON CONFLICT clauses.
-        diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
+        let _: () = diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
             .execute(conn)
             .map_res("Error deleting twofactor for insert")?;
@@ -1,4 +1,4 @@
|
|||||||
use chrono::{Duration, NaiveDateTime, Utc};
|
use chrono::{NaiveDateTime, TimeDelta, Utc};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::crypto;
|
use crate::crypto;
|
||||||
@@ -202,7 +202,7 @@ impl User {
|
|||||||
let stamp_exception = UserStampException {
|
let stamp_exception = UserStampException {
|
||||||
routes: route_exception,
|
routes: route_exception,
|
||||||
security_stamp: self.security_stamp.clone(),
|
security_stamp: self.security_stamp.clone(),
|
||||||
expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(),
|
expire: (Utc::now() + TimeDelta::try_minutes(2).unwrap()).timestamp(),
|
||||||
};
|
};
|
||||||
self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
|
self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
|
||||||
}
|
}
|
||||||
@@ -240,24 +240,26 @@ impl User {
     };

     json!({
-        "_Status": status as i32,
-        "Id": self.uuid,
-        "Name": self.name,
-        "Email": self.email,
-        "EmailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
-        "Premium": true,
-        "MasterPasswordHint": self.password_hint,
-        "Culture": "en-US",
-        "TwoFactorEnabled": twofactor_enabled,
-        "Key": self.akey,
-        "PrivateKey": self.private_key,
-        "SecurityStamp": self.security_stamp,
-        "Organizations": orgs_json,
-        "Providers": [],
-        "ProviderOrganizations": [],
-        "ForcePasswordReset": false,
-        "AvatarColor": self.avatar_color,
-        "Object": "profile",
+        "_status": status as i32,
+        "id": self.uuid,
+        "name": self.name,
+        "email": self.email,
+        "emailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
+        "premium": true,
+        "premiumFromOrganization": false,
+        "masterPasswordHint": self.password_hint,
+        "culture": "en-US",
+        "twoFactorEnabled": twofactor_enabled,
+        "key": self.akey,
+        "privateKey": self.private_key,
+        "securityStamp": self.security_stamp,
+        "organizations": orgs_json,
+        "providers": [],
+        "providerOrganizations": [],
+        "forcePasswordReset": false,
+        "avatarColor": self.avatar_color,
+        "usesKeyConnector": false,
+        "object": "profile",
     })
 }

@@ -311,6 +313,7 @@ impl User {

     Send::delete_all_by_user(&self.uuid, conn).await?;
     EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
+    EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
     UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
     Cipher::delete_all_by_user(&self.uuid, conn).await?;
     Favorite::delete_all_by_user(&self.uuid, conn).await?;
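The wholesale key rename above (PascalCase to camelCase) aligns the profile JSON with what current Bitwarden clients expect. For typed responses the same convention falls out of serde's rename_all; an illustrative struct (vaultwarden itself builds this object with the json!() macro):

use serde::Serialize;

// Illustrative only: a typed struct would get the same camelCase keys
// automatically via serde's container attribute.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Profile {
    id: String,
    email_verified: bool,                 // serialized as "emailVerified"
    master_password_hint: Option<String>, // serialized as "masterPasswordHint"
}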
@@ -3,7 +3,7 @@ table! {
     id -> Text,
     cipher_uuid -> Text,
     file_name -> Text,
-    file_size -> Integer,
+    file_size -> BigInt,
     akey -> Nullable<Text>,
     }
 }
@@ -160,7 +160,7 @@ table! {
     atype -> Integer,
     enabled -> Bool,
     data -> Text,
-    last_used -> Integer,
+    last_used -> BigInt,
     }
 }

(The same two Integer -> BigInt changes are repeated verbatim for each of the three database backends' schema files.)
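Since Diesel maps BigInt to i64 (where Integer maps to i32), the schema changes above imply the matching model fields widen as well. An illustrative Queryable struct for the attachment-shaped table (the actual struct lives elsewhere in the codebase):

use diesel::prelude::*;

// Illustrative only: Diesel's BigInt column type deserializes into i64,
// so the Queryable struct over this table widens file_size accordingly.
#[derive(Queryable)]
struct AttachmentRow {
    id: String,
    cipher_uuid: String,
    file_name: String,
    file_size: i64, // was i32 while the column was Integer
    akey: Option<String>,
}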
src/error.rs (24 changed lines)

@@ -52,7 +52,6 @@ use rocket::error::Error as RocketErr;
 use serde_json::{Error as SerdeErr, Value};
 use std::io::Error as IoErr;
 use std::time::SystemTimeError as TimeErr;
-use tokio_tungstenite::tungstenite::Error as TungstError;
 use webauthn_rs::error::WebauthnError as WebauthnErr;
 use yubico::yubicoerror::YubicoError as YubiErr;

@@ -91,7 +90,6 @@ make_error! {

     DieselCon(DieselConErr): _has_source, _api_error,
     Webauthn(WebauthnErr): _has_source, _api_error,
-    WebSocket(TungstError): _has_source, _api_error,
 }

 impl std::fmt::Debug for Error {
@@ -181,18 +179,18 @@ fn _serialize(e: &impl serde::Serialize, _msg: &str) -> String {

 fn _api_error(_: &impl std::any::Any, msg: &str) -> String {
     let json = json!({
-        "Message": msg,
+        "message": msg,
         "error": "",
         "error_description": "",
-        "ValidationErrors": {"": [ msg ]},
-        "ErrorModel": {
-            "Message": msg,
-            "Object": "error"
+        "validationErrors": {"": [ msg ]},
+        "errorModel": {
+            "message": msg,
+            "object": "error"
         },
-        "ExceptionMessage": null,
-        "ExceptionStackTrace": null,
-        "InnerExceptionMessage": null,
-        "Object": "error"
+        "exceptionMessage": null,
+        "exceptionStackTrace": null,
+        "innerExceptionMessage": null,
+        "object": "error"
     });
     _serialize(&json, "")
 }
@@ -291,10 +289,10 @@ macro_rules! err_json {
 macro_rules! err_handler {
     ($expr:expr) => {{
         error!(target: "auth", "Unauthorized Error: {}", $expr);
-        return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $expr));
+        return ::rocket::request::Outcome::Error((rocket::http::Status::Unauthorized, $expr));
     }};
     ($usr_msg:expr, $log_value:expr) => {{
         error!(target: "auth", "Unauthorized Error: {}. {}", $usr_msg, $log_value);
-        return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $usr_msg));
+        return ::rocket::request::Outcome::Error((rocket::http::Status::Unauthorized, $usr_msg));
     }};
 }
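The Failure-to-Error change tracks Rocket 0.5's final API, which renamed request::Outcome::Failure to Outcome::Error. A minimal request-guard sketch using the new name (the guard type and header are hypothetical):

use rocket::http::Status;
use rocket::request::{FromRequest, Outcome, Request};

struct ApiToken(String);

#[rocket::async_trait]
impl<'r> FromRequest<'r> for ApiToken {
    type Error = &'static str;

    async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        match req.headers().get_one("x-api-token") {
            Some(token) => Outcome::Success(ApiToken(token.to_string())),
            // Rocket 0.5 renamed Outcome::Failure to Outcome::Error.
            None => Outcome::Error((Status::Unauthorized, "missing token")),
        }
    }
}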
src/mail.rs (13 changed lines)

@@ -517,6 +517,19 @@ pub async fn send_admin_reset_password(address: &str, user_name: &str, org_name:
     send_email(address, &subject, body_html, body_text).await
 }

+pub async fn send_protected_action_token(address: &str, token: &str) -> EmptyResult {
+    let (subject, body_html, body_text) = get_text(
+        "email/protected_action",
+        json!({
+            "url": CONFIG.domain(),
+            "img_src": CONFIG._smtp_img_src(),
+            "token": token,
+        }),
+    )?;
+
+    send_email(address, &subject, body_html, body_text).await
+}
+
 async fn send_with_selected_transport(email: Message) -> EmptyResult {
     if CONFIG.use_sendmail() {
         match sendmail_transport().send(email).await {
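A hedged usage sketch for the new mail helper above (the call site and token helper shown here are assumptions; in the real flow the token comes from the protected-action verification logic):

// Hypothetical call site: e-mail a short-lived token for a protected action.
async fn email_protected_action_token(address: &str) -> EmptyResult {
    let token = crypto::generate_email_token(6); // assumed helper, as used for email 2FA
    mail::send_protected_action_token(address, &token).await
}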
src/main.rs (92 changed lines)

@@ -1,40 +1,9 @@
-#![forbid(unsafe_code, non_ascii_idents)]
-#![deny(
-    rust_2018_idioms,
-    rust_2021_compatibility,
-    noop_method_call,
-    pointer_structural_match,
-    trivial_casts,
-    trivial_numeric_casts,
-    unused_import_braces,
-    clippy::cast_lossless,
-    clippy::clone_on_ref_ptr,
-    clippy::equatable_if_let,
-    clippy::float_cmp_const,
-    clippy::inefficient_to_string,
-    clippy::iter_on_empty_collections,
-    clippy::iter_on_single_items,
-    clippy::linkedlist,
-    clippy::macro_use_imports,
-    clippy::manual_assert,
-    clippy::manual_instant_elapsed,
-    clippy::manual_string_new,
-    clippy::match_wildcard_for_single_variants,
-    clippy::mem_forget,
-    clippy::string_add_assign,
-    clippy::string_to_string,
-    clippy::unnecessary_join,
-    clippy::unnecessary_self_imports,
-    clippy::unused_async,
-    clippy::verbose_file_reads,
-    clippy::zero_sized_map_values
-)]
 #![cfg_attr(feature = "unstable", feature(ip))]
 // The recursion_limit is mainly triggered by the json!() macro.
 // The more key/value pairs there are the more recursion occurs.
 // We want to keep this as low as possible, but not higher then 128.
 // If you go above 128 it will cause rust-analyzer to fail,
-#![recursion_limit = "103"]
+#![recursion_limit = "200"]

 // When enabled use MiMalloc as malloc instead of the default malloc
 #[cfg(feature = "enable_mimalloc")]
@@ -83,12 +52,12 @@ mod ratelimit;
 mod util;

 use crate::api::purge_auth_requests;
-use crate::api::WS_ANONYMOUS_SUBSCRIPTIONS;
+use crate::api::{WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS};
 pub use config::CONFIG;
 pub use error::{Error, MapResult};
 use rocket::data::{Limits, ToByteUnit};
 use std::sync::Arc;
-pub use util::is_running_in_docker;
+pub use util::is_running_in_container;

 #[rocket::main]
 async fn main() -> Result<(), Error> {
@@ -96,13 +65,17 @@ async fn main() -> Result<(), Error> {
     launch_info();

     use log::LevelFilter as LF;
-    let level = LF::from_str(&CONFIG.log_level()).expect("Valid log level");
+    let level = LF::from_str(&CONFIG.log_level()).unwrap_or_else(|_| {
+        let valid_log_levels = LF::iter().map(|lvl| lvl.as_str().to_lowercase()).collect::<Vec<String>>().join(", ");
+        println!("Log level must be one of the following: {valid_log_levels}");
+        exit(1);
+    });
     init_logging(level).ok();

     let extra_debug = matches!(level, LF::Trace | LF::Debug);

     check_data_folder().await;
-    check_rsa_keys().unwrap_or_else(|_| {
+    auth::initialize_keys().unwrap_or_else(|_| {
         error!("Error creating keys, exiting...");
         exit(1);
     });
@@ -238,9 +211,9 @@ fn launch_info() {
 }

 fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
-    // Depending on the main log level we either want to disable or enable logging for trust-dns.
-    // Else if there are timeouts it will clutter the logs since trust-dns uses warn for this.
-    let trust_dns_level = if level >= log::LevelFilter::Debug {
+    // Depending on the main log level we either want to disable or enable logging for hickory.
+    // Else if there are timeouts it will clutter the logs since hickory uses warn for this.
+    let hickory_level = if level >= log::LevelFilter::Debug {
         level
     } else {
         log::LevelFilter::Off
@@ -293,9 +266,9 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
         .level_for("handlebars::render", handlebars_level)
         // Prevent cookie_store logs
         .level_for("cookie_store", log::LevelFilter::Off)
-        // Variable level for trust-dns used by reqwest
-        .level_for("trust_dns_resolver::name_server::name_server", trust_dns_level)
-        .level_for("trust_dns_proto::xfer", trust_dns_level)
+        // Variable level for hickory used by reqwest
+        .level_for("hickory_resolver::name_server::name_server", hickory_level)
+        .level_for("hickory_proto::xfer", hickory_level)
         .level_for("diesel_logger", diesel_logger_level)
         .chain(std::io::stdout());

@@ -415,7 +388,7 @@ async fn check_data_folder() {
     let path = Path::new(data_folder);
     if !path.exists() {
         error!("Data folder '{}' doesn't exist.", data_folder);
-        if is_running_in_docker() {
+        if is_running_in_container() {
             error!("Verify that your data volume is mounted at the correct location.");
         } else {
             error!("Create the data folder and try again.");
@@ -427,9 +400,9 @@ async fn check_data_folder() {
         exit(1);
     }

-    if is_running_in_docker()
+    if is_running_in_container()
         && std::env::var("I_REALLY_WANT_VOLATILE_STORAGE").is_err()
-        && !docker_data_folder_is_persistent(data_folder).await
+        && !container_data_folder_is_persistent(data_folder).await
     {
         error!(
             "No persistent volume!\n\
@@ -448,7 +421,7 @@ async fn check_data_folder() {
 /// A none persistent volume in either Docker or Podman is represented by a 64 alphanumerical string.
 /// If we detect this string, we will alert about not having a persistent self defined volume.
 /// This probably means that someone forgot to add `-v /path/to/vaultwarden_data/:/data`
-async fn docker_data_folder_is_persistent(data_folder: &str) -> bool {
+async fn container_data_folder_is_persistent(data_folder: &str) -> bool {
     if let Ok(mountinfo) = File::open("/proc/self/mountinfo").await {
         // Since there can only be one mountpoint to the DATA_FOLDER
         // We do a basic check for this mountpoint surrounded by a space.
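For reference, the renamed persistence check keeps the heuristic its doc comment describes: find the DATA_FOLDER mountpoint in /proc/self/mountinfo and treat a 64-character hex volume name as an anonymous (non-persistent) Docker/Podman volume. A simplified, blocking sketch of that idea (the real function is async and more careful):

use std::fs;

// Simplified sketch: an anonymous Docker/Podman volume backs the mount from a
// path containing a 64-character hex component, e.g. ".../volumes/<64-hex>/_data".
fn data_folder_looks_persistent(data_folder: &str) -> bool {
    let Ok(mountinfo) = fs::read_to_string("/proc/self/mountinfo") else {
        return true; // cannot inspect mounts; assume the operator knows best
    };
    let needle = format!(" {data_folder} ");
    for line in mountinfo.lines() {
        // Only one mountinfo line can mount the data folder itself.
        if line.contains(&needle) {
            return !line
                .split('/')
                .any(|part| part.len() == 64 && part.chars().all(|c| c.is_ascii_hexdigit()));
        }
    }
    true
}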
@@ -475,31 +448,6 @@ async fn docker_data_folder_is_persistent(data_folder: &str) -> bool {
     true
 }

-fn check_rsa_keys() -> Result<(), crate::error::Error> {
-    // If the RSA keys don't exist, try to create them
-    let priv_path = CONFIG.private_rsa_key();
-    let pub_path = CONFIG.public_rsa_key();
-
-    if !util::file_exists(&priv_path) {
-        let rsa_key = openssl::rsa::Rsa::generate(2048)?;
-
-        let priv_key = rsa_key.private_key_to_pem()?;
-        crate::util::write_file(&priv_path, &priv_key)?;
-        info!("Private key created correctly.");
-    }
-
-    if !util::file_exists(&pub_path) {
-        let rsa_key = openssl::rsa::Rsa::private_key_from_pem(&std::fs::read(&priv_path)?)?;
-
-        let pub_key = rsa_key.public_key_to_pem()?;
-        crate::util::write_file(&pub_path, &pub_key)?;
-        info!("Public key created correctly.");
-    }
-
-    auth::load_keys();
-    Ok(())
-}
-
 fn check_web_vault() {
     if !CONFIG.web_vault_enabled() {
         return;
@@ -553,7 +501,7 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
         .register([basepath, "/api"].concat(), api::core_catchers())
         .register([basepath, "/admin"].concat(), api::admin_catchers())
         .manage(pool)
-        .manage(api::start_notification_server())
+        .manage(Arc::clone(&WS_USERS))
         .manage(Arc::clone(&WS_ANONYMOUS_SUBSCRIPTIONS))
         .attach(util::AppHeaders())
         .attach(util::Cors())
[File diff suppressed because it is too large]
src/static/scripts/admin_diagnostics.js (10 changed lines, vendored)

@@ -21,7 +21,11 @@ const browserUTC = `${year}-${month}-${day} ${hour}:${minute}:${seconds} UTC`;

 // ================================
 // Check if the output is a valid IP
-const isValidIp = value => (/^(?:(?:^|\.)(?:2(?:5[0-5]|[0-4]\d)|1?\d?\d)){4}$/.test(value) ? true : false);
+function isValidIp(ip) {
+    const ipv4Regex = /^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/;
+    const ipv6Regex = /^(?:[a-fA-F0-9]{1,4}:){7}[a-fA-F0-9]{1,4}|((?:[a-fA-F0-9]{1,4}:){1,7}:|:(:[a-fA-F0-9]{1,4}){1,7}|[a-fA-F0-9]{1,4}:((:[a-fA-F0-9]{1,4}){1,6}))$/;
+    return ipv4Regex.test(ip) || ipv6Regex.test(ip);
+}

 function checkVersions(platform, installed, latest, commit=null) {
     if (installed === "-" || latest === "-") {
@@ -77,7 +81,7 @@ async function generateSupportString(event, dj) {
     supportString += `* Vaultwarden version: v${dj.current_release}\n`;
     supportString += `* Web-vault version: v${dj.web_vault_version}\n`;
     supportString += `* OS/Arch: ${dj.host_os}/${dj.host_arch}\n`;
-    supportString += `* Running within Docker: ${dj.running_within_docker} (Base: ${dj.docker_base_image})\n`;
+    supportString += `* Running within a container: ${dj.running_within_container} (Base: ${dj.container_base_image})\n`;
     supportString += "* Environment settings overridden: ";
     if (dj.overrides != "") {
         supportString += "true\n";
@@ -179,7 +183,7 @@ function initVersionCheck(dj) {
     }
     checkVersions("server", serverInstalled, serverLatest, serverLatestCommit);

-    if (!dj.running_within_docker) {
+    if (!dj.running_within_container) {
         const webInstalled = dj.web_vault_version;
         const webLatest = dj.latest_web_build;
         checkVersions("web", webInstalled, webLatest);
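The regex rewrite above adds IPv6 support on the admin diagnostics page. For comparison, a server-side Rust check can skip regexes entirely by leaning on the standard library (illustrative only, not part of this diff):

use std::net::IpAddr;

// Illustrative Rust counterpart of the isValidIp() check above:
// parsing into std::net::IpAddr accepts both IPv4 and IPv6 literals.
fn is_valid_ip(value: &str) -> bool {
    value.parse::<IpAddr>().is_ok()
}

fn main() {
    assert!(is_valid_ip("192.168.1.1"));
    assert!(is_valid_ip("2001:db8::1"));
    assert!(!is_valid_ip("999.1.1.1"));
}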
src/static/scripts/bootstrap.bundle.js (17 changed lines, vendored)

@@ -1,6 +1,6 @@
 /*!
-  * Bootstrap v5.3.1 (https://getbootstrap.com/)
-  * Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
+  * Bootstrap v5.3.3 (https://getbootstrap.com/)
+  * Copyright 2011-2024 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
   * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
   */
 (function (global, factory) {
@@ -210,7 +210,6 @@
   const reflow = element => {
     element.offsetHeight; // eslint-disable-line no-unused-expressions
   };
-
   const getjQuery = () => {
     if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) {
       return window.jQuery;
@@ -648,7 +647,7 @@
    * Constants
    */

-  const VERSION = '5.3.1';
+  const VERSION = '5.3.3';

   /**
    * Class definition
@@ -731,7 +730,7 @@
       }
       selector = hrefAttribute && hrefAttribute !== '#' ? hrefAttribute.trim() : null;
     }
-    return parseSelector(selector);
+    return selector ? selector.split(',').map(sel => parseSelector(sel)).join(',') : null;
   };
   const SelectorEngine = {
     find(selector, element = document.documentElement) {
@@ -3916,7 +3915,6 @@
     // if false, we use the backdrop helper without adding any element to the dom
     rootElement: 'body' // give the choice to place backdrop under different elements
   };
-
   const DefaultType$8 = {
     className: 'string',
     clickCallback: '(function|null)',
@@ -4041,7 +4039,6 @@
     autofocus: true,
     trapElement: null // The element to trap focus inside of
   };
-
   const DefaultType$7 = {
     autofocus: 'boolean',
     trapElement: 'element'
@@ -4768,7 +4765,10 @@
     br: [],
     col: [],
     code: [],
+    dd: [],
     div: [],
+    dl: [],
+    dt: [],
     em: [],
     hr: [],
     h1: [],
@@ -5866,7 +5866,7 @@
   const CLASS_DROPDOWN = 'dropdown';
   const SELECTOR_DROPDOWN_TOGGLE = '.dropdown-toggle';
   const SELECTOR_DROPDOWN_MENU = '.dropdown-menu';
-  const NOT_SELECTOR_DROPDOWN_TOGGLE = ':not(.dropdown-toggle)';
+  const NOT_SELECTOR_DROPDOWN_TOGGLE = `:not(${SELECTOR_DROPDOWN_TOGGLE})`;
   const SELECTOR_TAB_PANEL = '.list-group, .nav, [role="tablist"]';
   const SELECTOR_OUTER = '.nav-item, .list-group-item';
   const SELECTOR_INNER = `.nav-link${NOT_SELECTOR_DROPDOWN_TOGGLE}, .list-group-item${NOT_SELECTOR_DROPDOWN_TOGGLE}, [role="tab"]${NOT_SELECTOR_DROPDOWN_TOGGLE}`;
@@ -6311,3 +6311,4 @@
   return index_umd;

 }));
+//# sourceMappingURL=bootstrap.bundle.js.map
src/static/scripts/bootstrap.css (132 changed lines, vendored)

@@ -1,7 +1,7 @@
 @charset "UTF-8";
 /*!
- * Bootstrap v5.3.1 (https://getbootstrap.com/)
- * Copyright 2011-2023 The Bootstrap Authors
+ * Bootstrap v5.3.3 (https://getbootstrap.com/)
+ * Copyright 2011-2024 The Bootstrap Authors
  * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
  */
 :root,
@@ -99,6 +99,7 @@
   --bs-link-hover-color: #0a58ca;
   --bs-link-hover-color-rgb: 10, 88, 202;
   --bs-code-color: #d63384;
+  --bs-highlight-color: #212529;
   --bs-highlight-bg: #fff3cd;
   --bs-border-width: 1px;
   --bs-border-style: solid;
@@ -170,6 +171,8 @@
   --bs-link-color-rgb: 110, 168, 254;
   --bs-link-hover-color-rgb: 139, 185, 254;
   --bs-code-color: #e685b5;
+  --bs-highlight-color: #dee2e6;
+  --bs-highlight-bg: #664d03;
   --bs-border-color: #495057;
   --bs-border-color-translucent: rgba(255, 255, 255, 0.15);
   --bs-form-valid-color: #75b798;
@@ -325,6 +328,7 @@ small, .small {

 mark, .mark {
   padding: 0.1875em;
+  color: var(--bs-highlight-color);
   background-color: var(--bs-highlight-bg);
 }

@@ -819,7 +823,7 @@ progress {

 .row-cols-3 > * {
   flex: 0 0 auto;
-  width: 33.3333333333%;
+  width: 33.33333333%;
 }

 .row-cols-4 > * {
@@ -834,7 +838,7 @@ progress {

 .row-cols-6 > * {
   flex: 0 0 auto;
-  width: 16.6666666667%;
+  width: 16.66666667%;
 }

 .col-auto {
@@ -1024,7 +1028,7 @@ progress {
 }
 .row-cols-sm-3 > * {
   flex: 0 0 auto;
-  width: 33.3333333333%;
+  width: 33.33333333%;
 }
 .row-cols-sm-4 > * {
   flex: 0 0 auto;
@@ -1036,7 +1040,7 @@ progress {
 }
 .row-cols-sm-6 > * {
   flex: 0 0 auto;
-  width: 16.6666666667%;
+  width: 16.66666667%;
 }
 .col-sm-auto {
   flex: 0 0 auto;
@@ -1193,7 +1197,7 @@ progress {
 }
 .row-cols-md-3 > * {
   flex: 0 0 auto;
-  width: 33.3333333333%;
+  width: 33.33333333%;
 }
 .row-cols-md-4 > * {
   flex: 0 0 auto;
@@ -1205,7 +1209,7 @@ progress {
 }
 .row-cols-md-6 > * {
   flex: 0 0 auto;
-  width: 16.6666666667%;
+  width: 16.66666667%;
 }
 .col-md-auto {
   flex: 0 0 auto;
@@ -1362,7 +1366,7 @@ progress {
 }
 .row-cols-lg-3 > * {
   flex: 0 0 auto;
-  width: 33.3333333333%;
+  width: 33.33333333%;
 }
 .row-cols-lg-4 > * {
   flex: 0 0 auto;
@@ -1374,7 +1378,7 @@ progress {
 }
 .row-cols-lg-6 > * {
   flex: 0 0 auto;
-  width: 16.6666666667%;
+  width: 16.66666667%;
 }
 .col-lg-auto {
   flex: 0 0 auto;
@@ -1531,7 +1535,7 @@ progress {
 }
 .row-cols-xl-3 > * {
   flex: 0 0 auto;
-  width: 33.3333333333%;
+  width: 33.33333333%;
 }
 .row-cols-xl-4 > * {
   flex: 0 0 auto;
@@ -1543,7 +1547,7 @@ progress {
 }
 .row-cols-xl-6 > * {
   flex: 0 0 auto;
-  width: 16.6666666667%;
+  width: 16.66666667%;
 }
 .col-xl-auto {
   flex: 0 0 auto;
@@ -1700,7 +1704,7 @@ progress {
 }
 .row-cols-xxl-3 > * {
   flex: 0 0 auto;
-  width: 33.3333333333%;
+  width: 33.33333333%;
 }
 .row-cols-xxl-4 > * {
   flex: 0 0 auto;
@@ -1712,7 +1716,7 @@ progress {
 }
 .row-cols-xxl-6 > * {
   flex: 0 0 auto;
-  width: 16.6666666667%;
+  width: 16.66666667%;
 }
 .col-xxl-auto {
   flex: 0 0 auto;
@@ -1856,16 +1860,16 @@ progress {
   --bs-table-bg-type: initial;
   --bs-table-color-state: initial;
   --bs-table-bg-state: initial;
-  --bs-table-color: var(--bs-body-color);
+  --bs-table-color: var(--bs-emphasis-color);
   --bs-table-bg: var(--bs-body-bg);
   --bs-table-border-color: var(--bs-border-color);
   --bs-table-accent-bg: transparent;
-  --bs-table-striped-color: var(--bs-body-color);
-  --bs-table-striped-bg: rgba(0, 0, 0, 0.05);
-  --bs-table-active-color: var(--bs-body-color);
-  --bs-table-active-bg: rgba(0, 0, 0, 0.1);
-  --bs-table-hover-color: var(--bs-body-color);
-  --bs-table-hover-bg: rgba(0, 0, 0, 0.075);
+  --bs-table-striped-color: var(--bs-emphasis-color);
+  --bs-table-striped-bg: rgba(var(--bs-emphasis-color-rgb), 0.05);
+  --bs-table-active-color: var(--bs-emphasis-color);
+  --bs-table-active-bg: rgba(var(--bs-emphasis-color-rgb), 0.1);
+  --bs-table-hover-color: var(--bs-emphasis-color);
+  --bs-table-hover-bg: rgba(var(--bs-emphasis-color-rgb), 0.075);
   width: 100%;
   margin-bottom: 1rem;
   vertical-align: top;
@@ -1934,7 +1938,7 @@ progress {
 .table-primary {
   --bs-table-color: #000;
   --bs-table-bg: #cfe2ff;
-  --bs-table-border-color: #bacbe6;
+  --bs-table-border-color: #a6b5cc;
   --bs-table-striped-bg: #c5d7f2;
   --bs-table-striped-color: #000;
   --bs-table-active-bg: #bacbe6;
@@ -1948,7 +1952,7 @@ progress {
 .table-secondary {
   --bs-table-color: #000;
   --bs-table-bg: #e2e3e5;
-  --bs-table-border-color: #cbccce;
+  --bs-table-border-color: #b5b6b7;
   --bs-table-striped-bg: #d7d8da;
   --bs-table-striped-color: #000;
   --bs-table-active-bg: #cbccce;
@@ -1962,7 +1966,7 @@ progress {
 .table-success {
   --bs-table-color: #000;
   --bs-table-bg: #d1e7dd;
-  --bs-table-border-color: #bcd0c7;
+  --bs-table-border-color: #a7b9b1;
   --bs-table-striped-bg: #c7dbd2;
   --bs-table-striped-color: #000;
   --bs-table-active-bg: #bcd0c7;
@@ -1976,7 +1980,7 @@ progress {
 .table-info {
   --bs-table-color: #000;
   --bs-table-bg: #cff4fc;
-  --bs-table-border-color: #badce3;
+  --bs-table-border-color: #a6c3ca;
   --bs-table-striped-bg: #c5e8ef;
   --bs-table-striped-color: #000;
   --bs-table-active-bg: #badce3;
@@ -1990,7 +1994,7 @@ progress {
 .table-warning {
   --bs-table-color: #000;
   --bs-table-bg: #fff3cd;
-  --bs-table-border-color: #e6dbb9;
+  --bs-table-border-color: #ccc2a4;
   --bs-table-striped-bg: #f2e7c3;
   --bs-table-striped-color: #000;
   --bs-table-active-bg: #e6dbb9;
@@ -2004,7 +2008,7 @@ progress {
 .table-danger {
   --bs-table-color: #000;
   --bs-table-bg: #f8d7da;
-  --bs-table-border-color: #dfc2c4;
+  --bs-table-border-color: #c6acae;
   --bs-table-striped-bg: #eccccf;
   --bs-table-striped-color: #000;
   --bs-table-active-bg: #dfc2c4;
@@ -2018,7 +2022,7 @@ progress {
 .table-light {
   --bs-table-color: #000;
   --bs-table-bg: #f8f9fa;
-  --bs-table-border-color: #dfe0e1;
+  --bs-table-border-color: #c6c7c8;
   --bs-table-striped-bg: #ecedee;
   --bs-table-striped-color: #000;
   --bs-table-active-bg: #dfe0e1;
@@ -2032,7 +2036,7 @@ progress {
 .table-dark {
   --bs-table-color: #fff;
   --bs-table-bg: #212529;
-  --bs-table-border-color: #373b3e;
+  --bs-table-border-color: #4d5154;
   --bs-table-striped-bg: #2c3034;
   --bs-table-striped-color: #fff;
   --bs-table-active-bg: #373b3e;
@@ -2388,6 +2392,7 @@ textarea.form-control-lg {

 .form-check-input {
   --bs-form-check-bg: var(--bs-body-bg);
+  flex-shrink: 0;
   width: 1em;
   height: 1em;
   margin-top: 0.25em;
@@ -2544,7 +2549,7 @@ textarea.form-control-lg {
   height: 0.5rem;
   color: transparent;
   cursor: pointer;
-  background-color: var(--bs-tertiary-bg);
+  background-color: var(--bs-secondary-bg);
   border-color: transparent;
   border-radius: 1rem;
 }
@@ -2573,7 +2578,7 @@ textarea.form-control-lg {
   height: 0.5rem;
   color: transparent;
   cursor: pointer;
-  background-color: var(--bs-tertiary-bg);
+  background-color: var(--bs-secondary-bg);
   border-color: transparent;
   border-radius: 1rem;
 }
@@ -3037,6 +3042,9 @@ textarea.form-control-lg {
 .btn-check:checked + .btn:focus-visible, :not(.btn-check) + .btn:active:focus-visible, .btn:first-child:active:focus-visible, .btn.active:focus-visible, .btn.show:focus-visible {
   box-shadow: var(--bs-btn-focus-box-shadow);
 }
+.btn-check:checked:focus-visible + .btn {
+  box-shadow: var(--bs-btn-focus-box-shadow);
+}
 .btn:disabled, .btn.disabled, fieldset:disabled .btn {
   color: var(--bs-btn-disabled-color);
   pointer-events: none;
@@ -3431,7 +3439,7 @@ textarea.form-control-lg {
   --bs-dropdown-inner-border-radius: calc(var(--bs-border-radius) - var(--bs-border-width));
   --bs-dropdown-divider-bg: var(--bs-border-color-translucent);
   --bs-dropdown-divider-margin-y: 0.5rem;
-  --bs-dropdown-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15);
+  --bs-dropdown-box-shadow: var(--bs-box-shadow);
   --bs-dropdown-link-color: var(--bs-body-color);
   --bs-dropdown-link-hover-color: var(--bs-body-color);
   --bs-dropdown-link-hover-bg: var(--bs-tertiary-bg);
@@ -4568,12 +4576,11 @@ textarea.form-control-lg {
   --bs-accordion-btn-padding-y: 1rem;
   --bs-accordion-btn-color: var(--bs-body-color);
   --bs-accordion-btn-bg: var(--bs-accordion-bg);
-  --bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23212529'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");
+  --bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23212529' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e");
   --bs-accordion-btn-icon-width: 1.25rem;
   --bs-accordion-btn-icon-transform: rotate(-180deg);
   --bs-accordion-btn-icon-transition: transform 0.2s ease-in-out;
-  --bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23052c65'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");
-  --bs-accordion-btn-focus-border-color: #86b7fe;
+  --bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23052c65' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e");
   --bs-accordion-btn-focus-box-shadow: 0 0 0 0.25rem rgba(13, 110, 253, 0.25);
   --bs-accordion-body-padding-x: 1.25rem;
   --bs-accordion-body-padding-y: 1rem;
@@ -4631,7 +4638,6 @@ textarea.form-control-lg {
 }
 .accordion-button:focus {
   z-index: 3;
-  border-color: var(--bs-accordion-btn-focus-border-color);
   outline: 0;
   box-shadow: var(--bs-accordion-btn-focus-box-shadow);
 }
@@ -4649,7 +4655,7 @@ textarea.form-control-lg {
   border-top-left-radius: var(--bs-accordion-border-radius);
   border-top-right-radius: var(--bs-accordion-border-radius);
 }
-.accordion-item:first-of-type .accordion-button {
+.accordion-item:first-of-type > .accordion-header .accordion-button {
   border-top-left-radius: var(--bs-accordion-inner-border-radius);
   border-top-right-radius: var(--bs-accordion-inner-border-radius);
 }
@@ -4660,11 +4666,11 @@ textarea.form-control-lg {
   border-bottom-right-radius: var(--bs-accordion-border-radius);
   border-bottom-left-radius: var(--bs-accordion-border-radius);
 }
-.accordion-item:last-of-type .accordion-button.collapsed {
+.accordion-item:last-of-type > .accordion-header .accordion-button.collapsed {
   border-bottom-right-radius: var(--bs-accordion-inner-border-radius);
   border-bottom-left-radius: var(--bs-accordion-inner-border-radius);
 }
-.accordion-item:last-of-type .accordion-collapse {
+.accordion-item:last-of-type > .accordion-collapse {
   border-bottom-right-radius: var(--bs-accordion-border-radius);
   border-bottom-left-radius: var(--bs-accordion-border-radius);
 }
@@ -4673,21 +4679,21 @@ textarea.form-control-lg {
   padding: var(--bs-accordion-body-padding-y) var(--bs-accordion-body-padding-x);
 }

-.accordion-flush .accordion-collapse {
-  border-width: 0;
-}
-.accordion-flush .accordion-item {
+.accordion-flush > .accordion-item {
   border-right: 0;
   border-left: 0;
   border-radius: 0;
 }
-.accordion-flush .accordion-item:first-child {
+.accordion-flush > .accordion-item:first-child {
   border-top: 0;
 }
-.accordion-flush .accordion-item:last-child {
+.accordion-flush > .accordion-item:last-child {
   border-bottom: 0;
 }
-.accordion-flush .accordion-item .accordion-button, .accordion-flush .accordion-item .accordion-button.collapsed {
+.accordion-flush > .accordion-item > .accordion-header .accordion-button, .accordion-flush > .accordion-item > .accordion-header .accordion-button.collapsed {
+  border-radius: 0;
+}
+.accordion-flush > .accordion-item > .accordion-collapse {
   border-radius: 0;
 }

@@ -5473,7 +5479,7 @@ textarea.form-control-lg {
   --bs-modal-border-color: var(--bs-border-color-translucent);
   --bs-modal-border-width: var(--bs-border-width);
   --bs-modal-border-radius: var(--bs-border-radius-lg);
-  --bs-modal-box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075);
+  --bs-modal-box-shadow: var(--bs-box-shadow-sm);
   --bs-modal-inner-border-radius: calc(var(--bs-border-radius-lg) - (var(--bs-border-width)));
   --bs-modal-header-padding-x: 1rem;
   --bs-modal-header-padding-y: 1rem;
@@ -5573,7 +5579,6 @@ textarea.form-control-lg {
   display: flex;
   flex-shrink: 0;
   align-items: center;
-  justify-content: space-between;
   padding: var(--bs-modal-header-padding);
   border-bottom: var(--bs-modal-header-border-width) solid var(--bs-modal-header-border-color);
   border-top-left-radius: var(--bs-modal-inner-border-radius);
@@ -5614,7 +5619,7 @@ textarea.form-control-lg {
 @media (min-width: 576px) {
   .modal {
     --bs-modal-margin: 1.75rem;
-    --bs-modal-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15);
+    --bs-modal-box-shadow: var(--bs-box-shadow);
   }
   .modal-dialog {
     max-width: var(--bs-modal-width);
@@ -5866,7 +5871,7 @@ textarea.form-control-lg {
   --bs-popover-border-color: var(--bs-border-color-translucent);
   --bs-popover-border-radius: var(--bs-border-radius-lg);
   --bs-popover-inner-border-radius: calc(var(--bs-border-radius-lg) - var(--bs-border-width));
-  --bs-popover-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15);
+  --bs-popover-box-shadow: var(--bs-box-shadow);
   --bs-popover-header-padding-x: 1rem;
   --bs-popover-header-padding-y: 0.5rem;
   --bs-popover-header-font-size: 1rem;
@@ -6139,20 +6144,12 @@ textarea.form-control-lg {
   background-size: 100% 100%;
 }

-/* rtl:options: {
-  "autoRename": true,
-  "stringMap":[ {
-    "name"    : "prev-next",
-    "search"  : "prev",
-    "replace" : "next"
-  } ]
-} */
 .carousel-control-prev-icon {
-  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e");
+  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")*/;
 }

 .carousel-control-next-icon {
-  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");
+  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e")*/;
 }

 .carousel-indicators {
@@ -6301,7 +6298,7 @@ textarea.form-control-lg {
   --bs-offcanvas-bg: var(--bs-body-bg);
   --bs-offcanvas-border-width: var(--bs-border-width);
   --bs-offcanvas-border-color: var(--bs-border-color-translucent);
-  --bs-offcanvas-box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075);
+  --bs-offcanvas-box-shadow: var(--bs-box-shadow-sm);
   --bs-offcanvas-transition: transform 0.3s ease-in-out;
   --bs-offcanvas-title-line-height: 1.5;
 }
@@ -6772,14 +6769,11 @@ textarea.form-control-lg {
 .offcanvas-header {
   display: flex;
   align-items: center;
-  justify-content: space-between;
   padding: var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x);
 }
 .offcanvas-header .btn-close {
   padding: calc(var(--bs-offcanvas-padding-y) * 0.5) calc(var(--bs-offcanvas-padding-x) * 0.5);
-  margin-top: calc(-0.5 * var(--bs-offcanvas-padding-y));
-  margin-right: calc(-0.5 * var(--bs-offcanvas-padding-x));
-  margin-bottom: calc(-0.5 * var(--bs-offcanvas-padding-y));
+  margin: calc(-0.5 * var(--bs-offcanvas-padding-y)) calc(-0.5 * var(--bs-offcanvas-padding-x)) calc(-0.5 * var(--bs-offcanvas-padding-y)) auto;
 }

 .offcanvas-title {
@@ -7380,15 +7374,15 @@ textarea.form-control-lg {
 }

 .shadow {
-  box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15) !important;
+  box-shadow: var(--bs-box-shadow) !important;
 }

 .shadow-sm {
-  box-shadow: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075) !important;
+  box-shadow: var(--bs-box-shadow-sm) !important;
 }

 .shadow-lg {
-  box-shadow: 0 1rem 3rem rgba(0, 0, 0, 0.175) !important;
+  box-shadow: var(--bs-box-shadow-lg) !important;
 }

 .shadow-none {
@@ -12059,3 +12053,5 @@ textarea.form-control-lg {
     display: none !important;
   }
 }
+
+/*# sourceMappingURL=bootstrap.css.map */
358
src/static/scripts/datatables.css
vendored
358
src/static/scripts/datatables.css
vendored
@@ -4,10 +4,10 @@
|
|||||||
*
|
*
|
||||||
* To rebuild or modify this file with the latest versions of the included
|
* To rebuild or modify this file with the latest versions of the included
|
||||||
* software please visit:
|
* software please visit:
|
||||||
* https://datatables.net/download/#bs5/dt-1.13.6
|
* https://datatables.net/download/#bs5/dt-2.0.7
|
||||||
*
|
*
|
||||||
* Included libraries:
|
* Included libraries:
|
||||||
* DataTables 1.13.6
|
* DataTables 2.0.7
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@charset "UTF-8";
|
@charset "UTF-8";
|
||||||
@@ -30,76 +30,124 @@ table.dataTable td.dt-control {
 }
 table.dataTable td.dt-control:before {
   display: inline-block;
-  color: rgba(0, 0, 0, 0.5);
-  content: "►";
+  box-sizing: border-box;
+  content: "";
+  border-top: 5px solid transparent;
+  border-left: 10px solid rgba(0, 0, 0, 0.5);
+  border-bottom: 5px solid transparent;
+  border-right: 0px solid transparent;
 }
 table.dataTable tr.dt-hasChild td.dt-control:before {
-  content: "▼";
+  border-top: 10px solid rgba(0, 0, 0, 0.5);
+  border-left: 5px solid transparent;
+  border-bottom: 0px solid transparent;
+  border-right: 5px solid transparent;
 }

-html.dark table.dataTable td.dt-control:before {
-  color: rgba(255, 255, 255, 0.5);
+html.dark table.dataTable td.dt-control:before,
+:root[data-bs-theme=dark] table.dataTable td.dt-control:before {
+  border-left-color: rgba(255, 255, 255, 0.5);
 }
-html.dark table.dataTable tr.dt-hasChild td.dt-control:before {
-  color: rgba(255, 255, 255, 0.5);
+html.dark table.dataTable tr.dt-hasChild td.dt-control:before,
+:root[data-bs-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before {
+  border-top-color: rgba(255, 255, 255, 0.5);
+  border-left-color: transparent;
 }

-table.dataTable thead > tr > th.sorting, table.dataTable thead > tr > th.sorting_asc, table.dataTable thead > tr > th.sorting_desc, table.dataTable thead > tr > th.sorting_asc_disabled, table.dataTable thead > tr > th.sorting_desc_disabled,
-table.dataTable thead > tr > td.sorting,
-table.dataTable thead > tr > td.sorting_asc,
-table.dataTable thead > tr > td.sorting_desc,
-table.dataTable thead > tr > td.sorting_asc_disabled,
-table.dataTable thead > tr > td.sorting_desc_disabled {
-  cursor: pointer;
-  position: relative;
-  padding-right: 26px;
+div.dt-scroll-body thead tr,
+div.dt-scroll-body tfoot tr {
+  height: 0;
 }
-table.dataTable thead > tr > th.sorting:before, table.dataTable thead > tr > th.sorting:after, table.dataTable thead > tr > th.sorting_asc:before, table.dataTable thead > tr > th.sorting_asc:after, table.dataTable thead > tr > th.sorting_desc:before, table.dataTable thead > tr > th.sorting_desc:after, table.dataTable thead > tr > th.sorting_asc_disabled:before, table.dataTable thead > tr > th.sorting_asc_disabled:after, table.dataTable thead > tr > th.sorting_desc_disabled:before, table.dataTable thead > tr > th.sorting_desc_disabled:after,
-table.dataTable thead > tr > td.sorting:before,
-table.dataTable thead > tr > td.sorting:after,
-table.dataTable thead > tr > td.sorting_asc:before,
-table.dataTable thead > tr > td.sorting_asc:after,
-table.dataTable thead > tr > td.sorting_desc:before,
-table.dataTable thead > tr > td.sorting_desc:after,
-table.dataTable thead > tr > td.sorting_asc_disabled:before,
-table.dataTable thead > tr > td.sorting_asc_disabled:after,
-table.dataTable thead > tr > td.sorting_desc_disabled:before,
-table.dataTable thead > tr > td.sorting_desc_disabled:after {
+div.dt-scroll-body thead tr th, div.dt-scroll-body thead tr td,
+div.dt-scroll-body tfoot tr th,
+div.dt-scroll-body tfoot tr td {
+  height: 0 !important;
+  padding-top: 0px !important;
+  padding-bottom: 0px !important;
+  border-top-width: 0px !important;
+  border-bottom-width: 0px !important;
+}
+div.dt-scroll-body thead tr th div.dt-scroll-sizing, div.dt-scroll-body thead tr td div.dt-scroll-sizing,
+div.dt-scroll-body tfoot tr th div.dt-scroll-sizing,
+div.dt-scroll-body tfoot tr td div.dt-scroll-sizing {
+  height: 0 !important;
+  overflow: hidden !important;
+}
+
+table.dataTable thead > tr > th:active,
+table.dataTable thead > tr > td:active {
+  outline: none;
+}
+table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before,
+table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:before,
+table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before {
   position: absolute;
   display: block;
-  opacity: 0.125;
-  right: 10px;
-  line-height: 9px;
-  font-size: 0.8em;
-}
-table.dataTable thead > tr > th.sorting:before, table.dataTable thead > tr > th.sorting_asc:before, table.dataTable thead > tr > th.sorting_desc:before, table.dataTable thead > tr > th.sorting_asc_disabled:before, table.dataTable thead > tr > th.sorting_desc_disabled:before,
-table.dataTable thead > tr > td.sorting:before,
-table.dataTable thead > tr > td.sorting_asc:before,
-table.dataTable thead > tr > td.sorting_desc:before,
-table.dataTable thead > tr > td.sorting_asc_disabled:before,
-table.dataTable thead > tr > td.sorting_desc_disabled:before {
   bottom: 50%;
   content: "▲";
   content: "▲"/"";
 }
-table.dataTable thead > tr > th.sorting:after, table.dataTable thead > tr > th.sorting_asc:after, table.dataTable thead > tr > th.sorting_desc:after, table.dataTable thead > tr > th.sorting_asc_disabled:after, table.dataTable thead > tr > th.sorting_desc_disabled:after,
-table.dataTable thead > tr > td.sorting:after,
-table.dataTable thead > tr > td.sorting_asc:after,
-table.dataTable thead > tr > td.sorting_desc:after,
-table.dataTable thead > tr > td.sorting_asc_disabled:after,
-table.dataTable thead > tr > td.sorting_desc_disabled:after {
+table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
+table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:after,
+table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
+  position: absolute;
+  display: block;
   top: 50%;
   content: "▼";
   content: "▼"/"";
 }
-table.dataTable thead > tr > th.sorting_asc:before, table.dataTable thead > tr > th.sorting_desc:after,
-table.dataTable thead > tr > td.sorting_asc:before,
-table.dataTable thead > tr > td.sorting_desc:after {
+table.dataTable thead > tr > th.dt-orderable-asc, table.dataTable thead > tr > th.dt-orderable-desc, table.dataTable thead > tr > th.dt-ordering-asc, table.dataTable thead > tr > th.dt-ordering-desc,
+table.dataTable thead > tr > td.dt-orderable-asc,
+table.dataTable thead > tr > td.dt-orderable-desc,
+table.dataTable thead > tr > td.dt-ordering-asc,
+table.dataTable thead > tr > td.dt-ordering-desc {
+  position: relative;
+  padding-right: 30px;
+}
+table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order,
+table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order,
+table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order,
+table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order,
+table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order {
+  position: absolute;
+  right: 12px;
+  top: 0;
+  bottom: 0;
+  width: 12px;
+}
+table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-asc span.dt-column-order:after, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:before, table.dataTable thead > tr > th.dt-orderable-desc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:after, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
+table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:before,
+table.dataTable thead > tr > td.dt-orderable-asc span.dt-column-order:after,
+table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:before,
+table.dataTable thead > tr > td.dt-orderable-desc span.dt-column-order:after,
+table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before,
+table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:after,
+table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:before,
+table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
+  left: 0;
+  opacity: 0.125;
+  line-height: 9px;
+  font-size: 0.8em;
+}
+table.dataTable thead > tr > th.dt-orderable-asc, table.dataTable thead > tr > th.dt-orderable-desc,
+table.dataTable thead > tr > td.dt-orderable-asc,
+table.dataTable thead > tr > td.dt-orderable-desc {
+  cursor: pointer;
+}
+table.dataTable thead > tr > th.dt-orderable-asc:hover, table.dataTable thead > tr > th.dt-orderable-desc:hover,
+table.dataTable thead > tr > td.dt-orderable-asc:hover,
+table.dataTable thead > tr > td.dt-orderable-desc:hover {
+  outline: 2px solid rgba(0, 0, 0, 0.05);
+  outline-offset: -2px;
+}
+table.dataTable thead > tr > th.dt-ordering-asc span.dt-column-order:before, table.dataTable thead > tr > th.dt-ordering-desc span.dt-column-order:after,
+table.dataTable thead > tr > td.dt-ordering-asc span.dt-column-order:before,
+table.dataTable thead > tr > td.dt-ordering-desc span.dt-column-order:after {
   opacity: 0.6;
 }
-table.dataTable thead > tr > th.sorting_desc_disabled:after, table.dataTable thead > tr > th.sorting_asc_disabled:before,
-table.dataTable thead > tr > td.sorting_desc_disabled:after,
-table.dataTable thead > tr > td.sorting_asc_disabled:before {
+table.dataTable thead > tr > th.sorting_desc_disabled span.dt-column-order:after, table.dataTable thead > tr > th.sorting_asc_disabled span.dt-column-order:before,
+table.dataTable thead > tr > td.sorting_desc_disabled span.dt-column-order:after,
+table.dataTable thead > tr > td.sorting_asc_disabled span.dt-column-order:before {
   display: none;
 }
 table.dataTable thead > tr > th:active,
@@ -107,29 +155,39 @@ table.dataTable thead > tr > td:active {
   outline: none;
 }

-div.dataTables_scrollBody > table.dataTable > thead > tr > th:before, div.dataTables_scrollBody > table.dataTable > thead > tr > th:after,
-div.dataTables_scrollBody > table.dataTable > thead > tr > td:before,
-div.dataTables_scrollBody > table.dataTable > thead > tr > td:after {
-  display: none;
+div.dt-scroll-body > table.dataTable > thead > tr > th,
+div.dt-scroll-body > table.dataTable > thead > tr > td {
+  overflow: hidden;
 }

-div.dataTables_processing {
+:root.dark table.dataTable thead > tr > th.dt-orderable-asc:hover, :root.dark table.dataTable thead > tr > th.dt-orderable-desc:hover,
+:root.dark table.dataTable thead > tr > td.dt-orderable-asc:hover,
+:root.dark table.dataTable thead > tr > td.dt-orderable-desc:hover,
+:root[data-bs-theme=dark] table.dataTable thead > tr > th.dt-orderable-asc:hover,
+:root[data-bs-theme=dark] table.dataTable thead > tr > th.dt-orderable-desc:hover,
+:root[data-bs-theme=dark] table.dataTable thead > tr > td.dt-orderable-asc:hover,
+:root[data-bs-theme=dark] table.dataTable thead > tr > td.dt-orderable-desc:hover {
+  outline: 2px solid rgba(255, 255, 255, 0.05);
+}
+
+div.dt-processing {
   position: absolute;
   top: 50%;
   left: 50%;
   width: 200px;
   margin-left: -100px;
-  margin-top: -26px;
+  margin-top: -22px;
   text-align: center;
   padding: 2px;
+  z-index: 10;
 }
-div.dataTables_processing > div:last-child {
+div.dt-processing > div:last-child {
   position: relative;
   width: 80px;
   height: 15px;
   margin: 1em auto;
 }
-div.dataTables_processing > div:last-child > div {
+div.dt-processing > div:last-child > div {
   position: absolute;
   top: 0;
   width: 13px;
@@ -139,19 +197,19 @@ div.dataTables_processing > div:last-child > div {
   background: rgb(var(--dt-row-selected));
   animation-timing-function: cubic-bezier(0, 1, 1, 0);
 }
-div.dataTables_processing > div:last-child > div:nth-child(1) {
+div.dt-processing > div:last-child > div:nth-child(1) {
   left: 8px;
   animation: datatables-loader-1 0.6s infinite;
 }
-div.dataTables_processing > div:last-child > div:nth-child(2) {
+div.dt-processing > div:last-child > div:nth-child(2) {
   left: 8px;
   animation: datatables-loader-2 0.6s infinite;
 }
-div.dataTables_processing > div:last-child > div:nth-child(3) {
+div.dt-processing > div:last-child > div:nth-child(3) {
   left: 32px;
   animation: datatables-loader-2 0.6s infinite;
 }
-div.dataTables_processing > div:last-child > div:nth-child(4) {
+div.dt-processing > div:last-child > div:nth-child(4) {
   left: 56px;
   animation: datatables-loader-3 0.6s infinite;
 }
@@ -183,13 +241,16 @@ div.dataTables_processing > div:last-child > div:nth-child(4) {
 table.dataTable.nowrap th, table.dataTable.nowrap td {
   white-space: nowrap;
 }
+table.dataTable th,
+table.dataTable td {
+  box-sizing: border-box;
+}
 table.dataTable th.dt-left,
 table.dataTable td.dt-left {
   text-align: left;
 }
 table.dataTable th.dt-center,
-table.dataTable td.dt-center,
-table.dataTable td.dataTables_empty {
+table.dataTable td.dt-center {
   text-align: center;
 }
 table.dataTable th.dt-right,
@@ -204,6 +265,16 @@ table.dataTable th.dt-nowrap,
 table.dataTable td.dt-nowrap {
   white-space: nowrap;
 }
+table.dataTable th.dt-empty,
+table.dataTable td.dt-empty {
+  text-align: center;
+  vertical-align: top;
+}
+table.dataTable th.dt-type-numeric, table.dataTable th.dt-type-date,
+table.dataTable td.dt-type-numeric,
+table.dataTable td.dt-type-date {
+  text-align: right;
+}
 table.dataTable thead th,
 table.dataTable thead td,
 table.dataTable tfoot th,
@@ -266,179 +337,158 @@ table.dataTable tbody td.dt-body-nowrap {
 * ©2020 SpryMedia Ltd, all rights reserved.
 * License: MIT datatables.net/license/mit
 */
-table.dataTable {
+table.table.dataTable {
   clear: both;
-  margin-top: 6px !important;
-  margin-bottom: 6px !important;
-  max-width: none !important;
-  border-collapse: separate !important;
+  margin-bottom: 0;
+  max-width: none;
   border-spacing: 0;
 }
-table.dataTable td,
-table.dataTable th {
-  -webkit-box-sizing: content-box;
-  box-sizing: content-box;
-}
-table.dataTable td.dataTables_empty,
-table.dataTable th.dataTables_empty {
-  text-align: center;
-}
-table.dataTable.nowrap th,
-table.dataTable.nowrap td {
-  white-space: nowrap;
-}
-table.dataTable.table-striped > tbody > tr:nth-of-type(2n+1) > * {
+table.table.dataTable.table-striped > tbody > tr:nth-of-type(2n+1) > * {
   box-shadow: none;
 }
-table.dataTable > tbody > tr {
+table.table.dataTable > :not(caption) > * > * {
+  background-color: var(--bs-table-bg);
+}
+table.table.dataTable > tbody > tr {
   background-color: transparent;
 }
-table.dataTable > tbody > tr.selected > * {
+table.table.dataTable > tbody > tr.selected > * {
   box-shadow: inset 0 0 0 9999px rgb(13, 110, 253);
   box-shadow: inset 0 0 0 9999px rgb(var(--dt-row-selected));
   color: rgb(255, 255, 255);
   color: rgb(var(--dt-row-selected-text));
 }
-table.dataTable > tbody > tr.selected a {
+table.table.dataTable > tbody > tr.selected a {
   color: rgb(9, 10, 11);
   color: rgb(var(--dt-row-selected-link));
 }
-table.dataTable.table-striped > tbody > tr.odd > * {
+table.table.dataTable.table-striped > tbody > tr:nth-of-type(2n+1) > * {
   box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-stripe), 0.05);
 }
-table.dataTable.table-striped > tbody > tr.odd.selected > * {
+table.table.dataTable.table-striped > tbody > tr:nth-of-type(2n+1).selected > * {
   box-shadow: inset 0 0 0 9999px rgba(13, 110, 253, 0.95);
   box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-selected), 0.95);
 }
-table.dataTable.table-hover > tbody > tr:hover > * {
+table.table.dataTable.table-hover > tbody > tr:hover > * {
   box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-hover), 0.075);
 }
-table.dataTable.table-hover > tbody > tr.selected:hover > * {
+table.table.dataTable.table-hover > tbody > tr.selected:hover > * {
   box-shadow: inset 0 0 0 9999px rgba(13, 110, 253, 0.975);
   box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-selected), 0.975);
 }

-div.dataTables_wrapper div.dataTables_length label {
+div.dt-container div.dt-length label {
   font-weight: normal;
   text-align: left;
   white-space: nowrap;
 }
-div.dataTables_wrapper div.dataTables_length select {
+div.dt-container div.dt-length select {
   width: auto;
   display: inline-block;
+  margin-right: 0.5em;
 }
-div.dataTables_wrapper div.dataTables_filter {
+div.dt-container div.dt-search {
   text-align: right;
 }
-div.dataTables_wrapper div.dataTables_filter label {
+div.dt-container div.dt-search label {
   font-weight: normal;
   white-space: nowrap;
   text-align: left;
 }
-div.dataTables_wrapper div.dataTables_filter input {
+div.dt-container div.dt-search input {
   margin-left: 0.5em;
   display: inline-block;
   width: auto;
 }
-div.dataTables_wrapper div.dataTables_info {
+div.dt-container div.dt-info {
   padding-top: 0.85em;
 }
-div.dataTables_wrapper div.dataTables_paginate {
+div.dt-container div.dt-paging {
   margin: 0;
-  white-space: nowrap;
-  text-align: right;
 }
-div.dataTables_wrapper div.dataTables_paginate ul.pagination {
+div.dt-container div.dt-paging ul.pagination {
   margin: 2px 0;
-  white-space: nowrap;
-  justify-content: flex-end;
+  flex-wrap: wrap;
 }
-div.dataTables_wrapper div.dt-row {
+div.dt-container div.dt-row {
   position: relative;
 }

-div.dataTables_scrollHead table.dataTable {
+div.dt-scroll-head table.dataTable {
   margin-bottom: 0 !important;
 }

-div.dataTables_scrollBody > table {
+div.dt-scroll-body {
+  border-bottom-color: var(--bs-border-color);
+  border-bottom-width: var(--bs-border-width);
+  border-bottom-style: solid;
+}
+div.dt-scroll-body > table {
   border-top: none;
   margin-top: 0 !important;
   margin-bottom: 0 !important;
 }
-div.dataTables_scrollBody > table > thead .sorting:before,
-div.dataTables_scrollBody > table > thead .sorting_asc:before,
-div.dataTables_scrollBody > table > thead .sorting_desc:before,
-div.dataTables_scrollBody > table > thead .sorting:after,
-div.dataTables_scrollBody > table > thead .sorting_asc:after,
-div.dataTables_scrollBody > table > thead .sorting_desc:after {
-  display: none;
+div.dt-scroll-body > table > tbody > tr:first-child {
+  border-top-width: 0;
 }
-div.dataTables_scrollBody > table > tbody tr:first-child th,
-div.dataTables_scrollBody > table > tbody tr:first-child td {
-  border-top: none;
+div.dt-scroll-body > table > thead > tr {
+  border-width: 0 !important;
+}
+div.dt-scroll-body > table > tbody > tr:last-child > * {
+  border-bottom: none;
 }

-div.dataTables_scrollFoot > .dataTables_scrollFootInner {
+div.dt-scroll-foot > .dt-scroll-footInner {
   box-sizing: content-box;
 }
-div.dataTables_scrollFoot > .dataTables_scrollFootInner > table {
+div.dt-scroll-foot > .dt-scroll-footInner > table {
   margin-top: 0 !important;
   border-top: none;
 }
+div.dt-scroll-foot > .dt-scroll-footInner > table > tfoot > tr:first-child {
+  border-top-width: 0 !important;
+}

 @media screen and (max-width: 767px) {
-  div.dataTables_wrapper div.dataTables_length,
-  div.dataTables_wrapper div.dataTables_filter,
-  div.dataTables_wrapper div.dataTables_info,
-  div.dataTables_wrapper div.dataTables_paginate {
+  div.dt-container div.dt-length,
+  div.dt-container div.dt-search,
+  div.dt-container div.dt-info,
+  div.dt-container div.dt-paging {
     text-align: center;
   }
-  div.dataTables_wrapper div.dataTables_paginate ul.pagination {
+  div.dt-container .row {
+    --bs-gutter-y: 0.5rem;
+  }
+  div.dt-container div.dt-paging ul.pagination {
     justify-content: center !important;
   }
 }
-table.dataTable.table-sm > thead > tr > th:not(.sorting_disabled) {
+table.dataTable.table-sm > thead > tr th.dt-orderable-asc, table.dataTable.table-sm > thead > tr th.dt-orderable-desc, table.dataTable.table-sm > thead > tr th.dt-ordering-asc, table.dataTable.table-sm > thead > tr th.dt-ordering-desc,
+table.dataTable.table-sm > thead > tr td.dt-orderable-asc,
+table.dataTable.table-sm > thead > tr td.dt-orderable-desc,
+table.dataTable.table-sm > thead > tr td.dt-ordering-asc,
+table.dataTable.table-sm > thead > tr td.dt-ordering-desc {
   padding-right: 20px;
 }
-table.table-bordered.dataTable {
-  border-right-width: 0;
-}
-table.table-bordered.dataTable thead tr:first-child th,
-table.table-bordered.dataTable thead tr:first-child td {
-  border-top-width: 1px;
-}
-table.table-bordered.dataTable th,
-table.table-bordered.dataTable td {
-  border-left-width: 0;
-}
-table.table-bordered.dataTable th:first-child, table.table-bordered.dataTable th:first-child,
-table.table-bordered.dataTable td:first-child,
-table.table-bordered.dataTable td:first-child {
-  border-left-width: 1px;
-}
-table.table-bordered.dataTable th:last-child, table.table-bordered.dataTable th:last-child,
-table.table-bordered.dataTable td:last-child,
-table.table-bordered.dataTable td:last-child {
-  border-right-width: 1px;
-}
-table.table-bordered.dataTable th,
-table.table-bordered.dataTable td {
-  border-bottom-width: 1px;
+table.dataTable.table-sm > thead > tr th.dt-orderable-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-orderable-desc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-desc span.dt-column-order,
+table.dataTable.table-sm > thead > tr td.dt-orderable-asc span.dt-column-order,
+table.dataTable.table-sm > thead > tr td.dt-orderable-desc span.dt-column-order,
+table.dataTable.table-sm > thead > tr td.dt-ordering-asc span.dt-column-order,
+table.dataTable.table-sm > thead > tr td.dt-ordering-desc span.dt-column-order {
+  right: 5px;
 }

-div.dataTables_scrollHead table.table-bordered {
+div.dt-scroll-head table.table-bordered {
   border-bottom-width: 0;
 }

-div.table-responsive > div.dataTables_wrapper > div.row {
+div.table-responsive > div.dt-container > div.row {
   margin: 0;
 }
-div.table-responsive > div.dataTables_wrapper > div.row > div[class^=col-]:first-child {
+div.table-responsive > div.dt-container > div.row > div[class^=col-]:first-child {
   padding-left: 0;
 }
-div.table-responsive > div.dataTables_wrapper > div.row > div[class^=col-]:last-child {
+div.table-responsive > div.dt-container > div.row > div[class^=col-]:last-child {
   padding-right: 0;
 }

12000 src/static/scripts/datatables.js (vendored)
File diff suppressed because it is too large.
@@ -1,12 +1,12 @@
 /*!
- * jQuery JavaScript Library v3.7.0 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/animatedSelector,-effects/Tween
+ * jQuery JavaScript Library v3.7.1 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/animatedSelector,-effects/Tween
 * https://jquery.com/
 *
 * Copyright OpenJS Foundation and other contributors
 * Released under the MIT license
 * https://jquery.org/license
 *
- * Date: 2023-05-11T18:29Z
+ * Date: 2023-08-28T13:37Z
 */
 ( function( global, factory ) {

@@ -147,7 +147,7 @@ function toType( obj ) {



-var version = "3.7.0 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/animatedSelector,-effects/Tween",
+var version = "3.7.1 -ajax,-ajax/jsonp,-ajax/load,-ajax/script,-ajax/var/location,-ajax/var/nonce,-ajax/var/rquery,-ajax/xhr,-manipulation/_evalUrl,-deprecated/ajax-event-alias,-effects,-effects/animatedSelector,-effects/Tween",

 rhtmlSuffix = /HTML$/i,

@@ -411,9 +411,14 @@ jQuery.extend( {
 			// Do not traverse comment nodes
 			ret += jQuery.text( node );
 		}
-	} else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
+	}
+	if ( nodeType === 1 || nodeType === 11 ) {
 		return elem.textContent;
-	} else if ( nodeType === 3 || nodeType === 4 ) {
+	}
+	if ( nodeType === 9 ) {
+		return elem.documentElement.textContent;
+	}
+	if ( nodeType === 3 || nodeType === 4 ) {
 		return elem.nodeValue;
 	}

@@ -1126,12 +1131,17 @@ function setDocument( node ) {
 		documentElement.msMatchesSelector;

 	// Support: IE 9 - 11+, Edge 12 - 18+
-	// Accessing iframe documents after unload throws "permission denied" errors (see trac-13936)
-	// Support: IE 11+, Edge 17 - 18+
-	// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-	// two documents; shallow comparisons work.
-	// eslint-disable-next-line eqeqeq
-	if ( preferredDoc != document &&
+	// Accessing iframe documents after unload throws "permission denied" errors
+	// (see trac-13936).
+	// Limit the fix to IE & Edge Legacy; despite Edge 15+ implementing `matches`,
+	// all IE 9+ and Edge Legacy versions implement `msMatchesSelector` as well.
+	if ( documentElement.msMatchesSelector &&
+
+		// Support: IE 11+, Edge 17 - 18+
+		// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+		// two documents; shallow comparisons work.
+		// eslint-disable-next-line eqeqeq
+		preferredDoc != document &&
 		( subWindow = document.defaultView ) && subWindow.top !== subWindow ) {

 	// Support: IE 9 - 11+, Edge 12 - 18+
@@ -2694,12 +2704,12 @@ jQuery.find = find;
 jQuery.expr[ ":" ] = jQuery.expr.pseudos;
 jQuery.unique = jQuery.uniqueSort;

-// These have always been private, but they used to be documented
-// as part of Sizzle so let's maintain them in the 3.x line
-// for backwards compatibility purposes.
+// These have always been private, but they used to be documented as part of
+// Sizzle so let's maintain them for now for backwards compatibility purposes.
 find.compile = compile;
 find.select = select;
 find.setDocument = setDocument;
+find.tokenize = tokenize;

 find.escape = jQuery.escapeSelector;
 find.getText = jQuery.text;
@@ -5913,7 +5923,7 @@ function domManip( collection, args, callback, ignored ) {
 		if ( hasScripts ) {
 			doc = scripts[ scripts.length - 1 ].ownerDocument;

-			// Reenable scripts
+			// Re-enable scripts
 			jQuery.map( scripts, restoreScript );

 			// Evaluate executable scripts on first document insertion
@@ -6370,7 +6380,7 @@ var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" );
 	trChild = document.createElement( "div" );

 	table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate";
-	tr.style.cssText = "border:1px solid";
+	tr.style.cssText = "box-sizing:content-box;border:1px solid";

 	// Support: Chrome 86+
 	// Height set through cssText does not get applied.
@@ -6382,7 +6392,7 @@ var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" );
 	// In our bodyBackground.html iframe,
 	// display for all div elements is set to "inline",
 	// which causes a problem only in Android 8 Chrome 86.
-	// Ensuring the div is display: block
+	// Ensuring the div is `display: block`
 	// gets around this issue.
 	trChild.style.display = "block";

@@ -8451,7 +8461,9 @@ jQuery.fn.extend( {
 	},

 	hover: function( fnOver, fnOut ) {
-		return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver );
+		return this
+			.on( "mouseenter", fnOver )
+			.on( "mouseleave", fnOut || fnOver );
 	}
 } );

@@ -23,12 +23,12 @@
 {{#if page_data.web_vault_enabled}}
 <dt class="col-sm-5">Web Installed
 <span class="badge bg-success d-none" id="web-success" title="Latest version is installed.">Ok</span>
-<span class="badge bg-warning d-none" id="web-warning" title="There seems to be an update available.">Update</span>
+<span class="badge bg-warning text-dark d-none" id="web-warning" title="There seems to be an update available.">Update</span>
 </dt>
 <dd class="col-sm-7">
 <span id="web-installed">{{page_data.web_vault_version}}</span>
 </dd>
-{{#unless page_data.running_within_docker}}
+{{#unless page_data.running_within_container}}
 <dt class="col-sm-5">Web Latest
 <span class="badge bg-secondary d-none" id="web-failed" title="Unable to determine latest version.">Unknown</span>
 </dt>
@@ -59,12 +59,12 @@
 <dd class="col-sm-7">
 <span class="d-block"><b>{{ page_data.host_os }} / {{ page_data.host_arch }}</b></span>
 </dd>
-<dt class="col-sm-5">Running within Docker</dt>
+<dt class="col-sm-5">Running within a container</dt>
 <dd class="col-sm-7">
-{{#if page_data.running_within_docker}}
-<span class="d-block"><b>Yes (Base: {{ page_data.docker_base_image }})</b></span>
+{{#if page_data.running_within_container}}
+<span class="d-block"><b>Yes (Base: {{ page_data.container_base_image }})</b></span>
 {{/if}}
-{{#unless page_data.running_within_docker}}
+{{#unless page_data.running_within_container}}
 <span class="d-block"><b>No</b></span>
 {{/unless}}
 </dd>
@@ -17,12 +17,12 @@
 {{#each page_data}}
 <tr>
 <td>
-<svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{Id}}">
+<svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{id}}">
 <div class="float-start">
-<strong>{{Name}}</strong>
-<span class="me-2">({{BillingEmail}})</span>
+<strong>{{name}}</strong>
+<span class="me-2">({{billingEmail}})</span>
 <span class="d-block">
-<span class="badge bg-success font-monospace">{{Id}}</span>
+<span class="badge bg-success font-monospace">{{id}}</span>
 </span>
 </div>
 </td>
@@ -44,7 +44,7 @@
 <span class="d-block"><strong>Events:</strong> {{event_count}}</span>
 </td>
 <td class="text-end px-0 small">
-<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-delete-organization data-vw-org-uuid="{{jsesc Id no_quote}}" data-vw-org-name="{{jsesc Name no_quote}}" data-vw-billing-email="{{jsesc BillingEmail no_quote}}">Delete Organization</button><br>
+<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-delete-organization data-vw-org-uuid="{{jsesc id no_quote}}" data-vw-org-name="{{jsesc name no_quote}}" data-vw-billing-email="{{jsesc billingEmail no_quote}}">Delete Organization</button><br>
 </td>
 </tr>
 {{/each}}
@@ -59,7 +59,7 @@
 </main>

 <link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
-<script src="{{urlpath}}/vw_static/jquery-3.7.0.slim.js"></script>
+<script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script>
 <script src="{{urlpath}}/vw_static/datatables.js"></script>
 <script src="{{urlpath}}/vw_static/admin_organizations.js"></script>
-<script src="{{urlpath}}/vw_static/jdenticon.js"></script>
+<script src="{{urlpath}}/vw_static/jdenticon-3.3.0.js"></script>
|
|||||||
{{#each page_data}}
|
{{#each page_data}}
|
||||||
<tr>
|
<tr>
|
||||||
<td>
|
<td>
|
||||||
<svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{Email}}">
|
<svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{email}}">
|
||||||
<div class="float-start">
|
<div class="float-start">
|
||||||
<strong>{{Name}}</strong>
|
<strong>{{name}}</strong>
|
||||||
<span class="d-block">{{Email}}</span>
|
<span class="d-block">{{email}}</span>
|
||||||
<span class="d-block">
|
<span class="d-block">
|
||||||
{{#unless user_enabled}}
|
{{#unless user_enabled}}
|
||||||
<span class="badge bg-danger me-2" title="User is disabled">Disabled</span>
|
<span class="badge bg-danger me-2" title="User is disabled">Disabled</span>
|
||||||
{{/unless}}
|
{{/unless}}
|
||||||
{{#if TwoFactorEnabled}}
|
{{#if twoFactorEnabled}}
|
||||||
<span class="badge bg-success me-2" title="2FA is enabled">2FA</span>
|
<span class="badge bg-success me-2" title="2FA is enabled">2FA</span>
|
||||||
{{/if}}
|
{{/if}}
|
||||||
{{#case _Status 1}}
|
{{#case _status 1}}
|
||||||
<span class="badge bg-warning text-dark me-2" title="User is invited">Invited</span>
|
<span class="badge bg-warning text-dark me-2" title="User is invited">Invited</span>
|
||||||
{{/case}}
|
{{/case}}
|
||||||
{{#if EmailVerified}}
|
{{#if emailVerified}}
|
||||||
<span class="badge bg-success me-2" title="Email has been verified">Verified</span>
|
<span class="badge bg-success me-2" title="Email has been verified">Verified</span>
|
||||||
{{/if}}
|
{{/if}}
|
||||||
</span>
|
</span>
|
||||||
@@ -54,15 +54,15 @@
 {{/if}}
 </td>
 <td>
-<div class="overflow-auto vw-org-cell" data-vw-user-email="{{jsesc Email no_quote}}" data-vw-user-uuid="{{jsesc Id no_quote}}">
-{{#each Organizations}}
-<button class="badge" data-bs-toggle="modal" data-bs-target="#userOrgTypeDialog" data-vw-org-type="{{Type}}" data-vw-org-uuid="{{jsesc Id no_quote}}" data-vw-org-name="{{jsesc Name no_quote}}">{{Name}}</button>
+<div class="overflow-auto vw-org-cell" data-vw-user-email="{{jsesc email no_quote}}" data-vw-user-uuid="{{jsesc id no_quote}}">
+{{#each organizations}}
+<button class="badge" data-bs-toggle="modal" data-bs-target="#userOrgTypeDialog" data-vw-org-type="{{type}}" data-vw-org-uuid="{{jsesc id no_quote}}" data-vw-org-name="{{jsesc name no_quote}}">{{name}}</button>
 {{/each}}
 </div>
 </td>
 <td class="text-end px-0 small">
-<span data-vw-user-uuid="{{jsesc Id no_quote}}" data-vw-user-email="{{jsesc Email no_quote}}">
-{{#if TwoFactorEnabled}}
+<span data-vw-user-uuid="{{jsesc id no_quote}}" data-vw-user-email="{{jsesc email no_quote}}">
+{{#if twoFactorEnabled}}
 <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-remove2fa>Remove all 2FA</button><br>
 {{/if}}
 <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-deauth-user>Deauthorize sessions</button><br>
@@ -72,7 +72,7 @@
 {{else}}
 <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-enable-user>Enable User</button><br>
 {{/if}}
-{{#case _Status 1}}
+{{#case _status 1}}
 <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-resend-user-invite>Resend invite</button><br>
 {{/case}}
 </span>
@@ -140,7 +140,7 @@
 </main>

 <link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
-<script src="{{urlpath}}/vw_static/jquery-3.7.0.slim.js"></script>
+<script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script>
 <script src="{{urlpath}}/vw_static/datatables.js"></script>
 <script src="{{urlpath}}/vw_static/admin_users.js"></script>
-<script src="{{urlpath}}/vw_static/jdenticon.js"></script>
+<script src="{{urlpath}}/vw_static/jdenticon-3.3.0.js"></script>
@@ -2,5 +2,5 @@ Your Email Change
 <!---------------->
 To finalize changing your email address enter the following code in web vault: {{token}}

-If you did not try to change an email address, you can safely ignore this email.
+If you did not try to change your email address, contact your administrator.
 {{> email/email_footer_text }}
@@ -9,7 +9,7 @@ Your Email Change
 </tr>
 <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
 <td class="content-block last" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
-If you did not try to change an email address, you can safely ignore this email.
+If you did not try to change your email address, contact your administrator.
 </td>
 </tr>
 </table>
6 src/static/templates/email/protected_action.hbs (new file)
@@ -0,0 +1,6 @@
+Your Vaultwarden Verification Code
+<!---------------->
+Your email verification code is: {{token}}
+
+Use this code to complete the protected action in Vaultwarden.
+{{> email/email_footer_text }}

16 src/static/templates/email/protected_action.html.hbs (new file)
@@ -0,0 +1,16 @@
+Your Vaultwarden Verification Code
+<!---------------->
+{{> email/email_header }}
+<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
+<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
+<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
+Your email verification code is: <b>{{token}}</b>
+</td>
+</tr>
+<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
+<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
+Use this code to complete the protected action in Vaultwarden.
+</td>
+</tr>
+</table>
+{{> email/email_footer }}
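The two new templates above carry the {{token}} placeholder that the server fills in when a protected action needs email verification. As a hedged sketch only (the actual registration and mail path lives in vaultwarden's mail code, which this diff does not show), rendering such a template with the handlebars crate could look like this; the template name and token value are hypothetical:

use handlebars::Handlebars;
use serde_json::json;

// Assumes "email/protected_action" was registered from the .hbs source above.
fn render_protected_action(hb: &Handlebars<'_>) -> Result<String, handlebars::RenderError> {
    hb.render("email/protected_action", &json!({ "token": "123456" }))
}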
444 src/util.rs
@@ -1,11 +1,10 @@
 //
 // Web Headers and caching
 //
-use std::{
-    io::{Cursor, ErrorKind},
-    ops::Deref,
-};
+use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};

+use num_traits::ToPrimitive;
+use once_cell::sync::Lazy;
 use rocket::{
     fairing::{Fairing, Info, Kind},
     http::{ContentType, Header, HeaderMap, Method, Status},
@@ -46,6 +45,7 @@ impl Fairing for AppHeaders {
             // Remove headers which could cause websocket connection issues
             res.remove_header("X-Frame-Options");
             res.remove_header("X-Content-Type-Options");
+            res.remove_header("Permissions-Policy");
             return;
         }
         (_, _) => (),
@@ -215,7 +215,7 @@ impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cache
         res.set_raw_header("Cache-Control", cache_control_header);

         let time_now = chrono::Local::now();
-        let expiry_time = time_now + chrono::Duration::seconds(self.ttl.try_into().unwrap());
+        let expiry_time = time_now + chrono::TimeDelta::try_seconds(self.ttl.try_into().unwrap()).unwrap();
         res.set_raw_header("Expires", format_datetime_http(&expiry_time));
         Ok(res)
     }
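The hunk above swaps chrono's panicking Duration::seconds constructor for the fallible chrono::TimeDelta::try_seconds introduced in recent chrono releases. A minimal sketch of the difference, assuming a chrono version that exposes TimeDelta (my example, not code from this diff):

use chrono::TimeDelta;

fn main() {
    // try_seconds returns Option<TimeDelta>: None on overflow instead of
    // panicking, which is why the call site above unwraps explicitly.
    assert!(TimeDelta::try_seconds(3600).is_some());
    assert!(TimeDelta::try_seconds(i64::MAX).is_none());
}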
@@ -331,52 +331,14 @@ impl Fairing for BetterLogging {
     }
 }

-//
-// File handling
-//
-use std::{
-    fs::{self, File},
-    io::Result as IOResult,
-    path::Path,
-};
-
-pub fn file_exists(path: &str) -> bool {
-    Path::new(path).exists()
-}
-
-pub fn write_file(path: &str, content: &[u8]) -> Result<(), crate::error::Error> {
-    use std::io::Write;
-    let mut f = match File::create(path) {
-        Ok(file) => file,
-        Err(e) => {
-            if e.kind() == ErrorKind::PermissionDenied {
-                error!("Can't create '{}': Permission denied", path);
-            }
-            return Err(From::from(e));
-        }
-    };
-
-    f.write_all(content)?;
-    f.flush()?;
-    Ok(())
-}
-
-pub fn delete_file(path: &str) -> IOResult<()> {
-    let res = fs::remove_file(path);
-
-    if let Some(parent) = Path::new(path).parent() {
-        // If the directory isn't empty, this returns an error, which we ignore
-        // We only want to delete the folder if it's empty
-        fs::remove_dir(parent).ok();
-    }
-
-    res
-}
-
-pub fn get_display_size(size: i32) -> String {
+pub fn get_display_size(size: i64) -> String {
     const UNITS: [&str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];

-    let mut size: f64 = size.into();
+    // If we're somehow too big for a f64, just return the size in bytes
+    let Some(mut size) = size.to_f64() else {
+        return format!("{size} bytes");
+    };

     let mut unit_counter = 0;

     loop {
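The hunk above widens get_display_size from i32 to i64 and replaces the infallible size.into() with num_traits::ToPrimitive::to_f64, falling back to a raw byte count when the conversion fails. A standalone sketch of the same divide-by-1024 loop, illustrative rather than the exact vaultwarden code:

fn display_size(size: i64) -> String {
    const UNITS: [&str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];
    // The real code converts via ToPrimitive::to_f64 and bails out with
    // "{size} bytes" if that fails; a plain cast keeps this sketch
    // dependency-free.
    let mut size = size as f64;
    let mut unit_counter = 0;
    while size >= 1024.0 && unit_counter < UNITS.len() - 1 {
        size /= 1024.0;
        unit_counter += 1;
    }
    format!("{size:.2} {}", UNITS[unit_counter]) // e.g. 1_572_864 -> "1.50 MB"
}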
@@ -445,7 +407,7 @@ pub fn get_env_str_value(key: &str) -> Option<String> {
     match (value_from_env, value_file) {
         (Ok(_), Ok(_)) => panic!("You should not define both {key} and {key_file}!"),
         (Ok(v_env), Err(_)) => Some(v_env),
-        (Err(_), Ok(v_file)) => match fs::read_to_string(v_file) {
+        (Err(_), Ok(v_file)) => match std::fs::read_to_string(v_file) {
             Ok(content) => Some(content.trim().to_string()),
             Err(e) => panic!("Failed to load {key}: {e:?}"),
         },
@@ -532,14 +494,17 @@ pub fn parse_date(date: &str) -> NaiveDateTime {
 // Deployment environment methods
 //

-/// Returns true if the program is running in Docker or Podman.
-pub fn is_running_in_docker() -> bool {
-    Path::new("/.dockerenv").exists() || Path::new("/run/.containerenv").exists()
+/// Returns true if the program is running in Docker, Podman or Kubernetes.
+pub fn is_running_in_container() -> bool {
+    Path::new("/.dockerenv").exists()
+        || Path::new("/run/.containerenv").exists()
+        || Path::new("/run/secrets/kubernetes.io").exists()
+        || Path::new("/var/run/secrets/kubernetes.io").exists()
 }

-/// Simple check to determine on which docker base image vaultwarden is running.
+/// Simple check to determine on which container base image vaultwarden is running.
 /// We build images based upon Debian or Alpine, so these we check here.
-pub fn docker_base_image() -> &'static str {
+pub fn container_base_image() -> &'static str {
     if Path::new("/etc/debian_version").exists() {
         "Debian"
     } else if Path::new("/etc/alpine-release").exists() {
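The rename from is_running_in_docker to is_running_in_container adds Kubernetes service-account paths to the existing Docker and Podman probes, which is what the "Running within a container" row on the diagnostics page now reflects. A sketch of a hypothetical call site built on the two functions defined in the hunk above:

// Hypothetical helper, not part of this diff; it pairs the two checks the
// way the admin diagnostics template consumes them.
fn container_info() -> (bool, &'static str) {
    let within = is_running_in_container();
    let base = if within { container_base_image() } else { "N/A" };
    (within, base)
}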
@@ -556,30 +521,38 @@ pub fn docker_base_image() -> &'static str {
 use std::fmt;

 use serde::de::{self, DeserializeOwned, Deserializer, MapAccess, SeqAccess, Visitor};
-use serde_json::{self, Value};
+use serde_json::Value;

 pub type JsonMap = serde_json::Map<String, Value>;

 #[derive(Serialize, Deserialize)]
-pub struct UpCase<T: DeserializeOwned> {
-    #[serde(deserialize_with = "upcase_deserialize")]
+pub struct LowerCase<T: DeserializeOwned> {
+    #[serde(deserialize_with = "lowercase_deserialize")]
     #[serde(flatten)]
     pub data: T,
 }

+impl Default for LowerCase<Value> {
+    fn default() -> Self {
+        Self {
+            data: Value::Null,
+        }
+    }
+}
+
 // https://github.com/serde-rs/serde/issues/586
-pub fn upcase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
+pub fn lowercase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
 where
     T: DeserializeOwned,
     D: Deserializer<'de>,
 {
-    let d = deserializer.deserialize_any(UpCaseVisitor)?;
+    let d = deserializer.deserialize_any(LowerCaseVisitor)?;
     T::deserialize(d).map_err(de::Error::custom)
 }

-struct UpCaseVisitor;
+struct LowerCaseVisitor;

-impl<'de> Visitor<'de> for UpCaseVisitor {
+impl<'de> Visitor<'de> for LowerCaseVisitor {
     type Value = Value;

     fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
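This hunk inverts the old UpCase machinery: instead of PascalCasing incoming JSON keys, the LowerCase visitor normalizes them to lowerCamelCase, which also explains the {{Id}} to {{id}} changes in the admin templates earlier in this diff. A self-contained sketch of the key transformation the visitor performs (my reconstruction of the idea, not the visitor itself; the real code also special-cases "ssn"):

use serde_json::{Map, Value};

// Lowercase the first character of every object key, recursing into nested
// objects and arrays, and leave scalar values untouched.
fn lcase_first_keys(value: Value) -> Value {
    match value {
        Value::Object(map) => Value::Object(
            map.into_iter()
                .map(|(k, v)| {
                    let mut chars = k.chars();
                    let key = match chars.next() {
                        Some(c) => c.to_lowercase().collect::<String>() + chars.as_str(),
                        None => k.clone(),
                    };
                    (key, lcase_first_keys(v))
                })
                .collect::<Map<String, Value>>(),
        ),
        Value::Array(items) => Value::Array(items.into_iter().map(lcase_first_keys).collect()),
        other => other,
    }
}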
@@ -593,7 +566,7 @@ impl<'de> Visitor<'de> for UpCaseVisitor {
         let mut result_map = JsonMap::new();

         while let Some((key, value)) = map.next_entry()? {
-            result_map.insert(upcase_first(key), upcase_value(value));
+            result_map.insert(_process_key(key), convert_json_key_lcase_first(value));
         }

         Ok(Value::Object(result_map))
@@ -606,41 +579,60 @@ impl<'de> Visitor<'de> for UpCaseVisitor {
         let mut result_seq = Vec::<Value>::new();

         while let Some(value) = seq.next_element()? {
-            result_seq.push(upcase_value(value));
+            result_seq.push(convert_json_key_lcase_first(value));
         }

         Ok(Value::Array(result_seq))
     }
 }

-fn upcase_value(value: Value) -> Value {
-    if let Value::Object(map) = value {
-        let mut new_value = Value::Object(serde_json::Map::new());
-
-        for (key, val) in map.into_iter() {
-            let processed_key = _process_key(&key);
-            new_value[processed_key] = upcase_value(val);
-        }
-        new_value
-    } else if let Value::Array(array) = value {
-        // Initialize array with null values
-        let mut new_value = Value::Array(vec![Value::Null; array.len()]);
-
-        for (index, val) in array.into_iter().enumerate() {
-            new_value[index] = upcase_value(val);
-        }
-        new_value
-    } else {
-        value
-    }
-}
-
 // Inner function to handle a special case for the 'ssn' key.
 // This key is part of the Identity Cipher (Social Security Number)
 fn _process_key(key: &str) -> String {
     match key.to_lowercase().as_ref() {
-        "ssn" => "SSN".into(),
-        _ => self::upcase_first(key),
+        "ssn" => "ssn".into(),
+        _ => self::lcase_first(key),
+    }
+}
+
+#[derive(Clone, Debug, Deserialize)]
+#[serde(untagged)]
+pub enum NumberOrString {
+    Number(i64),
+    String(String),
+}
+
+impl NumberOrString {
+    pub fn into_string(self) -> String {
+        match self {
+            NumberOrString::Number(n) => n.to_string(),
+            NumberOrString::String(s) => s,
+        }
+    }
+
+    #[allow(clippy::wrong_self_convention)]
+    pub fn into_i32(&self) -> Result<i32, crate::Error> {
+        use std::num::ParseIntError as PIE;
+        match self {
+            NumberOrString::Number(n) => match n.to_i32() {
+                Some(n) => Ok(n),
+                None => err!("Number does not fit in i32"),
+            },
+            NumberOrString::String(s) => {
+                s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
+            }
+        }
+    }
+
+    #[allow(clippy::wrong_self_convention)]
+    pub fn into_i64(&self) -> Result<i64, crate::Error> {
+        use std::num::ParseIntError as PIE;
+        match self {
+            NumberOrString::Number(n) => Ok(*n),
+            NumberOrString::String(s) => {
+                s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string()))
+            }
+        }
     }
 }
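`NumberOrString`, moved into this file by the hunk above, is serde's untagged-enum trick: clients serialize some numeric fields as JSON numbers and others as strings, and an untagged enum accepts either shape. A standalone sketch (the `iterations` field is illustrative):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(untagged)]
    enum NumberOrString {
        Number(i64),
        String(String),
    }

    impl NumberOrString {
        // Normalize either representation to an i64, if it parses.
        fn as_i64(&self) -> Option<i64> {
            match self {
                NumberOrString::Number(n) => Some(*n),
                NumberOrString::String(s) => s.parse().ok(),
            }
        }
    }

    #[derive(Debug, Deserialize)]
    struct KdfParams {
        iterations: NumberOrString,
    }

    fn main() {
        // Both payloads deserialize into the same struct.
        let a: KdfParams = serde_json::from_str(r#"{"iterations": 600000}"#).unwrap();
        let b: KdfParams = serde_json::from_str(r#"{"iterations": "600000"}"#).unwrap();
        assert_eq!(a.iterations.as_i64(), b.iterations.as_i64());
    }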
@@ -696,14 +688,9 @@ where

 use reqwest::{header, Client, ClientBuilder};

-pub fn get_reqwest_client() -> Client {
-    match get_reqwest_client_builder().build() {
-        Ok(client) => client,
-        Err(e) => {
-            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-            get_reqwest_client_builder().trust_dns(false).build().expect("Failed to build client")
-        }
-    }
+pub fn get_reqwest_client() -> &'static Client {
+    static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
+    &INSTANCE
 }

 pub fn get_reqwest_client_builder() -> ClientBuilder {
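This replaces the build-and-fallback logic with the standard `once_cell` lazy-singleton pattern: the first caller constructs the `Client` (and its connection pool) exactly once, every later caller gets the same `&'static` reference, and a construction failure now panics early instead of being retried on every call. A minimal sketch of the pattern:

    use once_cell::sync::Lazy;

    struct ExpensiveThing {
        id: u32,
    }

    fn get_instance() -> &'static ExpensiveThing {
        // Built on first call only; later calls return the same reference.
        static INSTANCE: Lazy<ExpensiveThing> = Lazy::new(|| {
            println!("building (printed exactly once)");
            ExpensiveThing { id: 42 }
        });
        &INSTANCE
    }

    fn main() {
        let a = get_instance();
        let b = get_instance();
        assert!(std::ptr::eq(a, b)); // same allocation, not a copy
        assert_eq!(b.id, 42);
    }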
@@ -725,25 +712,25 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {

         Value::Object(obj) => {
             let mut json_map = JsonMap::new();
-            for (key, value) in obj.iter() {
+            for (key, value) in obj.into_iter() {
                 match (key, value) {
                     (key, Value::Object(elm)) => {
-                        let inner_value = convert_json_key_lcase_first(Value::Object(elm.clone()));
-                        json_map.insert(lcase_first(key), inner_value);
+                        let inner_value = convert_json_key_lcase_first(Value::Object(elm));
+                        json_map.insert(_process_key(&key), inner_value);
                     }

                     (key, Value::Array(elm)) => {
                         let mut inner_array: Vec<Value> = Vec::with_capacity(elm.len());

                         for inner_obj in elm {
-                            inner_array.push(convert_json_key_lcase_first(inner_obj.clone()));
+                            inner_array.push(convert_json_key_lcase_first(inner_obj));
                         }

-                        json_map.insert(lcase_first(key), Value::Array(inner_array));
+                        json_map.insert(_process_key(&key), Value::Array(inner_array));
                     }

                     (key, value) => {
-                        json_map.insert(lcase_first(key), value.clone());
+                        json_map.insert(_process_key(&key), value);
                     }
                 }
             }
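The `iter()` to `into_iter()` switch is a clone-avoidance cleanup: iterating the `serde_json::Map` by value moves each key and nested `Value` out of the map, so the `.clone()` calls on the removed lines become plain moves. A tiny sketch of the difference:

    use serde_json::{json, Value};

    fn main() {
        let v = json!({"A": {"B": 1}});
        if let Value::Object(map) = v {
            // By-value iteration: `key` and `value` are owned here, so no clone
            // is needed to store or transform them. `map.iter()` would yield
            // references and force `value.clone()` before inserting elsewhere.
            for (key, value) in map.into_iter() {
                let owned: Value = value;
                println!("moved {key} -> {owned}");
            }
        }
    }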
@@ -754,3 +741,256 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
         value => value,
     }
 }
+
+/// Parses the experimental client feature flags string into a HashMap.
+pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags: &str) -> HashMap<String, bool> {
+    let feature_states =
+        experimental_client_feature_flags.to_lowercase().split(',').map(|f| (f.trim().to_owned(), true)).collect();
+
+    feature_states
+}
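A quick sketch of what the parser above produces: each comma-separated entry is trimmed, lowercased, and mapped to `true` (flag names here are illustrative):

    use std::collections::HashMap;

    // Mirrors parse_experimental_client_feature_flags above.
    fn parse_flags(s: &str) -> HashMap<String, bool> {
        s.to_lowercase().split(',').map(|f| (f.trim().to_owned(), true)).collect()
    }

    fn main() {
        let flags = parse_flags("Fido2-Vault-Credentials, autofill-v2");
        assert_eq!(flags.get("fido2-vault-credentials"), Some(&true));
        assert_eq!(flags.get("autofill-v2"), Some(&true));
        assert_eq!(flags.get("unset-flag"), None);
    }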
+
+mod dns_resolver {
+    use std::{
+        fmt,
+        net::{IpAddr, SocketAddr},
+        sync::Arc,
+    };
+
+    use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
+    use once_cell::sync::Lazy;
+    use reqwest::dns::{Name, Resolve, Resolving};
+
+    use crate::{util::is_global, CONFIG};
+
+    #[derive(Debug, Clone)]
+    pub enum CustomResolverError {
+        Blacklist {
+            domain: String,
+        },
+        NonGlobalIp {
+            domain: String,
+            ip: IpAddr,
+        },
+    }
+
+    impl CustomResolverError {
+        pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
+            let mut source = e.source();
+
+            while let Some(err) = source {
+                source = err.source();
+                if let Some(err) = err.downcast_ref::<CustomResolverError>() {
+                    return Some(err);
+                }
+            }
+            None
+        }
+    }
+
+    impl fmt::Display for CustomResolverError {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            match self {
+                Self::Blacklist {
+                    domain,
+                } => write!(f, "Blacklisted domain: {domain} matched ICON_BLACKLIST_REGEX"),
+                Self::NonGlobalIp {
+                    domain,
+                    ip,
+                } => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
+            }
+        }
+    }
+
+    impl std::error::Error for CustomResolverError {}
+
+    #[derive(Debug, Clone)]
+    pub enum CustomDnsResolver {
+        Default(),
+        Hickory(Arc<TokioAsyncResolver>),
+    }
+    type BoxError = Box<dyn std::error::Error + Send + Sync>;
+
+    impl CustomDnsResolver {
+        pub fn instance() -> Arc<Self> {
+            static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
+            Arc::clone(&*INSTANCE)
+        }
+
+        fn new() -> Arc<Self> {
+            match read_system_conf() {
+                Ok((config, opts)) => {
+                    let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
+                    Arc::new(Self::Hickory(Arc::new(resolver)))
+                }
+                Err(e) => {
+                    warn!("Error creating Hickory resolver, falling back to default: {e:?}");
+                    Arc::new(Self::Default())
+                }
+            }
+        }
+
+        // Note that we get an iterator of addresses, but we only grab the first one for convenience
+        async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
+            pre_resolve(name)?;
+
+            let result = match self {
+                Self::Default() => tokio::net::lookup_host(name).await?.next(),
+                Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
+            };
+
+            if let Some(addr) = &result {
+                post_resolve(name, addr.ip())?;
+            }
+
+            Ok(result)
+        }
+    }
+
+    fn pre_resolve(name: &str) -> Result<(), CustomResolverError> {
+        if crate::api::is_domain_blacklisted(name) {
+            return Err(CustomResolverError::Blacklist {
+                domain: name.to_string(),
+            });
+        }
+
+        Ok(())
+    }
+
+    fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomResolverError> {
+        if CONFIG.icon_blacklist_non_global_ips() && !is_global(ip) {
+            Err(CustomResolverError::NonGlobalIp {
+                domain: name.to_string(),
+                ip,
+            })
+        } else {
+            Ok(())
+        }
+    }
+
+    impl Resolve for CustomDnsResolver {
+        fn resolve(&self, name: Name) -> Resolving {
+            let this = self.clone();
+            Box::pin(async move {
+                let name = name.as_str();
+                let result = this.resolve_domain(name).await?;
+                Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
+            })
+        }
+    }
+}
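The `Resolve` impl at the end is the hook reqwest calls for every outbound lookup. The client-side wiring is not shown in this hunk, but it would plausibly look like the sketch below (assumed usage; `PassthroughResolver` is illustrative and skips the blacklist and non-global-IP checks the module above performs):

    use std::net::SocketAddr;
    use std::sync::Arc;

    use reqwest::dns::{Name, Resolve, Resolving};

    #[derive(Debug, Clone)]
    struct PassthroughResolver;

    impl Resolve for PassthroughResolver {
        fn resolve(&self, name: Name) -> Resolving {
            Box::pin(async move {
                // Delegate to the OS resolver; a real impl (like the one above)
                // would run pre/post checks around this call.
                let addrs: Vec<SocketAddr> =
                    tokio::net::lookup_host((name.as_str(), 0)).await?.collect();
                Ok::<reqwest::dns::Addrs, _>(Box::new(addrs.into_iter()))
            })
        }
    }

    fn build_client() -> reqwest::Client {
        // Every lookup made by this client now goes through the custom resolver.
        reqwest::Client::builder()
            .dns_resolver(Arc::new(PassthroughResolver))
            .build()
            .expect("Failed to build client")
    }

    fn main() {
        let _client = build_client();
    }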
+
+pub use dns_resolver::{CustomDnsResolver, CustomResolverError};
+
+/// TODO: This is extracted from IpAddr::is_global, which is unstable:
+/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
+/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
+#[allow(clippy::nonminimal_bool)]
+#[cfg(any(not(feature = "unstable"), test))]
+pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
+    match ip {
+        std::net::IpAddr::V4(ip) => {
+            !(ip.octets()[0] == 0 // "This network"
+            || ip.is_private()
+            || (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) //ip.is_shared()
+            || ip.is_loopback()
+            || ip.is_link_local()
+            // addresses reserved for future protocols (`192.0.0.0/24`)
+            ||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
+            || ip.is_documentation()
+            || (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
+            || (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
+            || ip.is_broadcast())
+        }
+        std::net::IpAddr::V6(ip) => {
+            !(ip.is_unspecified()
+            || ip.is_loopback()
+            // IPv4-mapped Address (`::ffff:0:0/96`)
+            || matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _])
+            // IPv4-IPv6 Translat. (`64:ff9b:1::/48`)
+            || matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
+            // Discard-Only Address Block (`100::/64`)
+            || matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _])
+            // IETF Protocol Assignments (`2001::/23`)
+            || (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
+                && !(
+                    // Port Control Protocol Anycast (`2001:1::1`)
+                    u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
+                    // Traversal Using Relays around NAT Anycast (`2001:1::2`)
+                    || u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
+                    // AMT (`2001:3::/32`)
+                    || matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _])
+                    // AS112-v6 (`2001:4:112::/48`)
+                    || matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
+                    // ORCHIDv2 (`2001:20::/28`)
+                    || matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
+                ))
+            || ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
+            || ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
+            || ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
+        }
+    }
+}
+
+#[cfg(not(feature = "unstable"))]
+pub use is_global_hardcoded as is_global;
+
+#[cfg(feature = "unstable")]
+#[inline(always)]
+pub fn is_global(ip: std::net::IpAddr) -> bool {
+    ip.is_global()
+}
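One detail worth unpacking in the IPv4 arm: the stand-in for the unstable `ip.is_shared()` tests whether the top two bits of the second octet are `01`, which together with a first octet of 100 pins down exactly the carrier-grade NAT block 100.64.0.0/10 (RFC 6598). A worked check of that bit mask:

    // Same bit test as the `ip.is_shared()` stand-in above.
    fn in_shared_range(octets: [u8; 4]) -> bool {
        octets[0] == 100 && (octets[1] & 0b1100_0000 == 0b0100_0000)
    }

    fn main() {
        assert!(in_shared_range([100, 64, 0, 1]));    // start of 100.64.0.0/10 (64 = 0b0100_0000)
        assert!(in_shared_range([100, 127, 255, 1])); // still inside the /10
        assert!(!in_shared_range([100, 63, 0, 1]));   // second octet 63 -> top bits 00, outside
        assert!(!in_shared_range([100, 128, 0, 1]));  // second octet 128 -> top bits 10, outside
    }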
+
+/// These are some tests to check that the implementations match
+/// The IPv4 can be all checked in 30 seconds or so and they are correct as of nightly 2023-07-17
+/// The IPV6 can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct
+/// Note that the is_global implementation is subject to change as new IP RFCs are created
+///
+/// To run while showing progress output:
+/// cargo +nightly test --release --features sqlite,unstable -- --nocapture --ignored
+#[cfg(test)]
+#[cfg(feature = "unstable")]
+mod tests {
+    use super::*;
+    use std::net::IpAddr;
+
+    #[test]
+    #[ignore]
+    fn test_ipv4_global() {
+        for a in 0..u8::MAX {
+            println!("Iter: {}/255", a);
+            for b in 0..u8::MAX {
+                for c in 0..u8::MAX {
+                    for d in 0..u8::MAX {
+                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
+                        assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {}", ip)
+                    }
+                }
+            }
+        }
+    }
+
+    #[test]
+    #[ignore]
+    fn test_ipv6_global() {
+        use rand::Rng;
+
+        std::thread::scope(|s| {
+            for t in 0..16 {
+                let handle = s.spawn(move || {
+                    let mut v = [0u8; 16];
+                    let mut rng = rand::thread_rng();
+
+                    for i in 0..20 {
+                        println!("Thread {t} Iter: {i}/50");
+                        for _ in 0..500_000_000 {
+                            rng.fill(&mut v);
+                            let ip = IpAddr::V6(std::net::Ipv6Addr::from(v));
+                            assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {ip}");
+                        }
+                    }
+                });
+            }
+        });
+    }
+}

@@ -10,19 +10,19 @@ import urllib.request

 from collections import OrderedDict

-if not (2 <= len(sys.argv) <= 3):
-    print("usage: %s <OUTPUT-FILE> [GIT-REF]" % sys.argv[0])
+if not 2 <= len(sys.argv) <= 3:
+    print(f"usage: {sys.argv[0]} <OUTPUT-FILE> [GIT-REF]")
     print()
     print("This script generates a global equivalent domains JSON file from")
     print("the upstream Bitwarden source repo.")
     sys.exit(1)

 OUTPUT_FILE = sys.argv[1]
-GIT_REF = 'master' if len(sys.argv) == 2 else sys.argv[2]
+GIT_REF = 'main' if len(sys.argv) == 2 else sys.argv[2]

-BASE_URL = 'https://github.com/bitwarden/server/raw/%s' % GIT_REF
-ENUMS_URL = '%s/src/Core/Enums/GlobalEquivalentDomainsType.cs' % BASE_URL
-DOMAIN_LISTS_URL = '%s/src/Core/Utilities/StaticStore.cs' % BASE_URL
+BASE_URL = f'https://github.com/bitwarden/server/raw/{GIT_REF}'
+ENUMS_URL = f'{BASE_URL}/src/Core/Enums/GlobalEquivalentDomainsType.cs'
+DOMAIN_LISTS_URL = f'{BASE_URL}/src/Core/Utilities/StaticStore.cs'

 # Enum lines look like:
 #
@@ -71,11 +71,11 @@ with urllib.request.urlopen(DOMAIN_LISTS_URL) as response:
 global_domains = []
 for name, domain_list in domain_lists.items():
     entry = OrderedDict()
-    entry["Type"] = enums[name]
-    entry["Domains"] = domain_list
-    entry["Excluded"] = False
+    entry["type"] = enums[name]
+    entry["domains"] = domain_list
+    entry["excluded"] = False
     global_domains.append(entry)

 # Write out the global domains JSON file.
-with open(OUTPUT_FILE, 'w') as f:
+with open(file=OUTPUT_FILE, mode='w', encoding='utf-8') as f:
     json.dump(global_domains, f, indent=2)