mirror of https://github.com/rustfs/rustfs.git
synced 2026-01-17 09:40:32 +00:00

Compare commits: fix/kms_en...cursor/win (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 4147d5ad8c |  |
.github/dependabot.yml (vendored): 5 changes
@@ -26,9 +26,6 @@ updates:
day: "monday"
timezone: "Asia/Shanghai"
time: "08:00"
ignore:
- dependency-name: "object_store"
versions: [ "0.13.x" ]
groups:
s3s:
update-types:
@@ -39,4 +36,4 @@ updates:
- "s3s-*"
dependencies:
patterns:
- "*"
- "*"
.github/workflows/helm-package.yml (vendored): 1 change
@@ -44,6 +44,7 @@ jobs:
set -x
old_version=$(grep "^appVersion:" helm/rustfs/Chart.yaml | awk '{print $2}')
sed -i "s/$old_version/$new_version/g" helm/rustfs/Chart.yaml
sed -i "/^image:/,/^[^ ]/ s/tag:.*/tag: "$new_version"/" helm/rustfs/values.yaml

- name: Set up Helm
uses: azure/setup-helm@v4.3.0
.vscode/launch.json (vendored): 44 changes
@@ -121,50 +121,6 @@
"rust"
],
},
{
"name": "Debug executable target/debug/rustfs with sse",
"type": "lldb",
"request": "launch",
"program": "${workspaceFolder}/target/debug/rustfs",
"args": [],
"cwd": "${workspaceFolder}",
//"stopAtEntry": false,
//"preLaunchTask": "cargo build",
"env": {
"RUSTFS_ACCESS_KEY": "rustfsadmin",
"RUSTFS_SECRET_KEY": "rustfsadmin",
"RUSTFS_VOLUMES": "./target/volume/test{1...4}",
"RUSTFS_ADDRESS": ":9000",
"RUSTFS_CONSOLE_ENABLE": "true",
"RUSTFS_CONSOLE_ADDRESS": "127.0.0.1:9001",
"RUSTFS_OBS_LOG_DIRECTORY": "./target/logs",
// "RUSTFS_OBS_TRACE_ENDPOINT": "http://127.0.0.1:4318/v1/traces", // jaeger otlp http endpoint
// "RUSTFS_OBS_METRIC_ENDPOINT": "http://127.0.0.1:4318/v1/metrics", // default otlp http endpoint
// "RUSTFS_OBS_LOG_ENDPOINT": "http://127.0.0.1:4318/v1/logs", // default otlp http endpoint
// "RUSTFS_COMPRESS_ENABLE": "true",

// 1. simple sse test key (no kms system)
// "__RUSTFS_SSE_SIMPLE_CMK": "2dfNXGHlsEflGVCxb+5DIdGEl1sIvtwX+QfmYasi5QM=",

// 2. kms local backend test key
"RUSTFS_KMS_ENABLE": "true",
"RUSTFS_KMS_BACKEND": "local",
"RUSTFS_KMS_KEY_DIR": "./target/kms-key-dir",
"RUSTFS_KMS_LOCAL_MASTER_KEY": "my-secret-key", // Some Password
"RUSTFS_KMS_DEFAULT_KEY_ID": "rustfs-master-key",

// 3. kms vault backend test key
// "RUSTFS_KMS_ENABLE": "true",
// "RUSTFS_KMS_BACKEND": "vault",
// "RUSTFS_KMS_VAULT_ADDRESS": "http://127.0.0.1:8200",
// "RUSTFS_KMS_VAULT_TOKEN": "Dev Token",
// "RUSTFS_KMS_DEFAULT_KEY_ID": "rustfs-master-key",

},
"sourceLanguages": [
"rust"
],
},
{
"name": "Debug executable target/debug/test",
"type": "lldb",
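The launch configuration above shows three ways to exercise encryption in a debug run: a plain SSE key, a local KMS backend, and a Vault backend, all selected through `RUSTFS_KMS_*` environment variables. As an illustration only, here is a minimal sketch of how such variables could be folded into a backend selection; the enum, function, and error handling are assumptions for the example and are not the actual RustFS configuration loader, only the environment variable names come from the launch config above.

```rust
use std::env;

// Hypothetical illustration: map the RUSTFS_KMS_* variables shown above onto a backend choice.
#[derive(Debug)]
enum KmsBackend {
    Disabled,
    Local { key_dir: String, master_key: String, default_key_id: String },
    Vault { address: String, token: String, default_key_id: String },
}

fn kms_backend_from_env() -> Result<KmsBackend, String> {
    // RUSTFS_KMS_ENABLE gates everything else.
    if env::var("RUSTFS_KMS_ENABLE").as_deref() != Ok("true") {
        return Ok(KmsBackend::Disabled);
    }
    let default_key_id = env::var("RUSTFS_KMS_DEFAULT_KEY_ID").unwrap_or_default();
    match env::var("RUSTFS_KMS_BACKEND").as_deref() {
        Ok("local") => Ok(KmsBackend::Local {
            key_dir: env::var("RUSTFS_KMS_KEY_DIR").map_err(|_| "RUSTFS_KMS_KEY_DIR is required")?,
            master_key: env::var("RUSTFS_KMS_LOCAL_MASTER_KEY").map_err(|_| "RUSTFS_KMS_LOCAL_MASTER_KEY is required")?,
            default_key_id,
        }),
        Ok("vault") => Ok(KmsBackend::Vault {
            address: env::var("RUSTFS_KMS_VAULT_ADDRESS").map_err(|_| "RUSTFS_KMS_VAULT_ADDRESS is required")?,
            token: env::var("RUSTFS_KMS_VAULT_TOKEN").map_err(|_| "RUSTFS_KMS_VAULT_TOKEN is required")?,
            default_key_id,
        }),
        other => Err(format!("unsupported RUSTFS_KMS_BACKEND: {:?}", other)),
    }
}

fn main() {
    match kms_backend_from_env() {
        Ok(backend) => println!("resolved KMS backend: {:?}", backend),
        Err(err) => eprintln!("invalid KMS configuration: {}", err),
    }
}
```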
Cargo.lock (generated): 1247 changes
(File diff suppressed because it is too large.)
Cargo.toml: 59 changes
@@ -50,7 +50,7 @@ resolver = "2"
edition = "2024"
license = "Apache-2.0"
repository = "https://github.com/rustfs/rustfs"
rust-version = "1.90"
rust-version = "1.88"
version = "0.0.5"
homepage = "https://rustfs.com"
description = "RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. "
@@ -96,7 +96,7 @@ rustfs-zip = { path = "./crates/zip", version = "0.0.5" }

# Async Runtime and Networking
async-channel = "2.5.0"
async-compression = { version = "0.4.37" }
async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.89"
axum = "0.8.8"
@@ -121,7 +121,7 @@ tokio-util = { version = "0.7.18", features = ["io", "compat"] }
tonic = { version = "0.14.2", features = ["gzip"] }
tonic-prost = { version = "0.14.2" }
tonic-prost-build = { version = "0.14.2" }
tower = { version = "0.5.3", features = ["timeout"] }
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.8", features = ["cors"] }

# Serialization and Data Formats
@@ -130,27 +130,27 @@ bytesize = "2.3.1"
byteorder = "1.5.0"
flatbuffers = "25.12.19"
form_urlencoded = "1.2.2"
prost = "0.14.3"
quick-xml = "0.39.0"
prost = "0.14.1"
quick-xml = "0.38.4"
rmcp = { version = "0.12.0" }
rmp = { version = "0.8.15" }
rmp-serde = { version = "1.3.1" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.149", features = ["raw_value"] }
serde_json = { version = "1.0.148", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
schemars = "1.2.0"

# Cryptography and Security
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
argon2 = { version = "0.6.0-rc.5" }
blake3 = { version = "1.8.3", features = ["rayon", "mmap"] }
blake3 = { version = "1.8.2", features = ["rayon", "mmap"] }
chacha20poly1305 = { version = "0.11.0-rc.2" }
crc-fast = "1.9.0"
crc-fast = "1.6.0"
hmac = { version = "0.13.0-rc.3" }
jsonwebtoken = { version = "10.2.0", features = ["aws_lc_rs"] }
pbkdf2 = "0.13.0-rc.7"
rsa = { version = "0.10.0-rc.12" }
rustls = { version = "0.23.36", default-features = false, features = ["aws-lc-rs", "logging", "tls12", "prefer-post-quantum", "std"] }
jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
pbkdf2 = "0.13.0-rc.5"
rsa = { version = "0.10.0-rc.11" }
rustls = { version = "0.23.35" }
rustls-pemfile = "2.2.0"
rustls-pki-types = "1.13.2"
sha1 = "0.11.0-rc.3"
@@ -159,9 +159,9 @@ subtle = "2.6"
zeroize = { version = "1.8.2", features = ["derive"] }

# Time and Date
chrono = { version = "0.4.43", features = ["serde"] }
chrono = { version = "0.4.42", features = ["serde"] }
humantime = "2.3.0"
time = { version = "0.3.45", features = ["std", "parsing", "formatting", "macros", "serde"] }
time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros", "serde"] }

# Utilities and Tools
anyhow = "1.0.100"
@@ -172,7 +172,7 @@ atomic_enum = "0.3.0"
aws-config = { version = "1.8.12" }
aws-credential-types = { version = "1.2.11" }
aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "default-https-client", "rt-tokio"] }
aws-smithy-types = { version = "1.3.6" }
aws-smithy-types = { version = "1.3.5" }
base64 = "0.22.1"
base64-simd = "0.8.0"
brotli = "8.0.2"
@@ -182,25 +182,24 @@ const-str = { version = "1.0.0", features = ["std", "proc"] }
convert_case = "0.10.0"
criterion = { version = "0.8", features = ["html_reports"] }
crossbeam-queue = "0.3.12"
datafusion = "52.0.0"
datafusion = "51.0.0"
derive_builder = "0.20.2"
dunce = "1.0.5"
enumset = "1.1.10"
faster-hex = "0.10.0"
flate2 = "1.1.8"
flate2 = "1.1.5"
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] }
glob = "0.3.3"
google-cloud-storage = "1.6.0"
google-cloud-auth = "1.4.0"
google-cloud-storage = "1.5.0"
google-cloud-auth = "1.3.0"
hashbrown = { version = "0.16.1", features = ["serde", "rayon"] }
heed = { version = "0.22.0" }
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
ipnetwork = { version = "0.21.1", features = ["serde"] }
lazy_static = "1.5.0"
libc = "0.2.180"
libc = "0.2.179"
libsystemd = "0.7.2"
local-ip-address = "0.6.9"
local-ip-address = "0.6.8"
lz4 = "1.28.1"
matchit = "0.9.1"
md-5 = "0.11.0-rc.3"
@@ -223,9 +222,9 @@ rayon = "1.11.0"
reed-solomon-simd = { version = "3.1.0" }
regex = { version = "1.12.2" }
rumqttc = { version = "0.25.1" }
rust-embed = { version = "8.11.0" }
rust-embed = { version = "8.9.0" }
rustc-hash = { version = "2.1.1" }
s3s = { version = "0.13.0-alpha.2", features = ["minio"] }
s3s = { version = "0.13.0-alpha", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", rev = "ac13a56" }
serial_test = "3.3.1"
shadow-rs = { version = "1.5.0", default-features = false }
siphasher = "1.0.1"
@@ -243,18 +242,18 @@ thiserror = "2.0.17"
tracing = { version = "0.1.44" }
tracing-appender = "0.2.4"
tracing-error = "0.2.1"
tracing-opentelemetry = "0.32.1"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
url = "2.5.8"
url = "2.5.7"
urlencoding = "2.1.3"
uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] }
vaultrs = { version = "0.7.4" }
walkdir = "2.5.0"
wildmatch = { version = "2.6.1", features = ["serde"] }
windows = { version = "0.62.2" }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "7.1.0"
zip = "7.0.0"
zstd = "0.13.3"

# Observability and Metrics
@@ -270,8 +269,8 @@ opentelemetry-stdout = { version = "0.31.0" }
libunftp = "0.21.0"
russh = { version = "0.56.0", features = ["aws-lc-rs", "rsa"], default-features = false }
russh-sftp = "2.1.1"
ssh-key = { version = "0.7.0-rc.6", features = ["std", "rsa", "ed25519"] }
suppaftp = { version = "7.1.0", features = ["tokio", "tokio-rustls", "rustls"] }
ssh-key = { version = "0.7.0-rc.4", features = ["std", "rsa", "ed25519"] }
suppaftp = { version = "7.0.7", features = ["tokio", "tokio-rustls", "rustls"] }
rcgen = "0.14.6"

# Performance Analysis and Memory Profiling
@@ -1,4 +1,4 @@
FROM alpine:3.23 AS build
FROM alpine:3.22 AS build

ARG TARGETARCH
ARG RELEASE=latest
@@ -40,7 +40,7 @@ RUN set -eux; \
rm -rf rustfs.zip /build/.tmp || true


FROM alpine:3.23
FROM alpine:3.22

ARG RELEASE=latest
ARG BUILD_DATE

@@ -16,7 +16,7 @@ ARG BUILDPLATFORM
# -----------------------------
# Build stage
# -----------------------------
FROM rust:1.91-trixie AS builder
FROM rust:1.88-bookworm AS builder

# Re-declare args after FROM
ARG TARGETPLATFORM
@@ -83,13 +83,6 @@ Unlike other storage systems, RustFS is released under the permissible Apache 2.
| **Edge & IoT** | **Strong Edge Support**<br>Ideal for secure, innovative edge devices. | **Weak Edge Support**<br>Often too heavy for edge gateways. |
| **Risk Profile** | **Enterprise Risk Mitigation**<br>Clear IP rights and safe for commercial use. | **Legal Risks**<br>Intellectual property ambiguity and usage restrictions. |


## Staying ahead

Star RustFS on GitHub and be instantly notified of new releases.

<img src="https://github.com/user-attachments/assets/7ee40bb4-3e46-4eac-b0d0-5fbeb85ff8f3" />

## Quickstart

To get started with RustFS, follow these steps:
@@ -86,15 +86,6 @@ RustFS is a high-performance distributed object storage system built with Rust. Rust
| **Cost** | **Stable and Free**<br>Free community support and stable commercial pricing. | **High Cost**<br>1 PiB can cost as much as $250,000. |
| **Risk Control** | **Enterprise Risk Mitigation**<br>Clear intellectual property rights; safe for commercial use. | **Legal Risks**<br>Ambiguous IP ownership and usage-restriction risks. |


## Staying ahead

Star RustFS on GitHub to be notified of new releases the moment they ship.

<img src="https://github.com/user-attachments/assets/7ee40bb4-3e46-4eac-b0d0-5fbeb85ff8f3" />


## Quickstart

Follow these steps to get started with RustFS quickly:
@@ -306,7 +306,7 @@ fn compute_object_usage(bucket: &str, object: &str, file_meta: &FileMeta) -> Res
versions_count = versions_count.saturating_add(1);

if latest_file_info.is_none()
&& let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false, false)
&& let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false)
{
latest_file_info = Some(info);
}
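The hunk above relies on Rust's let-chain syntax, where a `let` binding is combined with a boolean condition inside one `if`; it compiles on the 2024 edition and rust-version pinned in this repository's Cargo.toml. A minimal self-contained illustration of the pattern (not RustFS code; the type and helper below are made up for the example):

```rust
// Hypothetical stand-ins for FileMeta::into_fileinfo and its result type.
struct FileInfo {
    name: String,
}

fn parse_info(raw: &str) -> Result<FileInfo, String> {
    if raw.is_empty() {
        Err("empty input".to_string())
    } else {
        Ok(FileInfo { name: raw.to_string() })
    }
}

fn main() {
    let mut latest: Option<FileInfo> = None;
    let raw = "object-v1";

    // A let-chain: the second condition both tests the Result and binds `info`,
    // mirroring the `latest_file_info.is_none() && let Ok(info) = ...` shape above.
    if latest.is_none()
        && let Ok(info) = parse_info(raw)
    {
        latest = Some(info);
    }

    assert_eq!(latest.map(|i| i.name), Some("object-v1".to_string()));
}
```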
@@ -21,7 +21,6 @@ use futures::stream::FuturesUnordered;
use hashbrown::{HashMap, HashSet};
use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, EnableState, audit::AUDIT_ROUTE_PREFIX};
use rustfs_ecstore::config::{Config, KVS};
use rustfs_targets::arn::TargetID;
use rustfs_targets::{Target, TargetError, target::ChannelTargetType};
use std::str::FromStr;
use std::sync::Arc;
@@ -393,80 +392,4 @@ impl AuditRegistry {

Ok(())
}

/// Creates a unique key for a target based on its type and ID
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
///
/// # Returns
/// * `String` - The unique key for the target.
pub fn create_key(&self, target_type: &str, target_id: &str) -> String {
let key = TargetID::new(target_id.to_string(), target_type.to_string());
info!(target_type = %target_type, "Create key for {}", key);
key.to_string()
}

/// Enables a target (placeholder, assumes target exists)
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub fn enable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> {
let key = self.create_key(target_type, target_id);
if self.get_target(&key).is_some() {
info!("Target {}-{} enabled", target_type, target_id);
Ok(())
} else {
Err(AuditError::Configuration(
format!("Target not found: {}-{}", target_type, target_id),
None,
))
}
}

/// Disables a target (placeholder, assumes target exists)
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub fn disable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> {
let key = self.create_key(target_type, target_id);
if self.get_target(&key).is_some() {
info!("Target {}-{} disabled", target_type, target_id);
Ok(())
} else {
Err(AuditError::Configuration(
format!("Target not found: {}-{}", target_type, target_id),
None,
))
}
}

/// Upserts a target into the registry
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
/// * `target` - The target instance to be upserted.
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub fn upsert_target(
&mut self,
target_type: &str,
target_id: &str,
target: Box<dyn Target<AuditEntry> + Send + Sync>,
) -> AuditResult<()> {
let key = self.create_key(target_type, target_id);
self.targets.insert(key, target);
Ok(())
}
}
@@ -274,9 +274,9 @@ impl AuditSystem {
drop(state);

let registry = self.registry.lock().await;
let target_keys = registry.list_targets();
let target_ids = registry.list_targets();

if target_keys.is_empty() {
if target_ids.is_empty() {
warn!("No audit targets configured for dispatch");
return Ok(());
}
@@ -284,22 +284,22 @@ impl AuditSystem {
// Dispatch to all targets concurrently
let mut tasks = Vec::new();

for target_key in target_keys {
if let Some(target) = registry.get_target(&target_key) {
for target_id in target_ids {
if let Some(target) = registry.get_target(&target_id) {
let entry_clone = Arc::clone(&entry);
let target_key_clone = target_key.clone();
let target_id_clone = target_id.clone();

// Create EntityTarget for the audit log entry
let entity_target = EntityTarget {
object_name: entry.api.name.clone().unwrap_or_default(),
bucket_name: entry.api.bucket.clone().unwrap_or_default(),
event_name: entry.event, // Default, should be derived from entry
event_name: rustfs_targets::EventName::ObjectCreatedPut, // Default, should be derived from entry
data: (*entry_clone).clone(),
};

let task = async move {
let result = target.save(Arc::new(entity_target)).await;
(target_key_clone, result)
(target_id_clone, result)
};

tasks.push(task);
@@ -312,14 +312,14 @@ impl AuditSystem {
let mut errors = Vec::new();
let mut success_count = 0;

for (target_key, result) in results {
for (target_id, result) in results {
match result {
Ok(_) => {
success_count += 1;
observability::record_target_success();
}
Err(e) => {
error!(target_id = %target_key, error = %e, "Failed to dispatch audit log to target");
error!(target_id = %target_id, error = %e, "Failed to dispatch audit log to target");
errors.push(e);
observability::record_target_failure();
}
@@ -360,18 +360,18 @@ impl AuditSystem {
drop(state);

let registry = self.registry.lock().await;
let target_keys = registry.list_targets();
let target_ids = registry.list_targets();

if target_keys.is_empty() {
if target_ids.is_empty() {
warn!("No audit targets configured for batch dispatch");
return Ok(());
}

let mut tasks = Vec::new();
for target_key in target_keys {
if let Some(target) = registry.get_target(&target_key) {
for target_id in target_ids {
if let Some(target) = registry.get_target(&target_id) {
let entries_clone: Vec<_> = entries.iter().map(Arc::clone).collect();
let target_key_clone = target_key.clone();
let target_id_clone = target_id.clone();

let task = async move {
let mut success_count = 0;
@@ -380,7 +380,7 @@ impl AuditSystem {
let entity_target = EntityTarget {
object_name: entry.api.name.clone().unwrap_or_default(),
bucket_name: entry.api.bucket.clone().unwrap_or_default(),
event_name: entry.event,
event_name: rustfs_targets::EventName::ObjectCreatedPut,
data: (*entry).clone(),
};
match target.save(Arc::new(entity_target)).await {
@@ -388,7 +388,7 @@ impl AuditSystem {
Err(e) => errors.push(e),
}
}
(target_key_clone, success_count, errors)
(target_id_clone, success_count, errors)
};
tasks.push(task);
}
@@ -418,7 +418,6 @@ impl AuditSystem {
}

/// Starts the audit stream processing for a target with batching and retry logic
///
/// # Arguments
/// * `store` - The store from which to read audit entries
/// * `target` - The target to which audit entries will be sent
@@ -502,7 +501,7 @@ impl AuditSystem {
/// Enables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to enable, TargetID to string
/// * `target_id` - The ID of the target to enable
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
@@ -521,7 +520,7 @@ impl AuditSystem {
/// Disables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to disable, TargetID to string
/// * `target_id` - The ID of the target to disable
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
@@ -540,7 +539,7 @@ impl AuditSystem {
/// Removes a target from the system
///
/// # Arguments
/// * `target_id` - The ID of the target to remove, TargetID to string
/// * `target_id` - The ID of the target to remove
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
@@ -560,7 +559,7 @@ impl AuditSystem {
/// Updates or inserts a target
///
/// # Arguments
/// * `target_id` - The ID of the target to upsert, TargetID to string
/// * `target_id` - The ID of the target to upsert
/// * `target` - The target instance to insert or update
///
/// # Returns
@@ -597,7 +596,7 @@ impl AuditSystem {
/// Gets information about a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to retrieve, TargetID to string
/// * `target_id` - The ID of the target to retrieve
///
/// # Returns
/// * `Option<String>` - Target ID if found
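The dispatch loop in the hunks above builds one async task per audit target, pushes them into a `Vec`, awaits them together, and tallies `success_count` and `errors`. The collection step itself is elided by the diff; as an illustration only, a standalone sketch of that fan-out shape using `futures::future::join_all` (the real file imports `FuturesUnordered`, so the actual mechanism may differ) looks like this:

```rust
use futures::future::join_all;

#[tokio::main]
async fn main() {
    // Hypothetical target identifiers; in RustFS these come from registry.list_targets().
    let target_ids = vec!["webhook:primary".to_string(), "mqtt:audit".to_string()];

    let mut tasks = Vec::new();
    for target_id in target_ids {
        let task = async move {
            // Stand-in for `target.save(...)`: pretend the save succeeded.
            let result: Result<(), String> = Ok(());
            (target_id, result)
        };
        tasks.push(task);
    }

    // Await all dispatches concurrently and tally successes and failures.
    let mut success_count = 0;
    let mut errors = Vec::new();
    for (target_id, result) in join_all(tasks).await {
        match result {
            Ok(()) => success_count += 1,
            Err(e) => errors.push((target_id, e)),
        }
    }
    println!("dispatched: {} ok, {} failed", success_count, errors.len());
}
```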
@@ -96,11 +96,6 @@ pub enum Metric {
ApplyNonCurrent,
HealAbandonedVersion,

// Quota metrics:
QuotaCheck,
QuotaViolation,
QuotaSync,

// START Trace metrics:
StartTrace,
ScanObject, // Scan object. All operations included.
@@ -136,9 +131,6 @@ impl Metric {
Self::CleanAbandoned => "clean_abandoned",
Self::ApplyNonCurrent => "apply_non_current",
Self::HealAbandonedVersion => "heal_abandoned_version",
Self::QuotaCheck => "quota_check",
Self::QuotaViolation => "quota_violation",
Self::QuotaSync => "quota_sync",
Self::StartTrace => "start_trace",
Self::ScanObject => "scan_object",
Self::HealAbandonedObject => "heal_abandoned_object",
@@ -171,18 +163,15 @@ impl Metric {
10 => Some(Self::CleanAbandoned),
11 => Some(Self::ApplyNonCurrent),
12 => Some(Self::HealAbandonedVersion),
13 => Some(Self::QuotaCheck),
14 => Some(Self::QuotaViolation),
15 => Some(Self::QuotaSync),
16 => Some(Self::StartTrace),
17 => Some(Self::ScanObject),
18 => Some(Self::HealAbandonedObject),
19 => Some(Self::LastRealtime),
20 => Some(Self::ScanFolder),
21 => Some(Self::ScanCycle),
22 => Some(Self::ScanBucketDrive),
23 => Some(Self::CompactFolder),
24 => Some(Self::Last),
13 => Some(Self::StartTrace),
14 => Some(Self::ScanObject),
15 => Some(Self::HealAbandonedObject),
16 => Some(Self::LastRealtime),
17 => Some(Self::ScanFolder),
18 => Some(Self::ScanCycle),
19 => Some(Self::ScanBucketDrive),
20 => Some(Self::CompactFolder),
21 => Some(Self::Last),
_ => None,
}
}
@@ -21,7 +21,6 @@ pub(crate) mod heal;
pub(crate) mod object;
pub(crate) mod profiler;
pub(crate) mod protocols;
pub(crate) mod quota;
pub(crate) mod runtime;
pub(crate) mod targets;
pub(crate) mod tls;
@@ -1,26 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub const QUOTA_CONFIG_FILE: &str = "quota.json";
pub const QUOTA_TYPE_HARD: &str = "HARD";

pub const QUOTA_EXCEEDED_ERROR_CODE: &str = "XRustfsQuotaExceeded";
pub const QUOTA_INVALID_CONFIG_ERROR_CODE: &str = "InvalidArgument";
pub const QUOTA_NOT_FOUND_ERROR_CODE: &str = "NoSuchBucket";
pub const QUOTA_INTERNAL_ERROR_CODE: &str = "InternalError";

pub const QUOTA_API_PATH: &str = "/rustfs/admin/v3/quota/{bucket}";

pub const QUOTA_INVALID_TYPE_ERROR_MSG: &str = "Only HARD quota type is supported";
pub const QUOTA_METADATA_SYSTEM_ERROR_MSG: &str = "Bucket metadata system not initialized";
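These constants back the bucket-quota admin API that the quota tests further down exercise. As a minimal sketch only (the struct name and field layout are assumptions, not the RustFS definition; only the "quota.json"/"HARD" constants and the {"quota": ..., "quota_type": "HARD"} JSON shape come from this diff), a quota configuration matching that shape could be modeled and serialized like this:

```rust
use serde::{Deserialize, Serialize};

// Constants copied from the removed quota module above; names and shape of the
// struct below are hypothetical, chosen only to match the JSON used by the tests.
pub const QUOTA_CONFIG_FILE: &str = "quota.json";
pub const QUOTA_TYPE_HARD: &str = "HARD";

#[derive(Debug, Serialize, Deserialize)]
struct BucketQuotaConfig {
    /// Quota limit in bytes.
    quota: u64,
    /// Only "HARD" is supported per QUOTA_INVALID_TYPE_ERROR_MSG.
    quota_type: String,
}

fn main() -> Result<(), serde_json::Error> {
    let config = BucketQuotaConfig {
        quota: 1024 * 1024, // 1 MiB
        quota_type: QUOTA_TYPE_HARD.to_string(),
    };
    let body = serde_json::to_string(&config)?;
    // Such a payload would be persisted as quota.json or sent to /rustfs/admin/v3/quota/{bucket}.
    println!("{} -> {}", QUOTA_CONFIG_FILE, body);
    Ok(())
}
```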
@@ -33,8 +33,6 @@ pub use constants::profiler::*;
#[cfg(feature = "constants")]
pub use constants::protocols::*;
#[cfg(feature = "constants")]
pub use constants::quota::*;
#[cfg(feature = "constants")]
pub use constants::runtime::*;
#[cfg(feature = "constants")]
pub use constants::targets::*;
@@ -26,7 +26,6 @@ workspace = true
[dependencies]
rustfs-ecstore.workspace = true
rustfs-common.workspace = true
rustfs-iam.workspace = true
flatbuffers.workspace = true
futures.workspace = true
rustfs-lock.workspace = true
@@ -1,155 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Regression test for Issue #1423
//! Verifies that Bucket Policies are honored for Authenticated Users.

use crate::common::{RustFSTestEnvironment, init_logging};
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::{Client, Config};
use serial_test::serial;
use tracing::info;

async fn create_user(
env: &RustFSTestEnvironment,
username: &str,
password: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let create_user_body = serde_json::json!({
"secretKey": password,
"status": "enabled"
})
.to_string();

let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, username);
crate::common::awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?;
Ok(())
}

fn create_user_client(env: &RustFSTestEnvironment, access_key: &str, secret_key: &str) -> Client {
let credentials = Credentials::new(access_key, secret_key, None, None, "test-user");
let config = Config::builder()
.credentials_provider(credentials)
.region(Region::new("us-east-1"))
.endpoint_url(&env.url)
.force_path_style(true)
.behavior_version_latest()
.build();

Client::from_conf(config)
}

#[tokio::test]
#[serial]
async fn test_bucket_policy_authenticated_user() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("Starting test_bucket_policy_authenticated_user...");

let mut env = RustFSTestEnvironment::new().await?;
env.start_rustfs_server(vec![]).await?;

let admin_client = env.create_s3_client();
let bucket_name = "bucket-policy-auth-test";
let object_key = "test-object.txt";
let user_access = "testuser";
let user_secret = "testpassword";

// 1. Create Bucket (Admin)
admin_client.create_bucket().bucket(bucket_name).send().await?;

// 2. Create User (Admin API)
create_user(&env, user_access, user_secret).await?;

// 3. Create User Client
let user_client = create_user_client(&env, user_access, user_secret);

// 4. Verify Access Denied initially (No Policy)
let result = user_client.list_objects_v2().bucket(bucket_name).send().await;
if result.is_ok() {
return Err("Should be Access Denied initially".into());
}

// 5. Apply Bucket Policy Allowed User
let policy_json = serde_json::json!({
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowTestUser",
"Effect": "Allow",
"Principal": {
"AWS": [user_access]
},
"Action": [
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
],
"Resource": [
format!("arn:aws:s3:::{}", bucket_name),
format!("arn:aws:s3:::{}/*", bucket_name)
]
}
]
})
.to_string();

admin_client
.put_bucket_policy()
.bucket(bucket_name)
.policy(&policy_json)
.send()
.await?;

// 6. Verify Access Allowed (With Bucket Policy)
info!("Verifying PutObject...");
user_client
.put_object()
.bucket(bucket_name)
.key(object_key)
.body(aws_sdk_s3::primitives::ByteStream::from_static(b"hello world"))
.send()
.await
.map_err(|e| format!("PutObject failed: {}", e))?;

info!("Verifying ListObjects...");
let list_res = user_client
.list_objects_v2()
.bucket(bucket_name)
.send()
.await
.map_err(|e| format!("ListObjects failed: {}", e))?;
assert_eq!(list_res.contents().len(), 1);

info!("Verifying GetObject...");
user_client
.get_object()
.bucket(bucket_name)
.key(object_key)
.send()
.await
.map_err(|e| format!("GetObject failed: {}", e))?;

info!("Verifying DeleteObject...");
user_client
.delete_object()
.bucket(bucket_name)
.key(object_key)
.send()
.await
.map_err(|e| format!("DeleteObject failed: {}", e))?;

info!("Test Passed!");
Ok(())
}
@@ -176,14 +176,12 @@ impl RustFSTestEnvironment {
/// Kill any existing RustFS processes
pub async fn cleanup_existing_processes(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
info!("Cleaning up any existing RustFS processes");
let binary_path = rustfs_binary_path();
let binary_name = binary_path.to_string_lossy();
let output = Command::new("pkill").args(["-f", &binary_name]).output();
let output = Command::new("pkill").args(["-f", "rustfs"]).output();

if let Ok(output) = output
&& output.status.success()
{
info!("Killed existing RustFS processes: {}", binary_name);
info!("Killed existing RustFS processes");
sleep(Duration::from_millis(1000)).await;
}
Ok(())
@@ -365,12 +363,3 @@ pub async fn awscurl_put(
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
execute_awscurl(url, "PUT", Some(body), access_key, secret_key).await
}

/// Helper function for DELETE requests
pub async fn awscurl_delete(
url: &str,
access_key: &str,
secret_key: &str,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
execute_awscurl(url, "DELETE", None, access_key, secret_key).await
}
@@ -29,13 +29,6 @@ mod data_usage_test;
#[cfg(test)]
mod kms;

// Quota tests
#[cfg(test)]
mod quota_test;

#[cfg(test)]
mod bucket_policy_check_test;

// Special characters in path test modules
#[cfg(test)]
mod special_chars_test;
@@ -1,798 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::common::{RustFSTestEnvironment, awscurl_delete, awscurl_get, awscurl_post, awscurl_put, init_logging};
use aws_sdk_s3::Client;
use serial_test::serial;
use tracing::{debug, info};

/// Test environment setup for quota tests
pub struct QuotaTestEnv {
pub env: RustFSTestEnvironment,
pub client: Client,
pub bucket_name: String,
}

impl QuotaTestEnv {
pub async fn new() -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
let bucket_name = format!("quota-test-{}", uuid::Uuid::new_v4());
let mut env = RustFSTestEnvironment::new().await?;
env.start_rustfs_server(vec![]).await?;
let client = env.create_s3_client();

Ok(Self {
env,
client,
bucket_name,
})
}

pub async fn create_bucket(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
self.env.create_test_bucket(&self.bucket_name).await?;
Ok(())
}

pub async fn cleanup_bucket(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let objects = self.client.list_objects_v2().bucket(&self.bucket_name).send().await?;
for object in objects.contents() {
self.client
.delete_object()
.bucket(&self.bucket_name)
.key(object.key().unwrap_or_default())
.send()
.await?;
}
self.env.delete_test_bucket(&self.bucket_name).await?;
Ok(())
}

pub async fn set_bucket_quota(&self, quota_bytes: u64) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
let quota_config = serde_json::json!({
"quota": quota_bytes,
"quota_type": "HARD"
});

let response = awscurl_put(&url, &quota_config.to_string(), &self.env.access_key, &self.env.secret_key).await?;
if response.contains("error") {
Err(format!("Failed to set quota: {}", response).into())
} else {
Ok(())
}
}

pub async fn get_bucket_quota(&self) -> Result<Option<u64>, Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;

if response.contains("error") {
Err(format!("Failed to get quota: {}", response).into())
} else {
let quota_info: serde_json::Value = serde_json::from_str(&response)?;
Ok(quota_info.get("quota").and_then(|v| v.as_u64()))
}
}

pub async fn clear_bucket_quota(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
let response = awscurl_delete(&url, &self.env.access_key, &self.env.secret_key).await?;

if response.contains("error") {
Err(format!("Failed to clear quota: {}", response).into())
} else {
Ok(())
}
}
pub async fn get_bucket_quota_stats(&self) -> Result<serde_json::Value, Box<dyn std::error::Error + Send + Sync>> {
|
||||
let url = format!("{}/rustfs/admin/v3/quota-stats/{}", self.env.url, self.bucket_name);
|
||||
let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;
|
||||
|
||||
if response.contains("error") {
|
||||
Err(format!("Failed to get quota stats: {}", response).into())
|
||||
} else {
|
||||
Ok(serde_json::from_str(&response)?)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn check_bucket_quota(
|
||||
&self,
|
||||
operation_type: &str,
|
||||
operation_size: u64,
|
||||
) -> Result<serde_json::Value, Box<dyn std::error::Error + Send + Sync>> {
|
||||
let url = format!("{}/rustfs/admin/v3/quota-check/{}", self.env.url, self.bucket_name);
|
||||
let check_request = serde_json::json!({
|
||||
"operation_type": operation_type,
|
||||
"operation_size": operation_size
|
||||
});
|
||||
|
||||
let response = awscurl_post(&url, &check_request.to_string(), &self.env.access_key, &self.env.secret_key).await?;
|
||||
|
||||
if response.contains("error") {
|
||||
Err(format!("Failed to check quota: {}", response).into())
|
||||
} else {
|
||||
Ok(serde_json::from_str(&response)?)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn upload_object(&self, key: &str, size_bytes: usize) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let data = vec![0u8; size_bytes];
|
||||
self.client
|
||||
.put_object()
|
||||
.bucket(&self.bucket_name)
|
||||
.key(key)
|
||||
.body(aws_sdk_s3::primitives::ByteStream::from(data))
|
||||
.send()
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn object_exists(&self, key: &str) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> {
|
||||
match self.client.head_object().bucket(&self.bucket_name).key(key).send().await {
|
||||
Ok(_) => Ok(true),
|
||||
Err(e) => {
|
||||
// Check for any 404-related errors and return false instead of propagating
|
||||
let error_str = e.to_string();
|
||||
if error_str.contains("404") || error_str.contains("Not Found") || error_str.contains("NotFound") {
|
||||
Ok(false)
|
||||
} else {
|
||||
// Also check the error code directly
|
||||
if let Some(service_err) = e.as_service_error()
|
||||
&& service_err.is_not_found()
|
||||
{
|
||||
return Ok(false);
|
||||
}
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_bucket_usage(&self) -> Result<u64, Box<dyn std::error::Error + Send + Sync>> {
|
||||
let stats = self.get_bucket_quota_stats().await?;
|
||||
Ok(stats.get("current_usage").and_then(|v| v.as_u64()).unwrap_or(0))
|
||||
}
|
||||
|
||||
pub async fn set_bucket_quota_for(
|
||||
&self,
|
||||
bucket: &str,
|
||||
quota_bytes: u64,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, bucket);
|
||||
let quota_config = serde_json::json!({
|
||||
"quota": quota_bytes,
|
||||
"quota_type": "HARD"
|
||||
});
|
||||
|
||||
let response = awscurl_put(&url, &quota_config.to_string(), &self.env.access_key, &self.env.secret_key).await?;
|
||||
if response.contains("error") {
|
||||
Err(format!("Failed to set quota: {}", response).into())
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Get bucket quota statistics for specific bucket
|
||||
pub async fn get_bucket_quota_stats_for(
|
||||
&self,
|
||||
bucket: &str,
|
||||
) -> Result<serde_json::Value, Box<dyn std::error::Error + Send + Sync>> {
|
||||
debug!("Getting quota stats for bucket: {}", bucket);
|
||||
|
||||
let url = format!("{}/rustfs/admin/v3/quota-stats/{}", self.env.url, bucket);
|
||||
let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;
|
||||
|
||||
if response.contains("error") {
|
||||
Err(format!("Failed to get quota stats: {}", response).into())
|
||||
} else {
|
||||
let stats: serde_json::Value = serde_json::from_str(&response)?;
|
||||
Ok(stats)
|
||||
}
|
||||
}
|
||||
|
||||
/// Upload an object to specific bucket
|
||||
pub async fn upload_object_to_bucket(
|
||||
&self,
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
size_bytes: usize,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
debug!("Uploading object {} with size {} bytes to bucket {}", key, size_bytes, bucket);
|
||||
|
||||
let data = vec![0u8; size_bytes];
|
||||
|
||||
self.client
|
||||
.put_object()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.body(aws_sdk_s3::primitives::ByteStream::from(data))
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
info!("Successfully uploaded object: {} ({} bytes) to bucket: {}", key, size_bytes, bucket);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod integration_tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_basic_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
// Create test bucket
|
||||
env.create_bucket().await?;
|
||||
|
||||
// Set quota of 1MB
|
||||
env.set_bucket_quota(1024 * 1024).await?;
|
||||
|
||||
// Verify quota is set
|
||||
let quota = env.get_bucket_quota().await?;
|
||||
assert_eq!(quota, Some(1024 * 1024));
|
||||
|
||||
// Upload a 512KB object (should succeed)
|
||||
env.upload_object("test1.txt", 512 * 1024).await?;
|
||||
assert!(env.object_exists("test1.txt").await?);
|
||||
|
||||
// Upload another 512KB object (should succeed, total 1MB)
|
||||
env.upload_object("test2.txt", 512 * 1024).await?;
|
||||
assert!(env.object_exists("test2.txt").await?);
|
||||
|
||||
// Try to upload 1KB more (should fail due to quota)
|
||||
let upload_result = env.upload_object("test3.txt", 1024).await;
|
||||
assert!(upload_result.is_err());
|
||||
assert!(!env.object_exists("test3.txt").await?);
|
||||
|
||||
// Clean up
|
||||
env.clear_bucket_quota().await?;
|
||||
env.cleanup_bucket().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_update_and_clear() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
env.create_bucket().await?;
|
||||
|
||||
// Set initial quota
|
||||
env.set_bucket_quota(512 * 1024).await?;
|
||||
assert_eq!(env.get_bucket_quota().await?, Some(512 * 1024));
|
||||
|
||||
// Update quota to larger size
|
||||
env.set_bucket_quota(2 * 1024 * 1024).await?;
|
||||
assert_eq!(env.get_bucket_quota().await?, Some(2 * 1024 * 1024));
|
||||
|
||||
// Upload 1MB object (should succeed with new quota)
|
||||
env.upload_object("large_file.txt", 1024 * 1024).await?;
|
||||
assert!(env.object_exists("large_file.txt").await?);
|
||||
|
||||
// Clear quota
|
||||
env.clear_bucket_quota().await?;
|
||||
assert_eq!(env.get_bucket_quota().await?, None);
|
||||
|
||||
// Upload another large object (should succeed with no quota)
|
||||
env.upload_object("unlimited_file.txt", 5 * 1024 * 1024).await?;
|
||||
assert!(env.object_exists("unlimited_file.txt").await?);
|
||||
|
||||
env.cleanup_bucket().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_delete_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
env.create_bucket().await?;
|
||||
|
||||
// Set quota of 1MB
|
||||
env.set_bucket_quota(1024 * 1024).await?;
|
||||
|
||||
// Fill up to quota limit
|
||||
env.upload_object("file1.txt", 512 * 1024).await?;
|
||||
env.upload_object("file2.txt", 512 * 1024).await?;
|
||||
|
||||
// Delete one file
|
||||
env.client
|
||||
.delete_object()
|
||||
.bucket(&env.bucket_name)
|
||||
.key("file1.txt")
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
assert!(!env.object_exists("file1.txt").await?);
|
||||
|
||||
// Now we should be able to upload again (quota freed up)
|
||||
env.upload_object("file3.txt", 256 * 1024).await?;
|
||||
assert!(env.object_exists("file3.txt").await?);
|
||||
|
||||
env.cleanup_bucket().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_usage_tracking() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
env.create_bucket().await?;
|
||||
|
||||
// Set quota
|
||||
env.set_bucket_quota(2 * 1024 * 1024).await?;
|
||||
|
||||
// Upload some files
|
||||
env.upload_object("file1.txt", 512 * 1024).await?;
|
||||
env.upload_object("file2.txt", 256 * 1024).await?;
|
||||
|
||||
// Check usage
|
||||
let usage = env.get_bucket_usage().await?;
|
||||
assert_eq!(usage, (512 + 256) * 1024);
|
||||
|
||||
// Delete a file
|
||||
env.client
|
||||
.delete_object()
|
||||
.bucket(&env.bucket_name)
|
||||
.key("file1.txt")
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
// Check updated usage
|
||||
let updated_usage = env.get_bucket_usage().await?;
|
||||
assert_eq!(updated_usage, 256 * 1024);
|
||||
|
||||
env.cleanup_bucket().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_statistics() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
env.create_bucket().await?;
|
||||
|
||||
// Set quota of 2MB
|
||||
env.set_bucket_quota(2 * 1024 * 1024).await?;
|
||||
|
||||
// Upload files to use 1.5MB
|
||||
env.upload_object("file1.txt", 1024 * 1024).await?;
|
||||
env.upload_object("file2.txt", 512 * 1024).await?;
|
||||
|
||||
// Get detailed quota statistics
|
||||
let stats = env.get_bucket_quota_stats().await?;
|
||||
|
||||
assert_eq!(stats.get("bucket").unwrap().as_str().unwrap(), env.bucket_name);
|
||||
assert_eq!(stats.get("quota_limit").unwrap().as_u64().unwrap(), 2 * 1024 * 1024);
|
||||
assert_eq!(stats.get("current_usage").unwrap().as_u64().unwrap(), (1024 + 512) * 1024);
|
||||
assert_eq!(stats.get("remaining_quota").unwrap().as_u64().unwrap(), 512 * 1024);
|
||||
|
||||
let usage_percentage = stats.get("usage_percentage").unwrap().as_f64().unwrap();
|
||||
assert!((usage_percentage - 75.0).abs() < 0.1);
|
||||
|
||||
env.cleanup_bucket().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_check_api() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
env.create_bucket().await?;
|
||||
|
||||
// Set quota of 1MB
|
||||
env.set_bucket_quota(1024 * 1024).await?;
|
||||
|
||||
// Upload 512KB file
|
||||
env.upload_object("existing_file.txt", 512 * 1024).await?;
|
||||
|
||||
// Check if we can upload another 512KB (should succeed, exactly fill quota)
|
||||
let check_result = env.check_bucket_quota("PUT", 512 * 1024).await?;
|
||||
assert!(check_result.get("allowed").unwrap().as_bool().unwrap());
|
||||
assert_eq!(check_result.get("remaining_quota").unwrap().as_u64().unwrap(), 0);
|
||||
|
||||
// Note: we haven't actually uploaded the second file yet, so current_usage is still 512KB
|
||||
// Check if we can upload 1KB (should succeed - we haven't used the full quota yet)
|
||||
let check_result = env.check_bucket_quota("PUT", 1024).await?;
|
||||
assert!(check_result.get("allowed").unwrap().as_bool().unwrap());
|
||||
assert_eq!(check_result.get("remaining_quota").unwrap().as_u64().unwrap(), 512 * 1024 - 1024);
|
||||
|
||||
// Check if we can upload 600KB (should fail - would exceed quota)
|
||||
let check_result = env.check_bucket_quota("PUT", 600 * 1024).await?;
|
||||
assert!(!check_result.get("allowed").unwrap().as_bool().unwrap());
|
||||
|
||||
// Check delete operation (should always be allowed)
|
||||
let check_result = env.check_bucket_quota("DELETE", 512 * 1024).await?;
|
||||
assert!(check_result.get("allowed").unwrap().as_bool().unwrap());
|
||||
|
||||
env.cleanup_bucket().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_multiple_buckets() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
// Create two buckets in the same environment
|
||||
let bucket1 = format!("quota-test-{}-1", uuid::Uuid::new_v4());
|
||||
let bucket2 = format!("quota-test-{}-2", uuid::Uuid::new_v4());
|
||||
|
||||
env.env.create_test_bucket(&bucket1).await?;
|
||||
env.env.create_test_bucket(&bucket2).await?;
|
||||
|
||||
// Set different quotas for each bucket
|
||||
env.set_bucket_quota_for(&bucket1, 1024 * 1024).await?; // 1MB
|
||||
env.set_bucket_quota_for(&bucket2, 2 * 1024 * 1024).await?; // 2MB
|
||||
|
||||
// Fill first bucket to quota
|
||||
env.upload_object_to_bucket(&bucket1, "big_file.txt", 1024 * 1024).await?;
|
||||
|
||||
// Should still be able to upload to second bucket
|
||||
env.upload_object_to_bucket(&bucket2, "big_file.txt", 1024 * 1024).await?;
|
||||
env.upload_object_to_bucket(&bucket2, "another_file.txt", 512 * 1024).await?;
|
||||
|
||||
// Verify statistics are independent
|
||||
let stats1 = env.get_bucket_quota_stats_for(&bucket1).await?;
|
||||
let stats2 = env.get_bucket_quota_stats_for(&bucket2).await?;
|
||||
|
||||
assert_eq!(stats1.get("current_usage").unwrap().as_u64().unwrap(), 1024 * 1024);
|
||||
assert_eq!(stats2.get("current_usage").unwrap().as_u64().unwrap(), (1024 + 512) * 1024);
|
||||
|
||||
// Clean up
|
||||
env.env.delete_test_bucket(&bucket1).await?;
|
||||
env.env.delete_test_bucket(&bucket2).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_error_handling() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
env.create_bucket().await?;
|
||||
|
||||
// Test invalid quota type
|
||||
let url = format!("{}/rustfs/admin/v3/quota/{}", env.env.url, env.bucket_name);
|
||||
|
||||
let invalid_config = serde_json::json!({
|
||||
"quota": 1024,
|
||||
"quota_type": "SOFT" // Invalid type
|
||||
});
|
||||
|
||||
let response = awscurl_put(&url, &invalid_config.to_string(), &env.env.access_key, &env.env.secret_key).await;
|
||||
assert!(response.is_err());
|
||||
let error_msg = response.unwrap_err().to_string();
|
||||
assert!(error_msg.contains("InvalidArgument"));
|
||||
|
||||
// Test operations on non-existent bucket
|
||||
let url = format!("{}/rustfs/admin/v3/quota/non-existent-bucket", env.env.url);
|
||||
let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await;
|
||||
assert!(response.is_err());
|
||||
let error_msg = response.unwrap_err().to_string();
|
||||
assert!(error_msg.contains("NoSuchBucket"));
|
||||
|
||||
env.cleanup_bucket().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_http_endpoints() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
env.create_bucket().await?;
|
||||
|
||||
// Test 1: GET quota for bucket without quota config
|
||||
let url = format!("{}/rustfs/admin/v3/quota/{}", env.env.url, env.bucket_name);
|
||||
let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
|
||||
assert!(response.contains("quota") && response.contains("null"));
|
||||
|
||||
// Test 2: PUT quota - valid config
|
||||
let quota_config = serde_json::json!({
|
||||
"quota": 1048576,
|
||||
"quota_type": "HARD"
|
||||
});
|
||||
let response = awscurl_put(&url, &quota_config.to_string(), &env.env.access_key, &env.env.secret_key).await?;
|
||||
assert!(response.contains("success") || !response.contains("error"));
|
||||
|
||||
// Test 3: GET quota after setting
|
||||
let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
|
||||
assert!(response.contains("1048576"));
|
||||
|
||||
// Test 4: GET quota stats
|
||||
let stats_url = format!("{}/rustfs/admin/v3/quota-stats/{}", env.env.url, env.bucket_name);
|
||||
let response = awscurl_get(&stats_url, &env.env.access_key, &env.env.secret_key).await?;
|
||||
assert!(response.contains("quota_limit") && response.contains("current_usage"));
|
||||
|
||||
// Test 5: POST quota check
|
||||
let check_url = format!("{}/rustfs/admin/v3/quota-check/{}", env.env.url, env.bucket_name);
|
||||
let check_request = serde_json::json!({
|
||||
"operation_type": "PUT",
|
||||
"operation_size": 1024
|
||||
});
|
||||
let response = awscurl_post(&check_url, &check_request.to_string(), &env.env.access_key, &env.env.secret_key).await?;
|
||||
assert!(response.contains("allowed"));
|
||||
|
||||
// Test 6: DELETE quota
|
||||
let response = awscurl_delete(&url, &env.env.access_key, &env.env.secret_key).await?;
|
||||
assert!(!response.contains("error"));
|
||||
|
||||
// Test 7: GET quota after deletion
|
||||
let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
|
||||
assert!(response.contains("quota") && response.contains("null"));
|
||||
|
||||
// Test 8: Invalid quota type
|
||||
let invalid_config = serde_json::json!({
|
||||
"quota": 1024,
|
||||
"quota_type": "SOFT"
|
||||
});
|
||||
let response = awscurl_put(&url, &invalid_config.to_string(), &env.env.access_key, &env.env.secret_key).await;
|
||||
assert!(response.is_err());
|
||||
let error_msg = response.unwrap_err().to_string();
|
||||
assert!(error_msg.contains("InvalidArgument"));
|
||||
|
||||
env.cleanup_bucket().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_quota_copy_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
let env = QuotaTestEnv::new().await?;
|
||||
|
||||
env.create_bucket().await?;
|
||||
|
||||
// Set quota of 2MB
|
||||
env.set_bucket_quota(2 * 1024 * 1024).await?;
|
||||
|
||||
// Upload initial file
|
||||
env.upload_object("original.txt", 1024 * 1024).await?;
|
||||
|
||||
// Copy file - should succeed (1MB each, total 2MB)
|
||||
env.client
|
||||
.copy_object()
|
||||
.bucket(&env.bucket_name)
|
||||
.key("copy1.txt")
|
||||
.copy_source(format!("{}/{}", env.bucket_name, "original.txt"))
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
assert!(env.object_exists("copy1.txt").await?);
|
||||
|
||||
// Try to copy again - should fail (1.5MB each, total 3MB > 2MB quota)
|
||||
let copy_result = env
|
||||
.client
|
||||
.copy_object()
|
||||
.bucket(&env.bucket_name)
|
||||
.key("copy2.txt")
|
||||
.copy_source(format!("{}/{}", env.bucket_name, "original.txt"))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(copy_result.is_err());
|
||||
assert!(!env.object_exists("copy2.txt").await?);
|
||||
|
||||
env.cleanup_bucket().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
#[serial]
async fn test_quota_batch_delete() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;

env.create_bucket().await?;

// Set quota of 2MB
env.set_bucket_quota(2 * 1024 * 1024).await?;

// Upload files to fill quota
env.upload_object("file1.txt", 1024 * 1024).await?;
env.upload_object("file2.txt", 1024 * 1024).await?;

// Verify quota is full
let upload_result = env.upload_object("file3.txt", 1024).await;
assert!(upload_result.is_err());

// Delete multiple objects using batch delete
let objects = vec![
aws_sdk_s3::types::ObjectIdentifier::builder()
.key("file1.txt")
.build()
.unwrap(),
aws_sdk_s3::types::ObjectIdentifier::builder()
.key("file2.txt")
.build()
.unwrap(),
];

let delete_result = env
.client
.delete_objects()
.bucket(&env.bucket_name)
.delete(
aws_sdk_s3::types::Delete::builder()
.set_objects(Some(objects))
.quiet(true)
.build()
.unwrap(),
)
.send()
.await?;

assert_eq!(delete_result.deleted().len(), 2);

// Now should be able to upload again (quota freed up)
env.upload_object("file3.txt", 256 * 1024).await?;
assert!(env.object_exists("file3.txt").await?);

env.cleanup_bucket().await?;

Ok(())
}

#[tokio::test]
#[serial]
async fn test_quota_multipart_upload() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;

env.create_bucket().await?;

// Set quota of 10MB
env.set_bucket_quota(10 * 1024 * 1024).await?;

let key = "multipart_test.txt";
let part_size = 5 * 1024 * 1024; // 5MB is the S3 minimum for every part except the last

// Test 1: Multipart upload within quota (single 5MB part)
let create_result = env
.client
.create_multipart_upload()
.bucket(&env.bucket_name)
.key(key)
.send()
.await?;

let upload_id = create_result.upload_id().unwrap();

// Upload a single 5MB part (when an upload has only one part, S3 places no minimum on its size)
let part_data = vec![1u8; part_size];
let part_result = env
.client
.upload_part()
.bucket(&env.bucket_name)
.key(key)
.upload_id(upload_id)
.part_number(1)
.body(aws_sdk_s3::primitives::ByteStream::from(part_data))
.send()
.await?;

let uploaded_parts = vec![
aws_sdk_s3::types::CompletedPart::builder()
.part_number(1)
.e_tag(part_result.e_tag().unwrap())
.build(),
];

env.client
.complete_multipart_upload()
.bucket(&env.bucket_name)
.key(key)
.upload_id(upload_id)
.multipart_upload(
aws_sdk_s3::types::CompletedMultipartUpload::builder()
.set_parts(Some(uploaded_parts))
.build(),
)
.send()
.await?;

assert!(env.object_exists(key).await?);

// Test 2: Multipart upload exceeds quota (should fail)
// Upload 6MB filler (total now: 5MB + 6MB = 11MB > 10MB quota)
let upload_filler = env.upload_object("filler.txt", 6 * 1024 * 1024).await;
// This should fail due to quota
assert!(upload_filler.is_err());

// Verify filler doesn't exist
assert!(!env.object_exists("filler.txt").await?);

// Now try a multipart upload that exceeds quota
// Current usage: 5MB (from Test 1), quota: 10MB
// Trying to upload another 10MB (two 5MB parts) via multipart → should fail

let create_result2 = env
.client
.create_multipart_upload()
.bucket(&env.bucket_name)
.key("over_quota.txt")
.send()
.await?;

let upload_id2 = create_result2.upload_id().unwrap();

let mut uploaded_parts2 = vec![];
for part_num in 1..=2 {
let part_data = vec![part_num as u8; part_size];
let part_result = env
.client
.upload_part()
.bucket(&env.bucket_name)
.key("over_quota.txt")
.upload_id(upload_id2)
.part_number(part_num)
.body(aws_sdk_s3::primitives::ByteStream::from(part_data))
.send()
.await?;

uploaded_parts2.push(
aws_sdk_s3::types::CompletedPart::builder()
.part_number(part_num)
.e_tag(part_result.e_tag().unwrap())
.build(),
);
}

let complete_result = env
.client
.complete_multipart_upload()
.bucket(&env.bucket_name)
.key("over_quota.txt")
.upload_id(upload_id2)
.multipart_upload(
aws_sdk_s3::types::CompletedMultipartUpload::builder()
.set_parts(Some(uploaded_parts2))
.build(),
)
.send()
.await;

assert!(complete_result.is_err());
assert!(!env.object_exists("over_quota.txt").await?);

env.cleanup_bucket().await?;

Ok(())
}
}
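For readers skimming the quota tests above, the sketch below packages the admin quota pre-check (Test 5) as a standalone helper. This is illustrative only: the `/rustfs/admin/v3/quota-check/{bucket}` path, the `operation_type`/`operation_size` fields, and the `awscurl_post` helper come from the tests; the function name and its parameters are placeholders, not part of the repository.

// Illustrative sketch only — a pre-flight quota probe assembled from pieces the tests above exercise.
// `endpoint`, `access_key`, and `secret_key` are caller-supplied placeholders.
async fn put_would_be_allowed(
endpoint: &str,
bucket: &str,
access_key: &str,
secret_key: &str,
object_size: u64,
) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota-check/{}", endpoint, bucket);
let body = serde_json::json!({
"operation_type": "PUT",
"operation_size": object_size
});
let response = awscurl_post(&url, &body.to_string(), access_key, secret_key).await?;
// Mirrors the loose substring check in Test 5; a stricter client would parse the JSON response.
Ok(response.contains("allowed"))
}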
@@ -48,7 +48,6 @@ async-trait.workspace = true
bytes.workspace = true
byteorder = { workspace = true }
chrono.workspace = true
dunce.workspace = true
glob = { workspace = true }
thiserror.workspace = true
flatbuffers.workspace = true
@@ -110,6 +109,7 @@ google-cloud-auth = { workspace = true }
aws-config = { workspace = true }
faster-hex = { workspace = true }

[dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
criterion = { workspace = true, features = ["html_reports"] }

@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::disk::{self, DiskAPI as _, DiskStore, error::DiskError};
use crate::disk::error::DiskError;
use crate::disk::{self, DiskAPI as _, DiskStore};
use crate::erasure_coding::{BitrotReader, BitrotWriterWrapper, CustomWriter};
use rustfs_utils::HashAlgorithm;
use std::io::Cursor;

@@ -13,14 +13,6 @@
// limitations under the License.

use crate::bucket::metadata::BucketMetadata;
use crate::bucket::metadata_sys::get_bucket_targets_config;
use crate::bucket::metadata_sys::get_replication_config;
use crate::bucket::replication::ObjectOpts;
use crate::bucket::replication::ReplicationConfigurationExt;
use crate::bucket::target::ARN;
use crate::bucket::target::BucketTargetType;
use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials};
use crate::bucket::versioning_sys::BucketVersioningSys;
use aws_credential_types::Credentials as SdkCredentials;
use aws_sdk_s3::config::Region as SdkRegion;
use aws_sdk_s3::error::SdkError;
@@ -60,6 +52,15 @@ use tracing::warn;
use url::Url;
use uuid::Uuid;

use crate::bucket::metadata_sys::get_bucket_targets_config;
use crate::bucket::metadata_sys::get_replication_config;
use crate::bucket::replication::ObjectOpts;
use crate::bucket::replication::ReplicationConfigurationExt;
use crate::bucket::target::ARN;
use crate::bucket::target::BucketTargetType;
use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials};
use crate::bucket::versioning_sys::BucketVersioningSys;

const DEFAULT_HEALTH_CHECK_DURATION: Duration = Duration::from_secs(5);
const DEFAULT_HEALTH_CHECK_RELOAD_DURATION: Duration = Duration::from_secs(30 * 60);

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::bucket::lifecycle::lifecycle;
use super::lifecycle;

#[derive(Debug, Clone, Default)]
pub enum LcEventSrc {

@@ -18,7 +18,6 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

use crate::bucket::lifecycle::rule::TransitionOps;
use s3s::dto::{
BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition,
ObjectLockConfiguration, ObjectLockEnabled, RestoreRequest, Transition,
@@ -31,6 +30,8 @@ use time::macros::{datetime, offset};
use time::{self, Duration, OffsetDateTime};
use tracing::info;

use crate::bucket::lifecycle::rule::TransitionOps;

pub const TRANSITION_COMPLETE: &str = "complete";
pub const TRANSITION_PENDING: &str = "pending";

@@ -18,13 +18,15 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

use rustfs_common::data_usage::TierStats;
use sha2::Sha256;

use std::collections::HashMap;
use std::ops::Sub;
use time::OffsetDateTime;
use tracing::{error, warn};

use rustfs_common::data_usage::TierStats;

pub type DailyAllTierStats = HashMap<String, LastDayTierStats>;

#[derive(Clone)]

@@ -18,14 +18,15 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

use crate::bucket::lifecycle::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject};
use crate::bucket::lifecycle::lifecycle::{self, ObjectOpts};
use crate::global::GLOBAL_TierConfigMgr;
use sha2::{Digest, Sha256};
use std::any::Any;
use std::io::Write;
use xxhash_rust::xxh64;

use super::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject};
use super::lifecycle::{self, ObjectOpts};
use crate::global::GLOBAL_TierConfigMgr;

static XXHASH_SEED: u64 = 0;

#[derive(Default)]

@@ -12,21 +12,20 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::{quota::BucketQuota, target::BucketTargets};
|
||||
|
||||
use super::object_lock::ObjectLockApi;
|
||||
use super::versioning::VersioningApi;
|
||||
use super::{quota::BucketQuota, target::BucketTargets};
|
||||
use crate::bucket::utils::deserialize;
|
||||
use crate::config::com::{read_config, save_config};
|
||||
use crate::disk::BUCKET_META_PREFIX;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::new_object_layer_fn;
|
||||
use crate::store::ECStore;
|
||||
use byteorder::{BigEndian, ByteOrder, LittleEndian};
|
||||
use rmp_serde::Serializer as rmpSerializer;
|
||||
use rustfs_policy::policy::BucketPolicy;
|
||||
use s3s::dto::{
|
||||
BucketLifecycleConfiguration, CORSConfiguration, NotificationConfiguration, ObjectLockConfiguration,
|
||||
ReplicationConfiguration, ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
|
||||
BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,
|
||||
ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
|
||||
};
|
||||
use serde::Serializer;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -35,6 +34,9 @@ use std::sync::Arc;
|
||||
use time::OffsetDateTime;
|
||||
use tracing::error;
|
||||
|
||||
use crate::disk::BUCKET_META_PREFIX;
|
||||
use crate::store::ECStore;
|
||||
|
||||
pub const BUCKET_METADATA_FILE: &str = ".metadata.bin";
|
||||
pub const BUCKET_METADATA_FORMAT: u16 = 1;
|
||||
pub const BUCKET_METADATA_VERSION: u16 = 1;
|
||||
@@ -49,7 +51,6 @@ pub const OBJECT_LOCK_CONFIG: &str = "object-lock.xml";
|
||||
pub const BUCKET_VERSIONING_CONFIG: &str = "versioning.xml";
|
||||
pub const BUCKET_REPLICATION_CONFIG: &str = "replication.xml";
|
||||
pub const BUCKET_TARGETS_FILE: &str = "bucket-targets.json";
|
||||
pub const BUCKET_CORS_CONFIG: &str = "cors.xml";
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, Clone)]
|
||||
#[serde(rename_all = "PascalCase", default)]
|
||||
@@ -68,7 +69,6 @@ pub struct BucketMetadata {
|
||||
pub replication_config_xml: Vec<u8>,
|
||||
pub bucket_targets_config_json: Vec<u8>,
|
||||
pub bucket_targets_config_meta_json: Vec<u8>,
|
||||
pub cors_config_xml: Vec<u8>,
|
||||
|
||||
pub policy_config_updated_at: OffsetDateTime,
|
||||
pub object_lock_config_updated_at: OffsetDateTime,
|
||||
@@ -81,7 +81,6 @@ pub struct BucketMetadata {
|
||||
pub notification_config_updated_at: OffsetDateTime,
|
||||
pub bucket_targets_config_updated_at: OffsetDateTime,
|
||||
pub bucket_targets_config_meta_updated_at: OffsetDateTime,
|
||||
pub cors_config_updated_at: OffsetDateTime,
|
||||
|
||||
#[serde(skip)]
|
||||
pub new_field_updated_at: OffsetDateTime,
|
||||
@@ -108,8 +107,6 @@ pub struct BucketMetadata {
|
||||
pub bucket_target_config: Option<BucketTargets>,
|
||||
#[serde(skip)]
|
||||
pub bucket_target_config_meta: Option<HashMap<String, String>>,
|
||||
#[serde(skip)]
|
||||
pub cors_config: Option<CORSConfiguration>,
|
||||
}
|
||||
|
||||
impl Default for BucketMetadata {
|
||||
@@ -129,7 +126,6 @@ impl Default for BucketMetadata {
|
||||
replication_config_xml: Default::default(),
|
||||
bucket_targets_config_json: Default::default(),
|
||||
bucket_targets_config_meta_json: Default::default(),
|
||||
cors_config_xml: Default::default(),
|
||||
policy_config_updated_at: OffsetDateTime::UNIX_EPOCH,
|
||||
object_lock_config_updated_at: OffsetDateTime::UNIX_EPOCH,
|
||||
encryption_config_updated_at: OffsetDateTime::UNIX_EPOCH,
|
||||
@@ -141,7 +137,6 @@ impl Default for BucketMetadata {
|
||||
notification_config_updated_at: OffsetDateTime::UNIX_EPOCH,
|
||||
bucket_targets_config_updated_at: OffsetDateTime::UNIX_EPOCH,
|
||||
bucket_targets_config_meta_updated_at: OffsetDateTime::UNIX_EPOCH,
|
||||
cors_config_updated_at: OffsetDateTime::UNIX_EPOCH,
|
||||
new_field_updated_at: OffsetDateTime::UNIX_EPOCH,
|
||||
policy_config: Default::default(),
|
||||
notification_config: Default::default(),
|
||||
@@ -154,7 +149,6 @@ impl Default for BucketMetadata {
|
||||
replication_config: Default::default(),
|
||||
bucket_target_config: Default::default(),
|
||||
bucket_target_config_meta: Default::default(),
|
||||
cors_config: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -303,10 +297,6 @@ impl BucketMetadata {
|
||||
self.bucket_targets_config_json = data.clone();
|
||||
self.bucket_targets_config_updated_at = updated;
|
||||
}
|
||||
BUCKET_CORS_CONFIG => {
|
||||
self.cors_config_xml = data;
|
||||
self.cors_config_updated_at = updated;
|
||||
}
|
||||
_ => return Err(Error::other(format!("config file not found : {config_file}"))),
|
||||
}
|
||||
|
||||
@@ -365,7 +355,7 @@ impl BucketMetadata {
|
||||
self.tagging_config = Some(deserialize::<Tagging>(&self.tagging_config_xml)?);
|
||||
}
|
||||
if !self.quota_config_json.is_empty() {
|
||||
self.quota_config = Some(serde_json::from_slice(&self.quota_config_json)?);
|
||||
self.quota_config = Some(BucketQuota::unmarshal(&self.quota_config_json)?);
|
||||
}
|
||||
if !self.replication_config_xml.is_empty() {
|
||||
self.replication_config = Some(deserialize::<ReplicationConfiguration>(&self.replication_config_xml)?);
|
||||
@@ -377,9 +367,6 @@ impl BucketMetadata {
|
||||
} else {
|
||||
self.bucket_target_config = Some(BucketTargets::default())
|
||||
}
|
||||
if !self.cors_config_xml.is_empty() {
|
||||
self.cors_config = Some(deserialize::<CORSConfiguration>(&self.cors_config_xml)?);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -500,8 +487,7 @@ mod test {
|
||||
bm.tagging_config_updated_at = OffsetDateTime::now_utc();
|
||||
|
||||
// Add quota configuration
|
||||
let quota_json =
|
||||
r#"{"quota":1073741824,"quota_type":"Hard","created_at":"2024-01-01T00:00:00Z","updated_at":"2024-01-01T00:00:00Z"}"#; // 1GB quota
|
||||
let quota_json = r#"{"quota":1073741824,"quotaType":"hard"}"#; // 1GB quota
|
||||
bm.quota_config_json = quota_json.as_bytes().to_vec();
|
||||
bm.quota_config_updated_at = OffsetDateTime::now_utc();
|
||||
|
||||
|
||||
@@ -12,9 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::metadata::{BucketMetadata, load_bucket_metadata};
|
||||
use super::quota::BucketQuota;
|
||||
use super::target::BucketTargets;
|
||||
use crate::StorageAPI as _;
|
||||
use crate::bucket::bucket_target_sys::BucketTargetSys;
|
||||
use crate::bucket::metadata::{BUCKET_LIFECYCLE_CONFIG, load_bucket_metadata_parse};
|
||||
@@ -23,13 +20,12 @@ use crate::error::{Error, Result, is_err_bucket_not_found};
|
||||
use crate::global::{GLOBAL_Endpoints, is_dist_erasure, is_erasure, new_object_layer_fn};
|
||||
use crate::store::ECStore;
|
||||
use futures::future::join_all;
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_common::heal_channel::HealOpts;
|
||||
use rustfs_policy::policy::BucketPolicy;
|
||||
use s3s::dto::ReplicationConfiguration;
|
||||
use s3s::dto::{
|
||||
BucketLifecycleConfiguration, CORSConfiguration, NotificationConfiguration, ObjectLockConfiguration,
|
||||
ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
|
||||
BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ServerSideEncryptionConfiguration, Tagging,
|
||||
VersioningConfiguration,
|
||||
};
|
||||
use std::collections::HashSet;
|
||||
use std::sync::OnceLock;
|
||||
@@ -40,6 +36,12 @@ use tokio::sync::RwLock;
|
||||
use tokio::time::sleep;
|
||||
use tracing::error;
|
||||
|
||||
use super::metadata::{BucketMetadata, load_bucket_metadata};
|
||||
use super::quota::BucketQuota;
|
||||
use super::target::BucketTargets;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref GLOBAL_BucketMetadataSys: OnceLock<Arc<RwLock<BucketMetadataSys>>> = OnceLock::new();
|
||||
}
|
||||
@@ -110,13 +112,6 @@ pub async fn get_bucket_targets_config(bucket: &str) -> Result<BucketTargets> {
|
||||
bucket_meta_sys.get_bucket_targets_config(bucket).await
|
||||
}
|
||||
|
||||
pub async fn get_cors_config(bucket: &str) -> Result<(CORSConfiguration, OffsetDateTime)> {
|
||||
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
|
||||
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
|
||||
|
||||
bucket_meta_sys.get_cors_config(bucket).await
|
||||
}
|
||||
|
||||
pub async fn get_tagging_config(bucket: &str) -> Result<(Tagging, OffsetDateTime)> {
|
||||
let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
|
||||
let bucket_meta_sys = bucket_meta_sys_lock.read().await;
|
||||
@@ -507,16 +502,6 @@ impl BucketMetadataSys {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_cors_config(&self, bucket: &str) -> Result<(CORSConfiguration, OffsetDateTime)> {
|
||||
let (bm, _) = self.get_config(bucket).await?;
|
||||
|
||||
if let Some(config) = &bm.cors_config {
|
||||
Ok((config.clone(), bm.cors_config_updated_at))
|
||||
} else {
|
||||
Err(Error::ConfigNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn created_at(&self, bucket: &str) -> Result<OffsetDateTime> {
|
||||
let bm = match self.get_config(bucket).await {
|
||||
Ok((bm, _)) => bm.created,
|
||||
|
||||
@@ -12,11 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use s3s::dto::{Date, ObjectLockLegalHold, ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode};
use s3s::header::{X_AMZ_OBJECT_LOCK_LEGAL_HOLD, X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE};
use std::collections::HashMap;
use time::{OffsetDateTime, format_description};

use s3s::dto::{Date, ObjectLockLegalHold, ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode};
use s3s::header::{X_AMZ_OBJECT_LOCK_LEGAL_HOLD, X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE};

const _ERR_MALFORMED_BUCKET_OBJECT_CONFIG: &str = "invalid bucket object lock config";
const _ERR_INVALID_RETENTION_DATE: &str = "date must be provided in ISO 8601 format";
const _ERR_PAST_OBJECTLOCK_RETAIN_DATE: &str = "the retain until date must be in the future";

@@ -12,13 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::bucket::metadata_sys::get_object_lock_config;
use crate::bucket::object_lock::objectlock;
use crate::store_api::ObjectInfo;
use s3s::dto::{DefaultRetention, ObjectLockLegalHoldStatus, ObjectLockRetentionMode};
use std::sync::Arc;
use time::OffsetDateTime;

use s3s::dto::{DefaultRetention, ObjectLockLegalHoldStatus, ObjectLockRetentionMode};

use crate::bucket::metadata_sys::get_object_lock_config;
use crate::store_api::ObjectInfo;

use super::objectlock;

pub struct BucketObjectLockSys {}

impl BucketObjectLockSys {

@@ -1,195 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::{BucketQuota, QuotaCheckResult, QuotaError, QuotaOperation};
use crate::bucket::metadata_sys::{BucketMetadataSys, update};
use crate::data_usage::get_bucket_usage_memory;
use rustfs_common::metrics::Metric;
use rustfs_config::QUOTA_CONFIG_FILE;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
use tracing::{debug, warn};

pub struct QuotaChecker {
metadata_sys: Arc<RwLock<BucketMetadataSys>>,
}

impl QuotaChecker {
pub fn new(metadata_sys: Arc<RwLock<BucketMetadataSys>>) -> Self {
Self { metadata_sys }
}

pub async fn check_quota(
&self,
bucket: &str,
operation: QuotaOperation,
operation_size: u64,
) -> Result<QuotaCheckResult, QuotaError> {
let start_time = Instant::now();
let quota_config = self.get_quota_config(bucket).await?;

// If no quota limit is set, allow operation
let quota_limit = match quota_config.quota {
None => {
let current_usage = self.get_real_time_usage(bucket).await?;
return Ok(QuotaCheckResult {
allowed: true,
current_usage,
quota_limit: None,
operation_size,
remaining: None,
});
}
Some(q) => q,
};

let current_usage = self.get_real_time_usage(bucket).await?;

let expected_usage = match operation {
QuotaOperation::PutObject | QuotaOperation::CopyObject => current_usage + operation_size,
QuotaOperation::DeleteObject => current_usage.saturating_sub(operation_size),
};

let allowed = match operation {
QuotaOperation::PutObject | QuotaOperation::CopyObject => {
quota_config.check_operation_allowed(current_usage, operation_size)
}
QuotaOperation::DeleteObject => true,
};

let remaining = if quota_limit >= expected_usage {
Some(quota_limit - expected_usage)
} else {
Some(0)
};

if !allowed {
warn!(
"Quota exceeded for bucket: {}, current: {}, limit: {}, attempted: {}",
bucket, current_usage, quota_limit, operation_size
);
}

let result = QuotaCheckResult {
allowed,
current_usage,
quota_limit: Some(quota_limit),
operation_size,
remaining,
};

let duration = start_time.elapsed();
rustfs_common::metrics::Metrics::inc_time(Metric::QuotaCheck, duration).await;
if !allowed {
rustfs_common::metrics::Metrics::inc_time(Metric::QuotaViolation, duration).await;
}

Ok(result)
}

pub async fn get_quota_config(&self, bucket: &str) -> Result<BucketQuota, QuotaError> {
let meta = self
.metadata_sys
.read()
.await
.get(bucket)
.await
.map_err(QuotaError::StorageError)?;

if meta.quota_config_json.is_empty() {
debug!("No quota config found for bucket: {}, using default", bucket);
return Ok(BucketQuota::new(None));
}

let quota: BucketQuota = serde_json::from_slice(&meta.quota_config_json).map_err(|e| QuotaError::InvalidConfig {
reason: format!("Failed to parse quota config: {}", e),
})?;

Ok(quota)
}

pub async fn set_quota_config(&mut self, bucket: &str, quota: BucketQuota) -> Result<(), QuotaError> {
let json_data = serde_json::to_vec(&quota).map_err(|e| QuotaError::InvalidConfig {
reason: format!("Failed to serialize quota config: {}", e),
})?;
let start_time = Instant::now();

update(bucket, QUOTA_CONFIG_FILE, json_data)
.await
.map_err(QuotaError::StorageError)?;

rustfs_common::metrics::Metrics::inc_time(Metric::QuotaSync, start_time.elapsed()).await;
Ok(())
}

pub async fn get_quota_stats(&self, bucket: &str) -> Result<(BucketQuota, Option<u64>), QuotaError> {
// If bucket doesn't exist, return ConfigNotFound error
if !self.bucket_exists(bucket).await {
return Err(QuotaError::ConfigNotFound {
bucket: bucket.to_string(),
});
}

let quota = self.get_quota_config(bucket).await?;
let current_usage = self.get_real_time_usage(bucket).await.unwrap_or(0);

Ok((quota, Some(current_usage)))
}

pub async fn bucket_exists(&self, bucket: &str) -> bool {
self.metadata_sys.read().await.get(bucket).await.is_ok()
}

pub async fn get_real_time_usage(&self, bucket: &str) -> Result<u64, QuotaError> {
Ok(get_bucket_usage_memory(bucket).await.unwrap_or(0))
}
}

#[cfg(test)]
mod tests {
use super::*;

#[tokio::test]
async fn test_quota_check_no_limit() {
let result = QuotaCheckResult {
allowed: true,
current_usage: 0,
quota_limit: None,
operation_size: 1024,
remaining: None,
};

assert!(result.allowed);
assert_eq!(result.quota_limit, None);
}

#[tokio::test]
async fn test_quota_check_within_limit() {
let quota = BucketQuota::new(Some(2048)); // 2KB

// Current usage 512, trying to add 1024
let allowed = quota.check_operation_allowed(512, 1024);
assert!(allowed);
}

#[tokio::test]
async fn test_quota_check_exceeds_limit() {
let quota = BucketQuota::new(Some(1024)); // 1KB

// Current usage 512, trying to add 1024
let allowed = quota.check_operation_allowed(512, 1024);
assert!(!allowed);
}
}
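The checker.rs removed above bundles the whole admission decision into QuotaChecker::check_quota. A minimal sketch of how that API is driven is shown below; it is illustrative only, assumes the same imports as checker.rs, and the `admit_put` helper name plus the way the metadata_sys handle is obtained are not taken from the repository.

// Illustrative sketch only — drives the QuotaChecker API from the removed checker.rs above.
// `metadata_sys` is assumed to be the Arc<RwLock<BucketMetadataSys>> constructed elsewhere.
use std::sync::Arc;
use tokio::sync::RwLock;

async fn admit_put(
metadata_sys: Arc<RwLock<BucketMetadataSys>>,
bucket: &str,
object_size: u64,
) -> Result<(), QuotaError> {
let checker = QuotaChecker::new(metadata_sys);
let result = checker.check_quota(bucket, QuotaOperation::PutObject, object_size).await?;
if !result.allowed {
// QuotaErrorResponse in mod.rs maps this variant to QUOTA_EXCEEDED_ERROR_CODE.
return Err(QuotaError::QuotaExceeded {
current: result.current_usage,
limit: result.quota_limit.unwrap_or(0),
operation: object_size,
});
}
Ok(())
}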
@@ -12,37 +12,36 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod checker;

use crate::error::Result;
use rmp_serde::Serializer as rmpSerializer;
use rustfs_config::{
QUOTA_API_PATH, QUOTA_EXCEEDED_ERROR_CODE, QUOTA_INTERNAL_ERROR_CODE, QUOTA_INVALID_CONFIG_ERROR_CODE,
QUOTA_NOT_FOUND_ERROR_CODE,
};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use time::OffsetDateTime;

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
// Define the QuotaType enum
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum QuotaType {
/// Hard quota: reject immediately when exceeded
#[default]
Hard,
}

#[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq)]
// Define the BucketQuota structure
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketQuota {
pub quota: Option<u64>,
pub quota_type: QuotaType,
/// Timestamp when this quota configuration was set (for audit purposes)
pub created_at: Option<OffsetDateTime>,
quota: Option<u64>, // Use Option to represent optional fields

size: u64,

rate: u64,

requests: u64,

quota_type: Option<QuotaType>,
}

impl BucketQuota {
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut buf = Vec::new();

self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;

Ok(buf)
}

@@ -50,107 +49,4 @@ impl BucketQuota {
let t: BucketQuota = rmp_serde::from_slice(buf)?;
Ok(t)
}

pub fn new(quota: Option<u64>) -> Self {
let now = OffsetDateTime::now_utc();
Self {
quota,
quota_type: QuotaType::Hard,
created_at: Some(now),
}
}

pub fn get_quota_limit(&self) -> Option<u64> {
self.quota
}

pub fn check_operation_allowed(&self, current_usage: u64, operation_size: u64) -> bool {
if let Some(quota_limit) = self.quota {
current_usage.saturating_add(operation_size) <= quota_limit
} else {
true // No quota limit
}
}

pub fn get_remaining_quota(&self, current_usage: u64) -> Option<u64> {
self.quota.map(|limit| limit.saturating_sub(current_usage))
}
}

#[derive(Debug)]
pub struct QuotaCheckResult {
pub allowed: bool,
pub current_usage: u64,
/// quota_limit: None means unlimited
pub quota_limit: Option<u64>,
pub operation_size: u64,
pub remaining: Option<u64>,
}

#[derive(Debug)]
pub enum QuotaOperation {
PutObject,
CopyObject,
DeleteObject,
}

#[derive(Debug, Error)]
pub enum QuotaError {
#[error("Bucket quota exceeded: current={current}, limit={limit}, operation={operation}")]
QuotaExceeded { current: u64, limit: u64, operation: u64 },
#[error("Quota configuration not found for bucket: {bucket}")]
ConfigNotFound { bucket: String },
#[error("Invalid quota configuration: {reason}")]
InvalidConfig { reason: String },
#[error("Storage error: {0}")]
StorageError(#[from] crate::error::StorageError),
}

#[derive(Debug, Serialize)]
pub struct QuotaErrorResponse {
#[serde(rename = "Code")]
pub code: String,
#[serde(rename = "Message")]
pub message: String,
#[serde(rename = "Resource")]
pub resource: String,
#[serde(rename = "RequestId")]
pub request_id: String,
#[serde(rename = "HostId")]
pub host_id: String,
}

impl QuotaErrorResponse {
pub fn new(quota_error: &QuotaError, request_id: &str, host_id: &str) -> Self {
match quota_error {
QuotaError::QuotaExceeded { .. } => Self {
code: QUOTA_EXCEEDED_ERROR_CODE.to_string(),
message: quota_error.to_string(),
resource: QUOTA_API_PATH.to_string(),
request_id: request_id.to_string(),
host_id: host_id.to_string(),
},
QuotaError::ConfigNotFound { .. } => Self {
code: QUOTA_NOT_FOUND_ERROR_CODE.to_string(),
message: quota_error.to_string(),
resource: QUOTA_API_PATH.to_string(),
request_id: request_id.to_string(),
host_id: host_id.to_string(),
},
QuotaError::InvalidConfig { .. } => Self {
code: QUOTA_INVALID_CONFIG_ERROR_CODE.to_string(),
message: quota_error.to_string(),
resource: QUOTA_API_PATH.to_string(),
request_id: request_id.to_string(),
host_id: host_id.to_string(),
},
QuotaError::StorageError(_) => Self {
code: QUOTA_INTERNAL_ERROR_CODE.to_string(),
message: quota_error.to_string(),
resource: QUOTA_API_PATH.to_string(),
request_id: request_id.to_string(),
host_id: host_id.to_string(),
},
}
}
}
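A short walkthrough of the richer BucketQuota helpers visible in the hunk above (new, check_operation_allowed, get_remaining_quota) with concrete numbers. Illustrative only; the function name is made up, and these helpers belong to the fuller variant of the struct shown on one side of this diff.

// Illustrative sketch only — exercises the BucketQuota helpers from the hunk above.
fn quota_walkthrough() {
let quota = BucketQuota::new(Some(2 * 1024 * 1024)); // 2 MiB hard quota

// 1 MiB already used plus a 1 MiB PUT fills the bucket exactly and is still allowed.
assert!(quota.check_operation_allowed(1024 * 1024, 1024 * 1024));

// One more byte on top of a full bucket is rejected.
assert!(!quota.check_operation_allowed(2 * 1024 * 1024, 1));

// Remaining headroom after 1.5 MiB of usage.
assert_eq!(quota.get_remaining_quota(1536 * 1024), Some(512 * 1024));
}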
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::bucket::replication::ReplicationRuleExt as _;
use super::ReplicationRuleExt as _;
use crate::bucket::tagging::decode_tags_to_map;
use rustfs_filemeta::ReplicationType;
use s3s::dto::DeleteMarkerReplicationStatus;
|
||||
@@ -1,30 +1,22 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::StorageAPI;
|
||||
use crate::bucket::replication::ResyncOpts;
|
||||
use crate::bucket::replication::ResyncStatusType;
|
||||
use crate::bucket::replication::replicate_delete;
|
||||
use crate::bucket::replication::replicate_object;
|
||||
use crate::disk::BUCKET_META_PREFIX;
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicI32;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use crate::bucket::replication::replication_resyncer::{
|
||||
BucketReplicationResyncStatus, DeletedObjectReplicationInfo, ReplicationResyncer,
|
||||
};
|
||||
use crate::bucket::replication::replication_state::ReplicationStats;
|
||||
use crate::config::com::read_config;
|
||||
use crate::disk::BUCKET_META_PREFIX;
|
||||
use crate::error::Error as EcstoreError;
|
||||
use crate::store_api::ObjectInfo;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_filemeta::MrfReplicateEntry;
|
||||
use rustfs_filemeta::ReplicateDecision;
|
||||
@@ -37,10 +29,6 @@ use rustfs_filemeta::ResyncDecision;
|
||||
use rustfs_filemeta::replication_statuses_map;
|
||||
use rustfs_filemeta::version_purge_statuses_map;
|
||||
use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicI32;
|
||||
use std::sync::atomic::Ordering;
|
||||
use time::OffsetDateTime;
|
||||
use time::format_description::well_known::Rfc3339;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
@@ -1,17 +1,3 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::bucket::bucket_target_sys::{
|
||||
AdvancedPutOptions, BucketTargetSys, PutObjectOptions, PutObjectPartOptions, RemoveObjectOptions, TargetClient,
|
||||
};
|
||||
@@ -30,6 +16,7 @@ use crate::event_notification::{EventArgs, send_event};
|
||||
use crate::global::GLOBAL_LocalNodeName;
|
||||
use crate::store_api::{DeletedObject, ObjectInfo, ObjectOptions, ObjectToDelete, WalkOptions};
|
||||
use crate::{StorageAPI, new_object_layer_fn};
|
||||
|
||||
use aws_sdk_s3::error::SdkError;
|
||||
use aws_sdk_s3::operation::head_object::HeadObjectOutput;
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
@@ -37,6 +24,7 @@ use aws_sdk_s3::types::{CompletedPart, ObjectLockLegalHoldStatus};
|
||||
use byteorder::ByteOrder;
|
||||
use futures::future::join_all;
|
||||
use http::HeaderMap;
|
||||
|
||||
use regex::Regex;
|
||||
use rustfs_filemeta::{
|
||||
MrfReplicateEntry, REPLICATE_EXISTING, REPLICATE_EXISTING_DELETE, REPLICATION_RESET, ReplicateDecision, ReplicateObjectInfo,
|
||||
|
||||
@@ -1,17 +1,3 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::Error;
|
||||
use rustfs_filemeta::{ReplicatedTargetInfo, ReplicationStatusType, ReplicationType};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -12,10 +12,11 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::bucket::replication::ObjectOpts;
|
||||
use s3s::dto::ReplicaModificationsStatus;
|
||||
use s3s::dto::ReplicationRule;
|
||||
|
||||
use super::ObjectOpts;
|
||||
|
||||
pub trait ReplicationRuleExt {
|
||||
fn prefix(&self) -> &str;
|
||||
fn metadata_replicate(&self, obj: &ObjectOpts) -> bool;
|
||||
|
||||
@@ -12,8 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use s3s::dto::Tag;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use s3s::dto::Tag;
|
||||
use url::form_urlencoded;
|
||||
|
||||
pub fn decode_tags(tags: &str) -> Vec<Tag> {
|
||||
|
||||
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::bucket::target::BucketTargetType;
use super::BucketTargetType;
use std::fmt::Display;
use std::str::FromStr;

@@ -14,15 +14,16 @@

use crate::disk::RUSTFS_META_BUCKET;
use crate::error::{Error, Result, StorageError};
use regex::Regex;
use rustfs_utils::path::SLASH_SEPARATOR_STR;
use rustfs_utils::path::SLASH_SEPARATOR;
use s3s::xml;
use tracing::instrument;

pub fn is_meta_bucketname(name: &str) -> bool {
name.starts_with(RUSTFS_META_BUCKET)
}

use regex::Regex;
use tracing::instrument;

lazy_static::lazy_static! {
static ref VALID_BUCKET_NAME: Regex = Regex::new(r"^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$").unwrap();
static ref VALID_BUCKET_NAME_STRICT: Regex = Regex::new(r"^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$").unwrap();
@@ -193,7 +194,7 @@ pub fn is_valid_object_name(object: &str) -> bool {
return false;
}

if object.ends_with(SLASH_SEPARATOR_STR) {
if object.ends_with(SLASH_SEPARATOR) {
return false;
}

@@ -205,7 +206,7 @@ pub fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Res
return Err(StorageError::ObjectNameTooLong(bucket.to_owned(), object.to_owned()));
}

if object.starts_with(SLASH_SEPARATOR_STR) {
if object.starts_with(SLASH_SEPARATOR) {
return Err(StorageError::ObjectNamePrefixAsSlash(bucket.to_owned(), object.to_owned()));
}

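For reference, a tiny sketch of what the strict bucket-name pattern from the lazy_static block above accepts and rejects. Illustrative only: the regex literal is copied verbatim from the hunk, while the helper name is made up.

// Illustrative sketch only — checks a few names against VALID_BUCKET_NAME_STRICT's pattern.
use regex::Regex;

fn strict_name_examples() {
let strict = Regex::new(r"^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$").unwrap();
assert!(strict.is_match("my-bucket-01")); // lowercase letters, digits, '-' and '.' only
assert!(!strict.is_match("My_Bucket")); // uppercase and '_' are rejected
assert!(!strict.is_match("ab")); // too short: the pattern requires at least 3 characters
}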
@@ -12,9 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use rustfs_utils::string::match_simple;
|
||||
use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};
|
||||
|
||||
use rustfs_utils::string::match_simple;
|
||||
|
||||
pub trait VersioningApi {
|
||||
fn enabled(&self) -> bool;
|
||||
fn prefix_enabled(&self, prefix: &str) -> bool;
|
||||
|
||||
@@ -12,8 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use std::sync::Arc;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
pub mod metacache_set;
|
||||
|
||||
@@ -12,7 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use rustfs_utils::string::{has_pattern, has_string_suffix_in_slice};
|
||||
use rustfs_utils::string::has_pattern;
|
||||
use rustfs_utils::string::has_string_suffix_in_slice;
|
||||
use std::env;
|
||||
use tracing::error;
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ use crate::error::{Error, Result};
|
||||
use crate::store_api::{ObjectInfo, ObjectOptions, PutObjReader, StorageAPI};
|
||||
use http::HeaderMap;
|
||||
use rustfs_config::DEFAULT_DELIMITER;
|
||||
use rustfs_utils::path::SLASH_SEPARATOR_STR;
|
||||
use rustfs_utils::path::SLASH_SEPARATOR;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use std::sync::LazyLock;
|
||||
@@ -29,7 +29,7 @@ const CONFIG_FILE: &str = "config.json";
|
||||
|
||||
pub const STORAGE_CLASS_SUB_SYS: &str = "storage_class";
|
||||
|
||||
static CONFIG_BUCKET: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_META_BUCKET}{SLASH_SEPARATOR_STR}{CONFIG_PREFIX}"));
|
||||
static CONFIG_BUCKET: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_META_BUCKET}{SLASH_SEPARATOR}{CONFIG_PREFIX}"));
|
||||
|
||||
static SUB_SYSTEMS_DYNAMIC: LazyLock<HashSet<String>> = LazyLock::new(|| {
|
||||
let mut h = HashSet::new();
|
||||
@@ -129,7 +129,7 @@ async fn new_and_save_server_config<S: StorageAPI>(api: Arc<S>) -> Result<Config
|
||||
}
|
||||
|
||||
fn get_config_file() -> String {
|
||||
format!("{CONFIG_PREFIX}{SLASH_SEPARATOR_STR}{CONFIG_FILE}")
|
||||
format!("{CONFIG_PREFIX}{SLASH_SEPARATOR}{CONFIG_FILE}")
|
||||
}
|
||||
|
||||
/// Handle the situation where the configuration file does not exist, create and save a new configuration
|
||||
|
||||
@@ -12,66 +12,52 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod local_snapshot;
|
||||
|
||||
use crate::{
|
||||
bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, error::Error, store::ECStore,
|
||||
store_api::StorageAPI,
|
||||
use std::{
|
||||
collections::{HashMap, hash_map::Entry},
|
||||
sync::Arc,
|
||||
time::SystemTime,
|
||||
};
|
||||
|
||||
pub mod local_snapshot;
|
||||
pub use local_snapshot::{
|
||||
DATA_USAGE_DIR, DATA_USAGE_STATE_DIR, LOCAL_USAGE_SNAPSHOT_VERSION, LocalUsageSnapshot, LocalUsageSnapshotMeta,
|
||||
data_usage_dir, data_usage_state_dir, ensure_data_usage_layout, read_snapshot as read_local_snapshot, snapshot_file_name,
|
||||
snapshot_object_path, snapshot_path, write_snapshot as write_local_snapshot,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, store::ECStore, store_api::StorageAPI,
|
||||
};
|
||||
use rustfs_common::data_usage::{
|
||||
BucketTargetUsageInfo, BucketUsageInfo, DataUsageCache, DataUsageEntry, DataUsageInfo, DiskUsageStatus, SizeSummary,
|
||||
};
|
||||
use rustfs_utils::path::SLASH_SEPARATOR_STR;
|
||||
use std::{
|
||||
collections::{HashMap, hash_map::Entry},
|
||||
sync::{Arc, OnceLock},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
use rustfs_utils::path::SLASH_SEPARATOR;
|
||||
use tokio::fs;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
use crate::error::Error;
|
||||
|
||||
// Data usage storage constants
|
||||
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR_STR;
|
||||
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;
|
||||
const DATA_USAGE_OBJ_NAME: &str = ".usage.json";
|
||||
const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin";
|
||||
pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin";
|
||||
const DATA_USAGE_CACHE_TTL_SECS: u64 = 30;
|
||||
|
||||
type UsageMemoryCache = Arc<RwLock<HashMap<String, (u64, SystemTime)>>>;
|
||||
type CacheUpdating = Arc<RwLock<bool>>;
|
||||
|
||||
static USAGE_MEMORY_CACHE: OnceLock<UsageMemoryCache> = OnceLock::new();
|
||||
static USAGE_CACHE_UPDATING: OnceLock<CacheUpdating> = OnceLock::new();
|
||||
|
||||
fn memory_cache() -> &'static UsageMemoryCache {
|
||||
USAGE_MEMORY_CACHE.get_or_init(|| Arc::new(RwLock::new(HashMap::new())))
|
||||
}
|
||||
|
||||
fn cache_updating() -> &'static CacheUpdating {
|
||||
USAGE_CACHE_UPDATING.get_or_init(|| Arc::new(RwLock::new(false)))
|
||||
}
|
||||
|
||||
// Data usage storage paths
|
||||
lazy_static::lazy_static! {
|
||||
pub static ref DATA_USAGE_BUCKET: String = format!("{}{}{}",
|
||||
crate::disk::RUSTFS_META_BUCKET,
|
||||
SLASH_SEPARATOR_STR,
|
||||
SLASH_SEPARATOR,
|
||||
crate::disk::BUCKET_META_PREFIX
|
||||
);
|
||||
pub static ref DATA_USAGE_OBJ_NAME_PATH: String = format!("{}{}{}",
|
||||
crate::disk::BUCKET_META_PREFIX,
|
||||
SLASH_SEPARATOR_STR,
|
||||
SLASH_SEPARATOR,
|
||||
DATA_USAGE_OBJ_NAME
|
||||
);
|
||||
pub static ref DATA_USAGE_BLOOM_NAME_PATH: String = format!("{}{}{}",
|
||||
crate::disk::BUCKET_META_PREFIX,
|
||||
SLASH_SEPARATOR_STR,
|
||||
SLASH_SEPARATOR,
|
||||
DATA_USAGE_BLOOM_NAME
|
||||
);
|
||||
}
|
||||
@@ -108,8 +94,8 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
|
||||
Ok(data) => data,
|
||||
Err(e) => {
|
||||
error!("Failed to read data usage info from backend: {}", e);
|
||||
if e == Error::ConfigNotFound {
|
||||
info!("Data usage config not found, building basic statistics");
|
||||
if e == crate::error::Error::ConfigNotFound {
|
||||
warn!("Data usage config not found, building basic statistics");
|
||||
return build_basic_data_usage_info(store).await;
|
||||
}
|
||||
return Err(Error::other(e));
|
||||
@@ -142,7 +128,7 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
|
||||
.map(|(bucket, &size)| {
|
||||
(
|
||||
bucket.clone(),
|
||||
BucketUsageInfo {
|
||||
rustfs_common::data_usage::BucketUsageInfo {
|
||||
size,
|
||||
..Default::default()
|
||||
},
|
||||
@@ -259,7 +245,7 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU
|
||||
|
||||
// If a snapshot is corrupted or unreadable, skip it but keep processing others
|
||||
if let Err(err) = &snapshot_result {
|
||||
info!(
|
||||
warn!(
|
||||
"Failed to read data usage snapshot for disk {} (pool {}, set {}, disk {}): {}",
|
||||
disk_id, pool_idx, set_disks.set_index, disk_index, err
|
||||
);
|
||||
@@ -268,7 +254,7 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU
|
||||
if let Err(remove_err) = fs::remove_file(&snapshot_file).await
|
||||
&& remove_err.kind() != std::io::ErrorKind::NotFound
|
||||
{
|
||||
info!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
|
||||
warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -355,7 +341,7 @@ pub async fn compute_bucket_usage(store: Arc<ECStore>, bucket_name: &str) -> Res
|
||||
|
||||
continuation = result.next_continuation_token.clone();
|
||||
if continuation.is_none() {
|
||||
info!(
|
||||
warn!(
|
||||
"Bucket {} listing marked truncated but no continuation token returned; stopping early",
|
||||
bucket_name
|
||||
);
|
||||
@@ -378,120 +364,8 @@ pub async fn compute_bucket_usage(store: Arc<ECStore>, bucket_name: &str) -> Res
|
||||
Ok(usage)
|
||||
}
|
||||
|
||||
/// Fast in-memory increment for immediate quota consistency
|
||||
pub async fn increment_bucket_usage_memory(bucket: &str, size_increment: u64) {
|
||||
let mut cache = memory_cache().write().await;
|
||||
let current = cache.entry(bucket.to_string()).or_insert_with(|| (0, SystemTime::now()));
|
||||
current.0 += size_increment;
|
||||
current.1 = SystemTime::now();
|
||||
}
|
||||
|
||||
/// Fast in-memory decrement for immediate quota consistency
|
||||
pub async fn decrement_bucket_usage_memory(bucket: &str, size_decrement: u64) {
|
||||
let mut cache = memory_cache().write().await;
|
||||
if let Some(current) = cache.get_mut(bucket) {
|
||||
current.0 = current.0.saturating_sub(size_decrement);
|
||||
current.1 = SystemTime::now();
|
||||
}
|
||||
}
|
||||
|
||||
/// Get bucket usage from in-memory cache
|
||||
pub async fn get_bucket_usage_memory(bucket: &str) -> Option<u64> {
|
||||
update_usage_cache_if_needed().await;
|
||||
|
||||
let cache = memory_cache().read().await;
|
||||
cache.get(bucket).map(|(usage, _)| *usage)
|
||||
}
|
||||
|
||||
async fn update_usage_cache_if_needed() {
|
||||
let ttl = Duration::from_secs(DATA_USAGE_CACHE_TTL_SECS);
|
||||
let double_ttl = ttl * 2;
|
||||
let now = SystemTime::now();
|
||||
|
||||
let cache = memory_cache().read().await;
|
||||
let earliest_timestamp = cache.values().map(|(_, ts)| *ts).min();
|
||||
drop(cache);
|
||||
|
||||
let age = match earliest_timestamp {
|
||||
Some(ts) => now.duration_since(ts).unwrap_or_default(),
|
||||
None => double_ttl,
|
||||
};
|
||||
|
||||
if age < ttl {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut updating = cache_updating().write().await;
|
||||
if age < double_ttl {
|
||||
if *updating {
|
||||
return;
|
||||
}
|
||||
*updating = true;
|
||||
drop(updating);
|
||||
|
||||
let cache_clone = (*memory_cache()).clone();
|
||||
let updating_clone = (*cache_updating()).clone();
|
||||
tokio::spawn(async move {
|
||||
if let Some(store) = crate::global::GLOBAL_OBJECT_API.get()
|
||||
&& let Ok(data_usage_info) = load_data_usage_from_backend(store.clone()).await
|
||||
{
|
||||
let mut cache = cache_clone.write().await;
|
||||
for (bucket_name, bucket_usage) in data_usage_info.buckets_usage.iter() {
|
||||
cache.insert(bucket_name.clone(), (bucket_usage.size, SystemTime::now()));
|
||||
}
|
||||
}
|
||||
let mut updating = updating_clone.write().await;
|
||||
*updating = false;
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
for retry in 0..10 {
|
||||
if !*updating {
|
||||
break;
|
||||
}
|
||||
drop(updating);
|
||||
let delay = Duration::from_millis(1 << retry);
|
||||
tokio::time::sleep(delay).await;
|
||||
updating = cache_updating().write().await;
|
||||
}
|
||||
|
||||
*updating = true;
|
||||
drop(updating);
|
||||
|
||||
if let Some(store) = crate::global::GLOBAL_OBJECT_API.get()
|
||||
&& let Ok(data_usage_info) = load_data_usage_from_backend(store.clone()).await
|
||||
{
|
||||
let mut cache = memory_cache().write().await;
|
||||
for (bucket_name, bucket_usage) in data_usage_info.buckets_usage.iter() {
|
||||
cache.insert(bucket_name.clone(), (bucket_usage.size, SystemTime::now()));
|
||||
}
|
||||
}
|
||||
|
||||
let mut updating = cache_updating().write().await;
|
||||
*updating = false;
|
||||
}
|
||||
|
||||
/// Sync memory cache with backend data (called by scanner)
|
||||
pub async fn sync_memory_cache_with_backend() -> Result<(), Error> {
|
||||
if let Some(store) = crate::global::GLOBAL_OBJECT_API.get() {
|
||||
match load_data_usage_from_backend(store.clone()).await {
|
||||
Ok(data_usage_info) => {
|
||||
let mut cache = memory_cache().write().await;
|
||||
for (bucket, bucket_usage) in data_usage_info.buckets_usage.iter() {
|
||||
cache.insert(bucket.clone(), (bucket_usage.size, SystemTime::now()));
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("Failed to sync memory cache with backend: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Build basic data usage info with real object counts
|
||||
pub async fn build_basic_data_usage_info(store: Arc<ECStore>) -> Result<DataUsageInfo, Error> {
|
||||
async fn build_basic_data_usage_info(store: Arc<ECStore>) -> Result<DataUsageInfo, Error> {
|
||||
let mut data_usage_info = DataUsageInfo::default();
|
||||
|
||||
// Get bucket list
|
||||
@@ -563,7 +437,7 @@ pub fn cache_to_data_usage_info(cache: &DataUsageCache, path: &str, buckets: &[c
|
||||
None => continue,
|
||||
};
|
||||
let flat = cache.flatten(&e);
|
||||
let mut bui = BucketUsageInfo {
|
||||
let mut bui = rustfs_common::data_usage::BucketUsageInfo {
|
||||
size: flat.size as u64,
|
||||
versions_count: flat.versions as u64,
|
||||
objects_count: flat.objects as u64,
|
||||
@@ -641,7 +515,7 @@ pub async fn load_data_usage_cache(store: &crate::set_disk::SetDisks, name: &str
|
||||
break;
|
||||
}
|
||||
Err(err) => match err {
|
||||
Error::FileNotFound | Error::VolumeNotFound => {
|
||||
crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => {
|
||||
match store
|
||||
.get_object_reader(
|
||||
RUSTFS_META_BUCKET,
|
||||
@@ -662,7 +536,7 @@ pub async fn load_data_usage_cache(store: &crate::set_disk::SetDisks, name: &str
|
||||
break;
|
||||
}
|
||||
Err(_) => match err {
|
||||
Error::FileNotFound | Error::VolumeNotFound => {
|
||||
crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => {
|
||||
break;
|
||||
}
|
||||
_ => {}
|
||||
@@ -691,9 +565,9 @@ pub async fn save_data_usage_cache(cache: &DataUsageCache, name: &str) -> crate:
|
||||
use std::path::Path;
|
||||
|
||||
let Some(store) = new_object_layer_fn() else {
|
||||
return Err(Error::other("errServerNotInitialized"));
|
||||
return Err(crate::error::Error::other("errServerNotInitialized"));
|
||||
};
|
||||
let buf = cache.marshal_msg().map_err(Error::other)?;
|
||||
let buf = cache.marshal_msg().map_err(crate::error::Error::other)?;
|
||||
let buf_clone = buf.clone();
|
||||
|
||||
let store_clone = store.clone();
|
||||
|
||||
@@ -1,25 +1,13 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::SystemTime;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::fs;
|
||||
|
||||
use crate::data_usage::BucketUsageInfo;
|
||||
use crate::disk::RUSTFS_META_BUCKET;
|
||||
use crate::error::{Error, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::SystemTime;
|
||||
use tokio::fs;
|
||||
|
||||
/// Directory used to store per-disk usage snapshots under the metadata bucket.
|
||||
pub const DATA_USAGE_DIR: &str = "datausage";
|
||||
|
||||
@@ -95,22 +95,22 @@ impl DiskHealthTracker {

/// Check if disk is faulty
pub fn is_faulty(&self) -> bool {
self.status.load(Ordering::Acquire) == DISK_HEALTH_FAULTY
self.status.load(Ordering::Relaxed) == DISK_HEALTH_FAULTY
}

/// Set disk as faulty
pub fn set_faulty(&self) {
self.status.store(DISK_HEALTH_FAULTY, Ordering::Release);
self.status.store(DISK_HEALTH_FAULTY, Ordering::Relaxed);
}

/// Set disk as OK
pub fn set_ok(&self) {
self.status.store(DISK_HEALTH_OK, Ordering::Release);
self.status.store(DISK_HEALTH_OK, Ordering::Relaxed);
}

pub fn swap_ok_to_faulty(&self) -> bool {
self.status
.compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::AcqRel, Ordering::Relaxed)
.compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::Relaxed, Ordering::Relaxed)
.is_ok()
}

@@ -131,7 +131,7 @@ impl DiskHealthTracker {

/// Get last success timestamp
pub fn last_success(&self) -> i64 {
self.last_success.load(Ordering::Acquire)
self.last_success.load(Ordering::Relaxed)
}
}

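A brief sketch of why the compare_exchange in the hunk above is useful regardless of which memory-ordering variant the two sides of the diff pick: it lets exactly one caller win the OK-to-FAULTY transition. Illustrative only; the free-function wrapper is made up, and only the methods shown in the hunk are assumed.

// Illustrative sketch only — uses swap_ok_to_faulty/is_faulty from the hunk above.
fn report_disk_error(tracker: &DiskHealthTracker) {
if tracker.swap_ok_to_faulty() {
// Only the first caller to observe the failure gets here, so one-time
// remediation (taking the disk out of rotation, scheduling a heal, ...) runs once.
}
debug_assert!(tracker.is_faulty());
}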
|
||||
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::disk::error::{Error, Result};
use super::error::{Error, Result};
use path_absolutize::Absolutize;
use rustfs_utils::{is_local_host, is_socket_addr};
use std::{fmt::Display, path::Path};

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// use crate::quorum::CheckErrorFn;
use std::hash::{Hash, Hasher};
use std::io::{self};
use std::path::PathBuf;

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::disk::error::DiskError;
use super::error::DiskError;

pub fn to_file_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::disk::error::Error;
use super::error::Error;

pub static OBJECT_OP_IGNORED_ERRS: &[Error] = &[
Error::DiskNotFound,

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::disk::error::{Error, Result};
use crate::disk::{DiskInfo, error::DiskError};
use super::error::{Error, Result};
use super::{DiskInfo, error::DiskError};
use serde::{Deserialize, Serialize};
use serde_json::Error as JsonError;
use uuid::Uuid;

@@ -17,6 +17,7 @@ use std::{
path::Path,
sync::{Arc, OnceLock},
};

use tokio::{
fs::{self, File},
io,
@@ -12,26 +12,38 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::config::storageclass::DEFAULT_INLINE_BLOCK;
use crate::data_usage::local_snapshot::ensure_data_usage_layout;
use crate::disk::{
BUCKET_META_PREFIX, CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN,
CHECK_PART_VOLUME_NOT_FOUND, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics,
FileInfoVersions, FileReader, FileWriter, RUSTFS_META_BUCKET, RUSTFS_META_TMP_DELETED_BUCKET, ReadMultipleReq,
ReadMultipleResp, ReadOptions, RenameDataResp, STORAGE_FORMAT_FILE, STORAGE_FORMAT_FILE_BACKUP, UpdateMetadataOpts,
VolumeInfo, WalkDirOptions, conv_part_err_to_int,
endpoint::Endpoint,
error::{DiskError, Error, FileAccessDeniedWithContext, Result},
error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error},
format::FormatV3,
fs::{O_APPEND, O_CREATE, O_RDONLY, O_TRUNC, O_WRONLY, access, lstat, lstat_std, remove, remove_all_std, remove_std, rename},
os,
os::{check_path_length, is_empty_dir, is_root_disk, rename_all},
use super::error::{Error, Result};
use super::os::{is_root_disk, rename_all};
use super::{
BUCKET_META_PREFIX, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics,
FileInfoVersions, RUSTFS_META_BUCKET, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp,
STORAGE_FORMAT_FILE_BACKUP, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, os,
};
use crate::erasure_coding::bitrot_verify;
use crate::file_cache::{get_global_file_cache, prefetch_metadata_patterns, read_metadata_cached};
use super::{endpoint::Endpoint, error::DiskError, format::FormatV3};

use crate::data_usage::local_snapshot::ensure_data_usage_layout;
use crate::disk::error::FileAccessDeniedWithContext;
use crate::disk::error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error};
use crate::disk::fs::{
O_APPEND, O_CREATE, O_RDONLY, O_TRUNC, O_WRONLY, access, lstat, lstat_std, remove, remove_all_std, remove_std, rename,
};
use crate::disk::os::{check_path_length, is_empty_dir};
use crate::disk::{
CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN, CHECK_PART_VOLUME_NOT_FOUND,
FileReader, RUSTFS_META_TMP_DELETED_BUCKET, conv_part_err_to_int,
};
use crate::disk::{FileWriter, STORAGE_FORMAT_FILE};
use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold};
use rustfs_utils::path::{
GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR, clean, decode_dir_object, encode_dir_object, has_suffix,
path_join, path_join_buf,
};
use tokio::time::interval;

use crate::erasure_coding::bitrot_verify;
use bytes::Bytes;
// use path_absolutize::Absolutize; // Replaced with direct path operations for better performance
use crate::file_cache::{get_global_file_cache, prefetch_metadata_patterns, read_metadata_cached};
use parking_lot::RwLock as ParkingLotRwLock;
use rustfs_filemeta::{
Cache, FileInfo, FileInfoOpts, FileMeta, MetaCacheEntry, MetacacheWriter, ObjectPartInfo, Opts, RawFileInfo, UpdateFn,
@@ -39,10 +51,6 @@ use rustfs_filemeta::{
};
use rustfs_utils::HashAlgorithm;
use rustfs_utils::os::get_info;
use rustfs_utils::path::{
GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR_STR, clean, decode_dir_object, encode_dir_object,
has_suffix, path_join, path_join_buf,
};
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::Debug;
@@ -58,7 +66,6 @@ use time::OffsetDateTime;
use tokio::fs::{self, File};
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt, ErrorKind};
use tokio::sync::RwLock;
use tokio::time::interval;
use tracing::{debug, error, info, warn};
use uuid::Uuid;
@@ -121,8 +128,7 @@ impl LocalDisk {
pub async fn new(ep: &Endpoint, cleanup: bool) -> Result<Self> {
debug!("Creating local disk");
// Use optimized path resolution instead of absolutize() for better performance
// Use dunce::canonicalize instead of std::fs::canonicalize to avoid UNC paths on Windows
let root = match dunce::canonicalize(ep.get_file_path()) {
let root = match std::fs::canonicalize(ep.get_file_path()) {
Ok(path) => path,
Err(e) => {
if e.kind() == ErrorKind::NotFound {
@@ -476,31 +482,23 @@ impl LocalDisk {

// Async prefetch related files, don't block current read
if let Some(parent) = file_path.parent() {
prefetch_metadata_patterns(parent, &[STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await;
prefetch_metadata_patterns(parent, &[super::STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await;
}

// Main read logic
let file_dir = self.get_bucket_path(volume)?;
let (data, _) = self.read_raw(volume, file_dir, file_path, opts.read_data).await?;

get_file_info(
&data,
volume,
path,
version_id,
FileInfoOpts {
data: opts.read_data,
include_free_versions: false,
},
)
.map_err(|_e| DiskError::Unexpected)
get_file_info(&data, volume, path, version_id, FileInfoOpts { data: opts.read_data })
.await
.map_err(|_e| DiskError::Unexpected)
}

// Batch metadata reading for multiple objects
async fn read_metadata_batch(&self, requests: Vec<(String, String)>) -> Result<Vec<Option<Arc<FileMeta>>>> {
let paths: Vec<PathBuf> = requests
.iter()
.map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, STORAGE_FORMAT_FILE)))
.map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, super::STORAGE_FORMAT_FILE)))
.collect::<Result<Vec<_>>>()?;

let cache = get_global_file_cache();
@@ -537,7 +535,7 @@ impl LocalDisk {

// TODO: async notifications for disk space checks and trash cleanup

let trash_path = self.get_object_path(RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
let trash_path = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
// if let Some(parent) = trash_path.parent() {
// if !parent.exists() {
// fs::create_dir_all(parent).await?;
@@ -545,7 +543,7 @@ impl LocalDisk {
// }

let err = if recursive {
rename_all(delete_path, trash_path, self.get_bucket_path(RUSTFS_META_TMP_DELETED_BUCKET)?)
rename_all(delete_path, trash_path, self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?)
.await
.err()
} else {
@@ -555,12 +553,12 @@ impl LocalDisk {
.err()
};

if immediate_purge || delete_path.to_string_lossy().ends_with(SLASH_SEPARATOR_STR) {
let trash_path2 = self.get_object_path(RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
if immediate_purge || delete_path.to_string_lossy().ends_with(SLASH_SEPARATOR) {
let trash_path2 = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
let _ = rename_all(
encode_dir_object(delete_path.to_string_lossy().as_ref()),
trash_path2,
self.get_bucket_path(RUSTFS_META_TMP_DELETED_BUCKET)?,
self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?,
)
.await;
}
@@ -844,11 +842,7 @@ impl LocalDisk {
self.write_all_internal(&tmp_file_path, InternalBuf::Ref(buf), sync, &tmp_volume_dir)
.await?;

rename_all(tmp_file_path, &file_path, volume_dir).await?;

// Invalidate cache after successful write
get_global_file_cache().invalidate(&file_path).await;
Ok(())
rename_all(tmp_file_path, file_path, volume_dir).await
}

// write_all_public for trail
@@ -913,7 +907,7 @@ impl LocalDisk {
}

if let Some(parent) = path.as_ref().parent() {
os::make_dir_all(parent, skip_parent).await?;
super::os::make_dir_all(parent, skip_parent).await?;
}

let f = super::fs::open_file(path.as_ref(), mode).await.map_err(to_file_error)?;
@@ -939,7 +933,7 @@ impl LocalDisk {
let meta = file.metadata().await.map_err(to_file_error)?;
let file_size = meta.len() as usize;

bitrot_verify(Box::new(file), file_size, part_size, algo, Bytes::copy_from_slice(sum), shard_size)
bitrot_verify(Box::new(file), file_size, part_size, algo, bytes::Bytes::copy_from_slice(sum), shard_size)
.await
.map_err(to_file_error)?;
@@ -1035,16 +1029,15 @@ impl LocalDisk {
continue;
}

if entry.ends_with(SLASH_SEPARATOR_STR) {
if entry.ends_with(SLASH_SEPARATOR) {
if entry.ends_with(GLOBAL_DIR_SUFFIX_WITH_SLASH) {
let entry =
format!("{}{}", entry.as_str().trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH), SLASH_SEPARATOR_STR);
let entry = format!("{}{}", entry.as_str().trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH), SLASH_SEPARATOR);
dir_objes.insert(entry.clone());
*item = entry;
continue;
}

*item = entry.trim_end_matches(SLASH_SEPARATOR_STR).to_owned();
*item = entry.trim_end_matches(SLASH_SEPARATOR).to_owned();
continue;
}

@@ -1056,7 +1049,7 @@ impl LocalDisk {
.await?;

let entry = entry.strip_suffix(STORAGE_FORMAT_FILE).unwrap_or_default().to_owned();
let name = entry.trim_end_matches(SLASH_SEPARATOR_STR);
let name = entry.trim_end_matches(SLASH_SEPARATOR);
let name = decode_dir_object(format!("{}/{}", &current, &name).as_str());

// if opts.limit > 0
@@ -1139,7 +1132,7 @@ impl LocalDisk {
Ok(res) => {
if is_dir_obj {
meta.name = meta.name.trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH).to_owned();
meta.name.push_str(SLASH_SEPARATOR_STR);
meta.name.push_str(SLASH_SEPARATOR);
}

meta.metadata = res;
@@ -1157,7 +1150,7 @@ impl LocalDisk {
// NOT an object, append to stack (with slash)
// If dirObject, but no metadata (which is unexpected) we skip it.
if !is_dir_obj && !is_empty_dir(self.get_object_path(&opts.bucket, &meta.name)?).await {
meta.name.push_str(SLASH_SEPARATOR_STR);
meta.name.push_str(SLASH_SEPARATOR);
dir_stack.push(meta.name);
}
}
@@ -1232,7 +1225,7 @@ async fn read_file_metadata(p: impl AsRef<Path>) -> Result<Metadata> {

fn skip_access_checks(p: impl AsRef<str>) -> bool {
let vols = [
RUSTFS_META_TMP_DELETED_BUCKET,
super::RUSTFS_META_TMP_DELETED_BUCKET,
super::RUSTFS_META_TMP_BUCKET,
super::RUSTFS_META_MULTIPART_BUCKET,
RUSTFS_META_BUCKET,
@@ -1626,8 +1619,8 @@ impl DiskAPI for LocalDisk {
super::fs::access_std(&dst_volume_dir).map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))?
}

let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR_STR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR_STR);
let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR);

if !src_is_dir && dst_is_dir || src_is_dir && !dst_is_dir {
warn!(
@@ -1693,8 +1686,8 @@ impl DiskAPI for LocalDisk {
.map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))?;
}

let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR_STR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR_STR);
let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR);
if (dst_is_dir || src_is_dir) && (!dst_is_dir || !src_is_dir) {
return Err(Error::from(DiskError::FileAccessDenied));
}
@@ -1845,12 +1838,12 @@ impl DiskAPI for LocalDisk {
}

let volume_dir = self.get_bucket_path(volume)?;
let dir_path_abs = self.get_object_path(volume, dir_path.trim_start_matches(SLASH_SEPARATOR_STR))?;
let dir_path_abs = self.get_object_path(volume, dir_path.trim_start_matches(SLASH_SEPARATOR))?;

let entries = match os::read_dir(&dir_path_abs, count).await {
Ok(res) => res,
Err(e) => {
if e.kind() == ErrorKind::NotFound
if e.kind() == std::io::ErrorKind::NotFound
&& !skip_access_checks(volume)
&& let Err(e) = access(&volume_dir).await
{
@@ -1881,11 +1874,11 @@ impl DiskAPI for LocalDisk {

let mut objs_returned = 0;

if opts.base_dir.ends_with(SLASH_SEPARATOR_STR) {
if opts.base_dir.ends_with(SLASH_SEPARATOR) {
let fpath = self.get_object_path(
&opts.bucket,
path_join_buf(&[
format!("{}{}", opts.base_dir.trim_end_matches(SLASH_SEPARATOR_STR), GLOBAL_DIR_SUFFIX).as_str(),
format!("{}{}", opts.base_dir.trim_end_matches(SLASH_SEPARATOR), GLOBAL_DIR_SUFFIX).as_str(),
STORAGE_FORMAT_FILE,
])
.as_str(),
@@ -2117,7 +2110,7 @@ impl DiskAPI for LocalDisk {
let volume_dir = self.get_bucket_path(volume)?;

if let Err(e) = access(&volume_dir).await {
if e.kind() == ErrorKind::NotFound {
if e.kind() == std::io::ErrorKind::NotFound {
os::make_dir_all(&volume_dir, self.root.as_path()).await?;
return Ok(());
}
@@ -2135,7 +2128,7 @@ impl DiskAPI for LocalDisk {
let entries = os::read_dir(&self.root, -1).await.map_err(to_volume_error)?;

for entry in entries {
if !has_suffix(&entry, SLASH_SEPARATOR_STR) || !Self::is_valid_volname(clean(&entry).as_str()) {
if !has_suffix(&entry, SLASH_SEPARATOR) || !Self::is_valid_volname(clean(&entry).as_str()) {
continue;
}
@@ -2247,93 +2240,20 @@ impl DiskAPI for LocalDisk {
#[tracing::instrument(level = "debug", skip(self))]
async fn read_version(
&self,
org_volume: &str,
_org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo> {
if !org_volume.is_empty() {
let org_volume_path = self.get_bucket_path(org_volume)?;
if !skip_access_checks(org_volume) {
access(&org_volume_path)
.await
.map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))?;
}
}

let file_path = self.get_object_path(volume, path)?;
let volume_dir = self.get_bucket_path(volume)?;

check_path_length(file_path.to_string_lossy().as_ref())?;
let file_dir = self.get_bucket_path(volume)?;

let read_data = opts.read_data;

let (data, _) = self
.read_raw(volume, volume_dir.clone(), file_path, read_data)
.await
.map_err(|e| {
if e == DiskError::FileNotFound && !version_id.is_empty() {
DiskError::FileVersionNotFound
} else {
e
}
})?;
let (data, _) = self.read_raw(volume, file_dir, file_path, read_data).await?;

let mut fi = get_file_info(
&data,
volume,
path,
version_id,
FileInfoOpts {
data: read_data,
include_free_versions: opts.incl_free_versions,
},
)?;

if opts.read_data {
if fi.data.as_ref().is_some_and(|d| !d.is_empty()) || fi.size == 0 {
if fi.inline_data() {
return Ok(fi);
}

if fi.size == 0 || fi.version_id.is_none_or(|v| v.is_nil()) {
fi.set_inline_data();
return Ok(fi);
};
if let Some(part) = fi.parts.first() {
let part_path = format!("part.{}", part.number);
let part_path = path_join_buf(&[
path,
fi.data_dir.map_or("".to_string(), |dir| dir.to_string()).as_str(),
part_path.as_str(),
]);
let part_path = self.get_object_path(volume, part_path.as_str())?;
if lstat(&part_path).await.is_err() {
fi.set_inline_data();
return Ok(fi);
}
}

fi.data = None;
}

let inline = fi.transition_status.is_empty() && fi.data_dir.is_some() && fi.parts.len() == 1;
if inline && fi.shard_file_size(fi.parts[0].actual_size) < DEFAULT_INLINE_BLOCK as i64 {
let part_path = path_join_buf(&[
path,
fi.data_dir.map_or("".to_string(), |dir| dir.to_string()).as_str(),
format!("part.{}", fi.parts[0].number).as_str(),
]);
let part_path = self.get_object_path(volume, part_path.as_str())?;

let data = self.read_all_data(volume, volume_dir, part_path.clone()).await.map_err(|e| {
warn!("read_version read_all_data {:?} failed: {e}", part_path);
e
})?;
fi.data = Some(Bytes::from(data));
}
}
let fi = get_file_info(&data, volume, path, version_id, FileInfoOpts { data: read_data }).await?;

Ok(fi)
}
@@ -2357,7 +2277,7 @@ impl DiskAPI for LocalDisk {
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
if path.starts_with(SLASH_SEPARATOR_STR) {
if path.starts_with(SLASH_SEPARATOR) {
return self
.delete(
volume,
@@ -2418,7 +2338,7 @@ impl DiskAPI for LocalDisk {
if !meta.versions.is_empty() {
let buf = meta.marshal_msg()?;
return self
.write_all_meta(volume, format!("{path}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE}").as_str(), &buf, true)
.write_all_meta(volume, format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str(), &buf, true)
.await;
}

@@ -2428,11 +2348,11 @@ impl DiskAPI for LocalDisk {
{
let src_path = path_join(&[
file_path.as_path(),
Path::new(format!("{old_data_dir}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()),
Path::new(format!("{old_data_dir}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()),
]);
let dst_path = path_join(&[
file_path.as_path(),
Path::new(format!("{path}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE}").as_str()),
Path::new(format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str()),
]);
return rename_all(src_path, dst_path, file_path).await;
}
@@ -2561,7 +2481,7 @@ async fn get_disk_info(drive_path: PathBuf) -> Result<(rustfs_utils::os::DiskInf
if root_disk_threshold > 0 {
disk_info.total <= root_disk_threshold
} else {
is_root_disk(&drive_path, SLASH_SEPARATOR_STR).unwrap_or_default()
is_root_disk(&drive_path, SLASH_SEPARATOR).unwrap_or_default()
}
} else {
false
@@ -2579,7 +2499,7 @@ mod test {
// let arr = Vec::new();

let vols = [
RUSTFS_META_TMP_DELETED_BUCKET,
super::super::RUSTFS_META_TMP_DELETED_BUCKET,
super::super::RUSTFS_META_TMP_BUCKET,
super::super::RUSTFS_META_MULTIPART_BUCKET,
RUSTFS_META_BUCKET,
@@ -2607,7 +2527,9 @@ mod test {

let disk = LocalDisk::new(&ep, false).await.unwrap();

let tmpp = disk.resolve_abs_path(Path::new(RUSTFS_META_TMP_DELETED_BUCKET)).unwrap();
let tmpp = disk
.resolve_abs_path(Path::new(super::super::RUSTFS_META_TMP_DELETED_BUCKET))
.unwrap();

println!("ppp :{:?}", &tmpp);

@@ -2635,7 +2557,9 @@ mod test {

let disk = LocalDisk::new(&ep, false).await.unwrap();

let tmpp = disk.resolve_abs_path(Path::new(RUSTFS_META_TMP_DELETED_BUCKET)).unwrap();
let tmpp = disk
.resolve_abs_path(Path::new(super::super::RUSTFS_META_TMP_DELETED_BUCKET))
.unwrap();

println!("ppp :{:?}", &tmpp);
@@ -12,17 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::disk::error::DiskError;
|
||||
use crate::disk::error::Result;
|
||||
use crate::disk::error_conv::to_file_error;
|
||||
use rustfs_utils::path::SLASH_SEPARATOR_STR;
|
||||
use std::{
|
||||
io,
|
||||
path::{Component, Path},
|
||||
};
|
||||
|
||||
use super::error::Result;
|
||||
use crate::disk::error_conv::to_file_error;
|
||||
use rustfs_utils::path::SLASH_SEPARATOR;
|
||||
use tokio::fs;
|
||||
use tracing::warn;
|
||||
|
||||
use super::error::DiskError;
|
||||
|
||||
/// Check path length according to OS limits.
|
||||
pub fn check_path_length(path_name: &str) -> Result<()> {
|
||||
// Apple OS X path length is limited to 1016
|
||||
@@ -116,7 +118,7 @@ pub async fn read_dir(path: impl AsRef<Path>, count: i32) -> std::io::Result<Vec
|
||||
if file_type.is_file() {
|
||||
volumes.push(name);
|
||||
} else if file_type.is_dir() {
|
||||
volumes.push(format!("{name}{SLASH_SEPARATOR_STR}"));
|
||||
volumes.push(format!("{name}{SLASH_SEPARATOR}"));
|
||||
}
|
||||
count -= 1;
|
||||
if count == 0 {
|
||||
|
||||
@@ -12,18 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host};
|
||||
use tracing::{error, info, instrument, warn};
|
||||
|
||||
use crate::{
|
||||
disk::endpoint::{Endpoint, EndpointType},
|
||||
disks_layout::DisksLayout,
|
||||
global::global_rustfs_port,
|
||||
};
|
||||
use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host};
|
||||
use std::io::{Error, Result};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet, hash_map::Entry},
|
||||
io::{Error, Result},
|
||||
net::IpAddr,
|
||||
};
|
||||
use tracing::{error, info, instrument, warn};
|
||||
|
||||
/// enum for setup type.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
|
||||
@@ -108,6 +108,7 @@ pin_project! {
|
||||
inner: W,
|
||||
hash_algo: HashAlgorithm,
|
||||
shard_size: usize,
|
||||
buf: Vec<u8>,
|
||||
finished: bool,
|
||||
}
|
||||
}
|
||||
@@ -123,6 +124,7 @@ where
|
||||
inner,
|
||||
hash_algo,
|
||||
shard_size,
|
||||
buf: Vec::new(),
|
||||
finished: false,
|
||||
}
|
||||
}
|
||||
@@ -157,19 +159,19 @@ where
|
||||
|
||||
if hash_algo.size() > 0 {
|
||||
let hash = hash_algo.hash_encode(buf);
|
||||
if hash.as_ref().is_empty() {
|
||||
error!("bitrot writer write hash error: hash is empty");
|
||||
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "hash is empty"));
|
||||
}
|
||||
self.inner.write_all(hash.as_ref()).await?;
|
||||
self.buf.extend_from_slice(hash.as_ref());
|
||||
}
|
||||
|
||||
self.inner.write_all(buf).await?;
|
||||
self.buf.extend_from_slice(buf);
|
||||
|
||||
self.inner.flush().await?;
|
||||
self.inner.write_all(&self.buf).await?;
|
||||
|
||||
// self.inner.flush().await?;
|
||||
|
||||
let n = buf.len();
|
||||
|
||||
self.buf.clear();
|
||||
|
||||
Ok(n)
|
||||
}
|
||||
|
||||
|
||||
@@ -12,9 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::BitrotReader;
|
||||
use super::Erasure;
|
||||
use crate::disk::error::Error;
|
||||
use crate::disk::error_reduce::reduce_errs;
|
||||
use crate::erasure_coding::{BitrotReader, Erasure};
|
||||
use futures::stream::{FuturesUnordered, StreamExt};
|
||||
use pin_project_lite::pin_project;
|
||||
use std::io;
|
||||
@@ -311,12 +312,11 @@ impl Erasure {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
disk::error::DiskError,
|
||||
erasure_coding::{BitrotReader, BitrotWriter},
|
||||
};
|
||||
use rustfs_utils::HashAlgorithm;
|
||||
|
||||
use crate::{disk::error::DiskError, erasure_coding::BitrotWriter};
|
||||
|
||||
use super::*;
|
||||
use std::io::Cursor;
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -12,11 +12,11 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::BitrotWriterWrapper;
|
||||
use super::Erasure;
|
||||
use crate::disk::error::Error;
|
||||
use crate::disk::error_reduce::count_errs;
|
||||
use crate::disk::error_reduce::{OBJECT_OP_IGNORED_ERRS, reduce_write_quorum_errs};
|
||||
use crate::erasure_coding::BitrotWriterWrapper;
|
||||
use crate::erasure_coding::Erasure;
|
||||
use bytes::Bytes;
|
||||
use futures::StreamExt;
|
||||
use futures::stream::FuturesUnordered;
|
||||
|
||||
@@ -12,10 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::BitrotReader;
|
||||
use super::BitrotWriterWrapper;
|
||||
use super::decode::ParallelReader;
|
||||
use crate::disk::error::{Error, Result};
|
||||
use crate::erasure_coding::BitrotReader;
|
||||
use crate::erasure_coding::BitrotWriterWrapper;
|
||||
use crate::erasure_coding::decode::ParallelReader;
|
||||
use crate::erasure_coding::encode::MultiWriter;
|
||||
use bytes::Bytes;
|
||||
use tokio::io::AsyncRead;
|
||||
|
||||
@@ -12,11 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod bitrot;
|
||||
pub mod decode;
|
||||
pub mod encode;
|
||||
pub mod erasure;
|
||||
pub mod heal;
|
||||
|
||||
mod bitrot;
|
||||
pub use bitrot::*;
|
||||
|
||||
pub use erasure::{Erasure, ReedSolomonEncoder, calc_shard_size};
|
||||
|
||||
@@ -12,10 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use s3s::{S3Error, S3ErrorCode};
|
||||
|
||||
use rustfs_utils::path::decode_dir_object;
|
||||
|
||||
use crate::bucket::error::BucketMetadataError;
|
||||
use crate::disk::error::DiskError;
|
||||
use rustfs_utils::path::decode_dir_object;
|
||||
use s3s::{S3Error, S3ErrorCode};
|
||||
|
||||
pub type Error = StorageError;
|
||||
pub type Result<T> = core::result::Result<T, Error>;
|
||||
|
||||
@@ -12,9 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::event::targetid::TargetID;
|
||||
use std::sync::atomic::AtomicI64;
|
||||
|
||||
use super::targetid::TargetID;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct TargetList {
|
||||
pub current_send_calls: AtomicI64,
|
||||
|
||||
@@ -14,14 +14,15 @@
|
||||
// limitations under the License.
|
||||
#![allow(unused_variables)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::bucket::metadata::BucketMetadata;
|
||||
use crate::event::name::EventName;
|
||||
use crate::event::targetlist::TargetList;
|
||||
use crate::store::ECStore;
|
||||
use crate::store_api::ObjectInfo;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
pub struct EventNotifier {
|
||||
target_list: TargetList,
|
||||
|
||||
@@ -12,7 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{admin_server_info::get_local_server_property, new_object_layer_fn, store_api::StorageAPI};
|
||||
use crate::{
|
||||
admin_server_info::get_local_server_property,
|
||||
new_object_layer_fn,
|
||||
store_api::StorageAPI,
|
||||
// utils::os::get_drive_stats,
|
||||
};
|
||||
use chrono::Utc;
|
||||
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_ADDR, heal_channel::DriveState, metrics::global_metrics};
|
||||
use rustfs_madmin::metrics::{DiskIOStats, DiskMetric, RealtimeMetrics};
|
||||
|
||||
@@ -38,7 +38,7 @@ use rustfs_common::defer;
|
||||
use rustfs_common::heal_channel::HealOpts;
|
||||
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
|
||||
use rustfs_rio::{HashReader, WarpReader};
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR_STR, encode_dir_object, path_join};
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR, encode_dir_object, path_join};
|
||||
use rustfs_workers::workers::Workers;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
@@ -451,10 +451,10 @@ fn path2_bucket_object_with_base_path(base_path: &str, path: &str) -> (String, S
|
||||
let trimmed_path = path
|
||||
.strip_prefix(base_path)
|
||||
.unwrap_or(path)
|
||||
.strip_prefix(SLASH_SEPARATOR_STR)
|
||||
.strip_prefix(SLASH_SEPARATOR)
|
||||
.unwrap_or(path);
|
||||
// Find the position of the first '/'
|
||||
let pos = trimmed_path.find(SLASH_SEPARATOR_STR).unwrap_or(trimmed_path.len());
|
||||
let pos = trimmed_path.find(SLASH_SEPARATOR).unwrap_or(trimmed_path.len());
|
||||
// Split into bucket and prefix
|
||||
let bucket = &trimmed_path[0..pos];
|
||||
let prefix = &trimmed_path[pos + 1..]; // +1 to skip the '/' character if it exists
|
||||
|
||||
@@ -12,14 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers};
|
||||
use std::error::Error;
|
||||
|
||||
use http::Method;
|
||||
use rustfs_common::GLOBAL_CONN_MAP;
|
||||
use rustfs_protos::{create_new_channel, proto_gen::node_service::node_service_client::NodeServiceClient};
|
||||
use std::error::Error;
|
||||
use tonic::{service::interceptor::InterceptedService, transport::Channel};
|
||||
use tracing::debug;
|
||||
|
||||
use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers};
|
||||
|
||||
/// 3. Subsequent calls will attempt fresh connections
|
||||
/// 4. If node is still down, connection will fail fast (3s timeout)
|
||||
pub async fn node_service_time_out_client(
|
||||
|
||||
@@ -108,19 +108,14 @@ pub fn verify_rpc_signature(url: &str, method: &Method, headers: &HeaderMap) ->
|
||||
}
|
||||
|
||||
// Generate expected signature
|
||||
|
||||
let expected_signature = generate_signature(&secret, url, method, timestamp);
|
||||
|
||||
// Compare signatures
|
||||
if signature != expected_signature {
|
||||
error!(
|
||||
"verify_rpc_signature: Invalid signature: url {}, method {}, timestamp {}, signature {}, expected_signature: {}***{}|{}",
|
||||
url,
|
||||
method,
|
||||
timestamp,
|
||||
signature,
|
||||
expected_signature.chars().next().unwrap_or('*'),
|
||||
expected_signature.chars().last().unwrap_or('*'),
|
||||
expected_signature.len()
|
||||
"verify_rpc_signature: Invalid signature: secret {}, url {}, method {}, timestamp {}, signature {}, expected_signature {}",
|
||||
secret, url, method, timestamp, signature, expected_signature
|
||||
);
|
||||
|
||||
return Err(std::io::Error::other("Invalid signature"));
|
||||
|
||||
@@ -27,6 +27,7 @@ use rustfs_madmin::{
|
||||
net::NetInfo,
|
||||
};
|
||||
use rustfs_protos::evict_failed_connection;
|
||||
use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
|
||||
use rustfs_protos::proto_gen::node_service::{
|
||||
DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest,
|
||||
GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest,
|
||||
@@ -34,7 +35,6 @@ use rustfs_protos::proto_gen::node_service::{
|
||||
LoadPolicyMappingRequest, LoadPolicyRequest, LoadRebalanceMetaRequest, LoadServiceAccountRequest,
|
||||
LoadTransitionTierConfigRequest, LoadUserRequest, LocalStorageInfoRequest, Mss, ReloadPoolMetaRequest,
|
||||
ReloadSiteReplicationConfigRequest, ServerInfoRequest, SignalServiceRequest, StartProfilingRequest, StopRebalanceRequest,
|
||||
node_service_client::NodeServiceClient,
|
||||
};
|
||||
use rustfs_utils::XHost;
|
||||
use serde::{Deserialize, Serialize as _};
|
||||
|
||||
@@ -12,29 +12,15 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{
|
||||
disk::{
|
||||
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
|
||||
FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo,
|
||||
WalkDirOptions,
|
||||
disk_store::{
|
||||
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE,
|
||||
get_max_timeout_duration,
|
||||
},
|
||||
endpoint::Endpoint,
|
||||
{
|
||||
disk_store::DiskHealthTracker,
|
||||
error::{DiskError, Error, Result},
|
||||
},
|
||||
},
|
||||
rpc::build_auth_headers,
|
||||
rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client},
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
sync::{Arc, atomic::Ordering},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::lock::Mutex;
|
||||
use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
|
||||
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
|
||||
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
|
||||
use rustfs_protos::proto_gen::node_service::{
|
||||
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
|
||||
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
|
||||
@@ -42,18 +28,37 @@ use rustfs_protos::proto_gen::node_service::{
|
||||
StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
|
||||
node_service_client::NodeServiceClient,
|
||||
};
|
||||
use rustfs_rio::{HttpReader, HttpWriter};
|
||||
use rustfs_utils::string::parse_bool_with_default;
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
sync::{Arc, atomic::Ordering},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::time;
|
||||
use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tonic::{Request, service::interceptor::InterceptedService, transport::Channel};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::disk::{disk_store::DiskHealthTracker, error::DiskError};
|
||||
use crate::{
|
||||
disk::error::{Error, Result},
|
||||
rpc::build_auth_headers,
|
||||
};
|
||||
use crate::{
|
||||
disk::{
|
||||
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
|
||||
ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,
|
||||
disk_store::{
|
||||
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE,
|
||||
get_max_timeout_duration,
|
||||
},
|
||||
endpoint::Endpoint,
|
||||
},
|
||||
rpc::client::gen_tonic_signature_interceptor,
|
||||
};
|
||||
use crate::{
|
||||
disk::{FileReader, FileWriter},
|
||||
rpc::client::{TonicInterceptor, node_service_time_out_client},
|
||||
};
|
||||
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
|
||||
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
|
||||
use rustfs_rio::{HttpReader, HttpWriter};
|
||||
use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
|
||||
use tonic::{Request, service::interceptor::InterceptedService, transport::Channel};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -14,10 +14,9 @@
|
||||
|
||||
use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
|
||||
use async_trait::async_trait;
|
||||
use rustfs_lock::{
|
||||
LockClient, LockError, LockInfo, LockRequest, LockResponse, LockStats, LockStatus, LockType, Result,
|
||||
types::{LockId, LockMetadata, LockPriority},
|
||||
};
|
||||
use rustfs_lock::types::{LockId, LockMetadata, LockPriority};
|
||||
use rustfs_lock::{LockClient, LockError, LockInfo, LockResponse, LockStats, LockStatus, Result};
|
||||
use rustfs_lock::{LockRequest, LockType};
|
||||
use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
|
||||
use rustfs_protos::proto_gen::node_service::{GenerallyLockRequest, PingRequest};
|
||||
use std::collections::HashMap;
|
||||
|
||||
@@ -82,7 +82,7 @@ use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX,
|
||||
use rustfs_utils::{
|
||||
HashAlgorithm,
|
||||
crypto::hex,
|
||||
path::{SLASH_SEPARATOR_STR, encode_dir_object, has_suffix, path_join_buf},
|
||||
path::{SLASH_SEPARATOR, encode_dir_object, has_suffix, path_join_buf},
|
||||
};
|
||||
use rustfs_workers::workers::Workers;
|
||||
use s3s::header::X_AMZ_RESTORE;
|
||||
@@ -1485,8 +1485,20 @@ impl SetDisks {
|
||||
let object = object.clone();
|
||||
let version_id = version_id.clone();
|
||||
tokio::spawn(async move {
|
||||
if let Some(disk) = disk {
|
||||
disk.read_version(&org_bucket, &bucket, &object, &version_id, &opts).await
|
||||
if let Some(disk) = disk
|
||||
&& disk.is_online().await
|
||||
{
|
||||
if version_id.is_empty() {
|
||||
match disk.read_xl(&bucket, &object, read_data).await {
|
||||
Ok(info) => {
|
||||
let fi = file_info_from_raw(info, &bucket, &object, read_data).await?;
|
||||
Ok(fi)
|
||||
}
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
} else {
|
||||
disk.read_version(&org_bucket, &bucket, &object, &version_id, &opts).await
|
||||
}
|
||||
} else {
|
||||
Err(DiskError::DiskNotFound)
|
||||
}
|
||||
@@ -1614,7 +1626,7 @@ impl SetDisks {
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
read_data: bool,
|
||||
incl_free_vers: bool,
|
||||
_incl_free_vers: bool,
|
||||
) -> (Vec<FileInfo>, Vec<Option<DiskError>>) {
|
||||
let mut metadata_array = vec![None; fileinfos.len()];
|
||||
let mut meta_file_infos = vec![FileInfo::default(); fileinfos.len()];
|
||||
@@ -1664,7 +1676,7 @@ impl SetDisks {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let finfo = match meta.into_fileinfo(bucket, object, "", true, incl_free_vers, true) {
|
||||
let finfo = match meta.into_fileinfo(bucket, object, "", true, true) {
|
||||
Ok(res) => res,
|
||||
Err(err) => {
|
||||
for item in errs.iter_mut() {
|
||||
@@ -1691,7 +1703,7 @@ impl SetDisks {
|
||||
|
||||
for (idx, meta_op) in metadata_array.iter().enumerate() {
|
||||
if let Some(meta) = meta_op {
|
||||
match meta.into_fileinfo(bucket, object, vid.to_string().as_str(), read_data, incl_free_vers, true) {
|
||||
match meta.into_fileinfo(bucket, object, vid.to_string().as_str(), read_data, true) {
|
||||
Ok(res) => meta_file_infos[idx] = res,
|
||||
Err(err) => errs[idx] = Some(err.into()),
|
||||
}
|
||||
@@ -4614,9 +4626,7 @@ impl StorageAPI for SetDisks {
|
||||
.await
|
||||
.map_err(|e| to_object_err(e, vec![bucket, object]))?;
|
||||
|
||||
let mut obj_info = ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended);
|
||||
obj_info.size = goi.size;
|
||||
Ok(obj_info)
|
||||
Ok(ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended))
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
@@ -5326,7 +5336,7 @@ impl StorageAPI for SetDisks {
|
||||
&upload_id_path,
|
||||
fi.data_dir.map(|v| v.to_string()).unwrap_or_default().as_str(),
|
||||
]),
|
||||
SLASH_SEPARATOR_STR
|
||||
SLASH_SEPARATOR
|
||||
);
|
||||
|
||||
let mut part_numbers = match Self::list_parts(&online_disks, &part_path, read_quorum).await {
|
||||
@@ -5464,7 +5474,7 @@ impl StorageAPI for SetDisks {
|
||||
let mut populated_upload_ids = HashSet::new();
|
||||
|
||||
for upload_id in upload_ids.iter() {
|
||||
let upload_id = upload_id.trim_end_matches(SLASH_SEPARATOR_STR).to_string();
|
||||
let upload_id = upload_id.trim_end_matches(SLASH_SEPARATOR).to_string();
|
||||
if populated_upload_ids.contains(&upload_id) {
|
||||
continue;
|
||||
}
|
||||
@@ -6224,7 +6234,7 @@ impl StorageAPI for SetDisks {
|
||||
None
|
||||
};
|
||||
|
||||
if has_suffix(object, SLASH_SEPARATOR_STR) {
|
||||
if has_suffix(object, SLASH_SEPARATOR) {
|
||||
let (result, err) = self.heal_object_dir_locked(bucket, object, opts.dry_run, opts.remove).await?;
|
||||
return Ok((result, err.map(|e| e.into())));
|
||||
}
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use crate::disk::error_reduce::count_errs;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::store_api::{ListPartsInfo, ObjectInfoOrErr, WalkOptions};
|
||||
@@ -42,16 +44,17 @@ use rustfs_common::{
|
||||
heal_channel::{DriveState, HealItemType},
|
||||
};
|
||||
use rustfs_filemeta::FileInfo;
|
||||
|
||||
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
|
||||
use rustfs_utils::{crc_hash, path::path_join_buf, sip_hash};
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use tokio::sync::RwLock;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use uuid::Uuid;
|
||||
|
||||
use tokio::sync::broadcast::{Receiver, Sender};
|
||||
use tokio::time::Duration;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::warn;
|
||||
use tracing::{error, info};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Sets {
|
||||
|
||||
@@ -1825,16 +1825,16 @@ impl StorageAPI for ECStore {
|
||||
if self.is_suspended(pool.pool_idx).await {
|
||||
continue;
|
||||
}
|
||||
return match pool
|
||||
match pool
|
||||
.list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
|
||||
.await
|
||||
{
|
||||
Ok(res) => Ok(res),
|
||||
Ok(res) => return Ok(res),
|
||||
Err(err) => {
|
||||
if is_err_invalid_upload_id(&err) {
|
||||
continue;
|
||||
}
|
||||
Err(err)
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -2204,7 +2204,7 @@ impl StorageAPI for ECStore {
|
||||
async fn delete_object_version(&self, bucket: &str, object: &str, fi: &FileInfo, force_del_marker: bool) -> Result<()> {
|
||||
check_del_obj_args(bucket, object)?;
|
||||
|
||||
let object = encode_dir_object(object);
|
||||
let object = rustfs_utils::path::encode_dir_object(object);
|
||||
|
||||
if self.single_pool() {
|
||||
return self.pools[0]
|
||||
@@ -2324,15 +2324,17 @@ impl StorageAPI for ECStore {
|
||||
|
||||
// No pool returned a nil error, return the first non 'not found' error
|
||||
for (index, err) in errs.iter().enumerate() {
|
||||
return match err {
|
||||
match err {
|
||||
Some(err) => {
|
||||
if is_err_object_not_found(err) || is_err_version_not_found(err) {
|
||||
continue;
|
||||
}
|
||||
Ok((ress.remove(index), Some(err.clone())))
|
||||
return Ok((ress.remove(index), Some(err.clone())));
|
||||
}
|
||||
None => Ok((ress.remove(index), None)),
|
||||
};
|
||||
None => {
|
||||
return Ok((ress.remove(index), None));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// At this stage, all errors are 'not found'
|
||||
|
||||
@@ -28,15 +28,14 @@ use http::{HeaderMap, HeaderValue};
|
||||
use rustfs_common::heal_channel::HealOpts;
|
||||
use rustfs_filemeta::{
|
||||
FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, REPLICATION_RESET, REPLICATION_STATUS, ReplicateDecision, ReplicationState,
|
||||
ReplicationStatusType, RestoreStatusOps as _, VersionPurgeStatusType, parse_restore_obj_status, replication_statuses_map,
|
||||
version_purge_statuses_map,
|
||||
ReplicationStatusType, VersionPurgeStatusType, replication_statuses_map, version_purge_statuses_map,
|
||||
};
|
||||
use rustfs_madmin::heal_commands::HealResultItem;
|
||||
use rustfs_rio::Checksum;
|
||||
use rustfs_rio::{DecompressReader, HashReader, LimitReader, WarpReader};
|
||||
use rustfs_utils::CompressionAlgorithm;
|
||||
use rustfs_utils::http::AMZ_STORAGE_CLASS;
|
||||
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER};
|
||||
use rustfs_utils::http::{AMZ_BUCKET_REPLICATION_STATUS, AMZ_RESTORE, AMZ_STORAGE_CLASS};
|
||||
use rustfs_utils::path::decode_dir_object;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
@@ -757,24 +756,7 @@ impl ObjectInfo {
|
||||
.ok()
|
||||
});
|
||||
|
||||
let replication_status_internal = fi
|
||||
.replication_state_internal
|
||||
.as_ref()
|
||||
.and_then(|v| v.replication_status_internal.clone());
|
||||
let version_purge_status_internal = fi
|
||||
.replication_state_internal
|
||||
.as_ref()
|
||||
.and_then(|v| v.version_purge_status_internal.clone());
|
||||
|
||||
let mut replication_status = fi.replication_status();
|
||||
if replication_status.is_empty()
|
||||
&& let Some(status) = fi.metadata.get(AMZ_BUCKET_REPLICATION_STATUS).cloned()
|
||||
&& status == ReplicationStatusType::Replica.as_str()
|
||||
{
|
||||
replication_status = ReplicationStatusType::Replica;
|
||||
}
|
||||
|
||||
let version_purge_status = fi.version_purge_status();
|
||||
// TODO:ReplicationState
|
||||
|
||||
let transitioned_object = TransitionedObject {
|
||||
name: fi.transitioned_objname.clone(),
|
||||
@@ -795,24 +777,10 @@ impl ObjectInfo {
|
||||
};
|
||||
|
||||
// Extract storage class from metadata, default to STANDARD if not found
|
||||
let storage_class = if !fi.transition_tier.is_empty() {
|
||||
Some(fi.transition_tier.clone())
|
||||
} else {
|
||||
fi.metadata
|
||||
.get(AMZ_STORAGE_CLASS)
|
||||
.cloned()
|
||||
.or_else(|| Some(storageclass::STANDARD.to_string()))
|
||||
};
|
||||
|
||||
let mut restore_ongoing = false;
|
||||
let mut restore_expires = None;
|
||||
if let Some(restore_status) = fi.metadata.get(AMZ_RESTORE).cloned() {
|
||||
//
|
||||
if let Ok(restore_status) = parse_restore_obj_status(&restore_status) {
|
||||
restore_ongoing = restore_status.on_going();
|
||||
restore_expires = restore_status.expiry();
|
||||
}
|
||||
}
|
||||
let storage_class = metadata
|
||||
.get(AMZ_STORAGE_CLASS)
|
||||
.cloned()
|
||||
.or_else(|| Some(storageclass::STANDARD.to_string()));
|
||||
|
||||
// Convert parts from rustfs_filemeta::ObjectPartInfo to store_api::ObjectPartInfo
|
||||
let parts = fi
|
||||
@@ -830,8 +798,6 @@ impl ObjectInfo {
|
||||
})
|
||||
.collect();
|
||||
|
||||
// TODO: part checksums
|
||||
|
||||
ObjectInfo {
|
||||
bucket: bucket.to_string(),
|
||||
name,
|
||||
@@ -856,12 +822,6 @@ impl ObjectInfo {
|
||||
transitioned_object,
|
||||
checksum: fi.checksum.clone(),
|
||||
storage_class,
|
||||
restore_ongoing,
|
||||
restore_expires,
|
||||
replication_status_internal,
|
||||
replication_status,
|
||||
version_purge_status_internal,
|
||||
version_purge_status,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@ use crate::{
|
||||
};
|
||||
use futures::future::join_all;
|
||||
use std::collections::{HashMap, hash_map::Entry};
|
||||
|
||||
use tracing::{info, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ use rustfs_filemeta::{
|
||||
MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
|
||||
merge_file_meta_versions,
|
||||
};
|
||||
use rustfs_utils::path::{self, SLASH_SEPARATOR_STR, base_dir_from_prefix};
|
||||
use rustfs_utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::broadcast::{self};
|
||||
@@ -132,7 +132,7 @@ impl ListPathOptions {
|
||||
return;
|
||||
}
|
||||
|
||||
let s = SLASH_SEPARATOR_STR.chars().next().unwrap_or_default();
|
||||
let s = SLASH_SEPARATOR.chars().next().unwrap_or_default();
|
||||
self.filter_prefix = {
|
||||
let fp = self.prefix.trim_start_matches(&self.base_dir).trim_matches(s);
|
||||
|
||||
@@ -346,7 +346,7 @@ impl ECStore {
|
||||
if let Some(delimiter) = &delimiter {
|
||||
if obj.is_dir && obj.mod_time.is_none() {
|
||||
let mut found = false;
|
||||
if delimiter != SLASH_SEPARATOR_STR {
|
||||
if delimiter != SLASH_SEPARATOR {
|
||||
for p in prefixes.iter() {
|
||||
if found {
|
||||
break;
|
||||
@@ -410,13 +410,13 @@ impl ECStore {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut list_result = self
|
||||
.list_path(&opts)
|
||||
.await
|
||||
.unwrap_or_else(|err| MetaCacheEntriesSortedResult {
|
||||
let mut list_result = match self.list_path(&opts).await {
|
||||
Ok(res) => res,
|
||||
Err(err) => MetaCacheEntriesSortedResult {
|
||||
err: Some(err.into()),
|
||||
..Default::default()
|
||||
});
|
||||
},
|
||||
};
|
||||
|
||||
if let Some(err) = list_result.err.clone()
|
||||
&& err != rustfs_filemeta::Error::Unexpected
|
||||
@@ -470,7 +470,7 @@ impl ECStore {
|
||||
if let Some(delimiter) = &delimiter {
|
||||
if obj.is_dir && obj.mod_time.is_none() {
|
||||
let mut found = false;
|
||||
if delimiter != SLASH_SEPARATOR_STR {
|
||||
if delimiter != SLASH_SEPARATOR {
|
||||
for p in prefixes.iter() {
|
||||
if found {
|
||||
break;
|
||||
@@ -502,7 +502,7 @@ impl ECStore {
|
||||
// warn!("list_path opt {:?}", &o);
|
||||
|
||||
check_list_objs_args(&o.bucket, &o.prefix, &o.marker)?;
|
||||
// if opts.prefix.ends_with(SLASH_SEPARATOR_STR) {
|
||||
// if opts.prefix.ends_with(SLASH_SEPARATOR) {
|
||||
// return Err(Error::msg("eof"));
|
||||
// }
|
||||
|
||||
@@ -520,11 +520,11 @@ impl ECStore {
|
||||
return Err(Error::Unexpected);
|
||||
}
|
||||
|
||||
if o.prefix.starts_with(SLASH_SEPARATOR_STR) {
|
||||
if o.prefix.starts_with(SLASH_SEPARATOR) {
|
||||
return Err(Error::Unexpected);
|
||||
}
|
||||
|
||||
let slash_separator = Some(SLASH_SEPARATOR_STR.to_owned());
|
||||
let slash_separator = Some(SLASH_SEPARATOR.to_owned());
|
||||
|
||||
o.include_directories = o.separator == slash_separator;
|
||||
|
||||
@@ -774,8 +774,8 @@ impl ECStore {
|
||||
let mut filter_prefix = {
|
||||
prefix
|
||||
.trim_start_matches(&path)
|
||||
.trim_start_matches(SLASH_SEPARATOR_STR)
|
||||
.trim_end_matches(SLASH_SEPARATOR_STR)
|
||||
.trim_start_matches(SLASH_SEPARATOR)
|
||||
.trim_end_matches(SLASH_SEPARATOR)
|
||||
.to_owned()
|
||||
};
|
||||
|
||||
@@ -988,7 +988,7 @@ async fn gather_results(
|
||||
}
|
||||
|
||||
if let Some(marker) = &opts.marker
|
||||
&& &entry.name <= marker
|
||||
&& &entry.name < marker
|
||||
{
|
||||
continue;
|
||||
}
|
||||
@@ -1130,7 +1130,7 @@ async fn merge_entry_channels(
|
||||
if path::clean(&best_entry.name) == path::clean(&other_entry.name) {
|
||||
let dir_matches = best_entry.is_dir() && other_entry.is_dir();
|
||||
let suffix_matches =
|
||||
best_entry.name.ends_with(SLASH_SEPARATOR_STR) == other_entry.name.ends_with(SLASH_SEPARATOR_STR);
|
||||
best_entry.name.ends_with(SLASH_SEPARATOR) == other_entry.name.ends_with(SLASH_SEPARATOR);
|
||||
|
||||
if dir_matches && suffix_matches {
|
||||
to_merge.push(other_idx);
|
||||
@@ -1476,6 +1476,7 @@ mod test {
|
||||
// use crate::error::Error;
|
||||
// use crate::metacache::writer::MetacacheReader;
|
||||
// use crate::set_disk::SetDisks;
|
||||
// use crate::store::ECStore;
|
||||
// use crate::store_list_objects::ListPathOptions;
|
||||
// use crate::store_list_objects::WalkOptions;
|
||||
// use crate::store_list_objects::WalkVersionsSortOrder;
|
||||
|
||||
@@ -51,7 +51,7 @@ use crate::{
|
||||
store_api::{ObjectOptions, PutObjReader},
|
||||
};
|
||||
use rustfs_rio::HashReader;
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join};
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR, path_join};
|
||||
use s3s::S3ErrorCode;
|
||||
|
||||
use super::{
|
||||
@@ -403,7 +403,7 @@ impl TierConfigMgr {
|
||||
pub async fn save_tiering_config<S: StorageAPI>(&self, api: Arc<S>) -> std::result::Result<(), std::io::Error> {
|
||||
let data = self.marshal()?;
|
||||
|
||||
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR_STR, TIER_CONFIG_FILE);
|
||||
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, TIER_CONFIG_FILE);
|
||||
|
||||
self.save_config(api, &config_file, data).await
|
||||
}
|
||||
@@ -483,7 +483,7 @@ async fn new_and_save_tiering_config<S: StorageAPI>(api: Arc<S>) -> Result<TierC
|
||||
|
||||
#[tracing::instrument(level = "debug", name = "load_tier_config", skip(api))]
|
||||
async fn load_tier_config(api: Arc<ECStore>) -> std::result::Result<TierConfigMgr, std::io::Error> {
|
||||
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR_STR, TIER_CONFIG_FILE);
|
||||
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, TIER_CONFIG_FILE);
|
||||
let data = read_config(api.clone(), config_file.as_str()).await;
|
||||
if let Err(err) = data {
|
||||
if is_err_config_not_found(&err) {
|
||||
|
||||
@@ -30,11 +30,13 @@ use crate::client::{
|
||||
transition_api::{Options, TransitionClient, TransitionCore},
|
||||
transition_api::{ReadCloser, ReaderImpl},
|
||||
};
|
||||
use crate::error::ErrorResponse;
|
||||
use crate::error::error_resp_to_object_err;
|
||||
use crate::tier::{
|
||||
tier_config::TierS3,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
};
|
||||
use rustfs_utils::path::SLASH_SEPARATOR_STR;
|
||||
use rustfs_utils::path::SLASH_SEPARATOR;
|
||||
|
||||
pub struct WarmBackendS3 {
|
||||
pub client: Arc<TransitionClient>,
|
||||
@@ -176,7 +178,7 @@ impl WarmBackend for WarmBackendS3 {
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
let result = self
|
||||
.core
|
||||
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR_STR, 1)
|
||||
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1)
|
||||
.await?;
|
||||
|
||||
Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0)
|
||||
|
||||
@@ -27,11 +27,19 @@ use aws_sdk_s3::Client;
|
||||
use aws_sdk_s3::config::{Credentials, Region};
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
|
||||
use crate::client::transition_api::{ReadCloser, ReaderImpl};
|
||||
use crate::client::{
|
||||
api_get_options::GetObjectOptions,
|
||||
api_put_object::PutObjectOptions,
|
||||
api_remove::RemoveObjectOptions,
|
||||
transition_api::{ReadCloser, ReaderImpl},
|
||||
};
|
||||
use crate::error::ErrorResponse;
|
||||
use crate::error::error_resp_to_object_err;
|
||||
use crate::tier::{
|
||||
tier_config::TierS3,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
};
|
||||
use rustfs_utils::path::SLASH_SEPARATOR;
|
||||
|
||||
pub struct WarmBackendS3 {
|
||||
pub client: Arc<Client>,
|
||||
|
||||
@@ -505,10 +505,6 @@ impl FileInfo {
|
||||
ReplicationStatusType::Empty
|
||||
}
|
||||
}
|
||||
|
||||
pub fn shard_file_size(&self, total_length: i64) -> i64 {
|
||||
self.erasure.shard_file_size(total_length)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
|
||||
@@ -594,7 +590,7 @@ impl RestoreStatusOps for RestoreStatus {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
|
||||
fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
|
||||
let tokens: Vec<&str> = restore_hdr.splitn(2, ",").collect();
|
||||
let progress_tokens: Vec<&str> = tokens[0].splitn(2, "=").collect();
|
||||
if progress_tokens.len() != 2 {
|
||||
|
||||
@@ -14,8 +14,7 @@
|
||||
|
||||
use crate::{
|
||||
ErasureAlgo, ErasureInfo, Error, FileInfo, FileInfoVersions, InlineData, ObjectPartInfo, RawFileInfo, ReplicationState,
|
||||
ReplicationStatusType, Result, TIER_FV_ID, TIER_FV_MARKER, VersionPurgeStatusType, replication_statuses_map,
|
||||
version_purge_statuses_map,
|
||||
ReplicationStatusType, Result, VersionPurgeStatusType, replication_statuses_map, version_purge_statuses_map,
|
||||
};
|
||||
use byteorder::ByteOrder;
|
||||
use bytes::Bytes;
|
||||
@@ -910,7 +909,6 @@ impl FileMeta {
|
||||
path: &str,
|
||||
version_id: &str,
|
||||
read_data: bool,
|
||||
include_free_versions: bool,
|
||||
all_parts: bool,
|
||||
) -> Result<FileInfo> {
|
||||
let vid = {
|
||||
@@ -923,35 +921,11 @@ impl FileMeta {
|
||||
|
||||
let mut is_latest = true;
|
||||
let mut succ_mod_time = None;
|
||||
let mut non_free_versions = self.versions.len();
|
||||
|
||||
let mut found = false;
|
||||
let mut found_free_version = None;
|
||||
let mut found_fi = None;
|
||||
|
||||
for ver in self.versions.iter() {
|
||||
let header = &ver.header;
|
||||
|
||||
// TODO: freeVersion
|
||||
if header.free_version() {
|
||||
non_free_versions -= 1;
|
||||
if include_free_versions && found_free_version.is_none() {
|
||||
let mut found_free_fi = FileMetaVersion::default();
|
||||
if found_free_fi.unmarshal_msg(&ver.meta).is_ok() && found_free_fi.version_type != VersionType::Invalid {
|
||||
let mut free_fi = found_free_fi.into_fileinfo(volume, path, all_parts);
|
||||
free_fi.is_latest = true;
|
||||
found_free_version = Some(free_fi);
|
||||
}
|
||||
}
|
||||
|
||||
if header.version_id != Some(vid) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
continue;
|
||||
}
|
||||
|
||||
if !version_id.is_empty() && header.version_id != Some(vid) {
|
||||
is_latest = false;
|
||||
@@ -959,8 +933,6 @@ impl FileMeta {
|
||||
continue;
|
||||
}
|
||||
|
||||
found = true;
|
||||
|
||||
let mut fi = ver.into_fileinfo(volume, path, all_parts)?;
|
||||
fi.is_latest = is_latest;
|
||||
|
||||
@@ -975,25 +947,7 @@ impl FileMeta {
|
||||
.map(bytes::Bytes::from);
|
||||
}
|
||||
|
||||
found_fi = Some(fi);
|
||||
}
|
||||
|
||||
if !found {
|
||||
if version_id.is_empty() {
|
||||
if include_free_versions
|
||||
&& non_free_versions == 0
|
||||
&& let Some(free_version) = found_free_version
|
||||
{
|
||||
return Ok(free_version);
|
||||
}
|
||||
return Err(Error::FileNotFound);
|
||||
} else {
|
||||
return Err(Error::FileVersionNotFound);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(mut fi) = found_fi {
|
||||
fi.num_versions = non_free_versions;
|
||||
fi.num_versions = self.versions.len();
|
||||
|
||||
return Ok(fi);
|
||||
}
|
||||
@@ -1813,27 +1767,14 @@ impl MetaObject {
|
||||
metadata.insert(k.to_owned(), v.to_owned());
|
||||
}
|
||||
|
||||
let tier_fvidkey = format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_ID}").to_lowercase();
|
||||
let tier_fvmarker_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_MARKER}").to_lowercase();
|
||||
|
||||
for (k, v) in &self.meta_sys {
|
||||
let lower_k = k.to_lowercase();
|
||||
|
||||
if lower_k == tier_fvidkey || lower_k == tier_fvmarker_key {
|
||||
continue;
|
||||
}
|
||||
|
||||
if lower_k == VERSION_PURGE_STATUS_KEY.to_lowercase() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if lower_k == AMZ_STORAGE_CLASS.to_lowercase() && v == b"STANDARD" {
|
||||
if k == AMZ_STORAGE_CLASS && v == b"STANDARD" {
|
||||
continue;
|
||||
}
|
||||
|
||||
if k.starts_with(RESERVED_METADATA_PREFIX)
|
||||
|| k.starts_with(RESERVED_METADATA_PREFIX_LOWER)
|
||||
|| lower_k == VERSION_PURGE_STATUS_KEY.to_lowercase()
|
||||
|| k == VERSION_PURGE_STATUS_KEY
|
||||
{
|
||||
metadata.insert(k.to_owned(), String::from_utf8(v.to_owned()).unwrap_or_default());
|
||||
}
|
||||
@@ -2570,31 +2511,15 @@ pub fn merge_file_meta_versions(
|
||||
merged
|
||||
}
|
||||
|
||||
pub fn file_info_from_raw(
|
||||
ri: RawFileInfo,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
read_data: bool,
|
||||
include_free_versions: bool,
|
||||
) -> Result<FileInfo> {
|
||||
get_file_info(
|
||||
&ri.buf,
|
||||
bucket,
|
||||
object,
|
||||
"",
|
||||
FileInfoOpts {
|
||||
data: read_data,
|
||||
include_free_versions,
|
||||
},
|
||||
)
|
||||
pub async fn file_info_from_raw(ri: RawFileInfo, bucket: &str, object: &str, read_data: bool) -> Result<FileInfo> {
|
||||
get_file_info(&ri.buf, bucket, object, "", FileInfoOpts { data: read_data }).await
|
||||
}
|
||||
|
||||
pub struct FileInfoOpts {
|
||||
pub data: bool,
|
||||
pub include_free_versions: bool,
|
||||
}
|
||||
|
||||
pub fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &str, opts: FileInfoOpts) -> Result<FileInfo> {
|
||||
pub async fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &str, opts: FileInfoOpts) -> Result<FileInfo> {
|
||||
let vid = {
|
||||
if version_id.is_empty() {
|
||||
None
|
||||
@@ -2616,7 +2541,7 @@ pub fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &str, opt
|
||||
});
|
||||
}
|
||||
|
||||
let fi = meta.into_fileinfo(volume, path, version_id, opts.data, opts.include_free_versions, true)?;
|
||||
let fi = meta.into_fileinfo(volume, path, version_id, opts.data, true)?;
|
||||
Ok(fi)
|
||||
}
|
||||
|
||||
|
||||
@@ -12,10 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{
|
||||
Error, FileInfo, FileInfoOpts, FileInfoVersions, FileMeta, FileMetaShallowVersion, Result, VersionType, get_file_info,
|
||||
merge_file_meta_versions,
|
||||
};
|
||||
use crate::{Error, FileInfo, FileInfoVersions, FileMeta, FileMetaShallowVersion, Result, VersionType, merge_file_meta_versions};
|
||||
use rmp::Marker;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::cmp::Ordering;
|
||||
@@ -144,7 +141,8 @@ impl MetaCacheEntry {
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(fm) = &self.cached {
|
||||
if self.cached.is_some() {
|
||||
let fm = self.cached.as_ref().unwrap();
|
||||
if fm.versions.is_empty() {
|
||||
return Ok(FileInfo {
|
||||
volume: bucket.to_owned(),
|
||||
@@ -156,20 +154,14 @@ impl MetaCacheEntry {
|
||||
});
|
||||
}
|
||||
|
||||
let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false, true)?;
|
||||
let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
|
||||
return Ok(fi);
|
||||
}
|
||||
|
||||
get_file_info(
|
||||
&self.metadata,
|
||||
bucket,
|
||||
self.name.as_str(),
|
||||
"",
|
||||
FileInfoOpts {
|
||||
data: false,
|
||||
include_free_versions: false,
|
||||
},
|
||||
)
|
||||
let mut fm = FileMeta::new();
|
||||
fm.unmarshal_msg(&self.metadata)?;
|
||||
let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
|
||||
Ok(fi)
|
||||
}
|
||||
|
||||
pub fn file_info_versions(&self, bucket: &str) -> Result<FileInfoVersions> {
|
||||
|
||||
@@ -32,7 +32,7 @@ use rustfs_ecstore::{
|
||||
store_api::{ObjectInfo, ObjectOptions},
|
||||
};
|
||||
use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc};
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join_buf};
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use std::sync::LazyLock;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
@@ -182,7 +182,7 @@ impl ObjectStore {
|
||||
} else {
|
||||
info.name
|
||||
};
|
||||
let name = object_name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR_STR);
|
||||
let name = object_name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR);
|
||||
let _ = sender
|
||||
.send(StringOrErr {
|
||||
item: Some(name.to_owned()),
|
||||
|
||||
1
crates/kms/.gitignore
vendored
1
crates/kms/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
examples/local_data/*
|
||||
@@ -55,7 +55,6 @@ moka = { workspace = true, features = ["future"] }
|
||||
|
||||
# Additional dependencies
|
||||
md5 = { workspace = true }
|
||||
arc-swap = { workspace = true }
|
||||
|
||||
# HTTP client for Vault
|
||||
reqwest = { workspace = true }
|
||||
|
||||
@@ -1,251 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! KMS Demo - Comprehensive example demonstrating RustFS KMS capabilities
|
||||
//!
|
||||
//! This example demonstrates:
|
||||
//! - Initializing and configuring KMS service
|
||||
//! - Creating master keys
|
||||
//! - Generating data encryption keys
|
||||
//! - Encrypting and decrypting data using high-level APIs
|
||||
//! - Key management operations
|
||||
//! - Cache statistics
|
||||
//!
|
||||
//! Run with: `cargo run --example demo1`
|
||||
|
||||
use rustfs_kms::{
|
||||
CreateKeyRequest, DescribeKeyRequest, EncryptionAlgorithm, GenerateDataKeyRequest, KeySpec, KeyUsage, KmsConfig,
|
||||
ListKeysRequest, init_global_kms_service_manager,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::io::Cursor;
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Note: Tracing is optional - if tracing-subscriber is not available,
|
||||
// the example will still work but with less detailed logging
|
||||
|
||||
println!("=== RustFS KMS Demo ===\n");
|
||||
|
||||
// Step 1: Initialize global KMS service manager
|
||||
println!("1. Initializing KMS service manager...");
|
||||
let service_manager = init_global_kms_service_manager();
|
||||
println!(" ✓ Service manager initialized\n");
|
||||
|
||||
// Step 2: Create a temporary directory for local backend
|
||||
println!("2. Setting up local backend...");
|
||||
if !fs::metadata("examples/local_data").is_ok() {
|
||||
fs::create_dir_all("examples/local_data")?;
|
||||
}
|
||||
let data_dir = std::path::PathBuf::from("examples/local_data");
|
||||
println!(" ✓ Using data directory: {}\n", data_dir.display());
|
||||
|
||||
// Step 3: Configure KMS with local backend
|
||||
println!("3. Configuring KMS with local backend...");
|
||||
let config = KmsConfig::local(data_dir)
|
||||
.with_default_key("demo-key-default-1".to_string())
|
||||
.with_cache(true);
|
||||
service_manager.configure(config).await?;
|
||||
println!(" ✓ KMS configured\n");
|
||||
|
||||
// Step 4: Start the KMS service
|
||||
println!("4. Starting KMS service...");
|
||||
service_manager.start().await?;
|
||||
println!(" ✓ KMS service started\n");
|
||||
|
||||
// Step 5: Get the encryption service
|
||||
println!("5. Getting encryption service...");
|
||||
let encryption_service = rustfs_kms::get_global_encryption_service()
|
||||
.await
|
||||
.ok_or("Encryption service not available")?;
|
||||
println!(" ✓ Encryption service obtained\n");
|
||||
|
||||
// Step 6: Create a master key
|
||||
println!("6. Creating a master key...");
|
||||
let create_request = CreateKeyRequest {
|
||||
key_name: Some("demo-key-master-1".to_string()),
|
||||
key_usage: KeyUsage::EncryptDecrypt,
|
||||
description: Some("Demo master key for encryption".to_string()),
|
||||
policy: None,
|
||||
tags: {
|
||||
let mut tags = HashMap::new();
|
||||
tags.insert("environment".to_string(), "demo".to_string());
|
||||
tags.insert("purpose".to_string(), "testing".to_string());
|
||||
tags
|
||||
},
|
||||
origin: Some("demo1.rs".to_string()),
|
||||
};
|
||||
|
||||
let create_response = encryption_service.create_key(create_request).await?;
|
||||
println!(" ✓ Master key created:");
|
||||
println!(" - Key ID: {}", create_response.key_id);
|
||||
println!(" - Key State: {:?}", create_response.key_metadata.key_state);
|
||||
println!(" - Key Usage: {:?}", create_response.key_metadata.key_usage);
|
||||
println!(" - Created: {}\n", create_response.key_metadata.creation_date);
|
||||
|
||||
let master_key_id = create_response.key_id.clone();
|
||||
|
||||
// Step 7: Describe the key
|
||||
println!("7. Describing the master key...");
|
||||
let describe_request = DescribeKeyRequest {
|
||||
key_id: master_key_id.clone(),
|
||||
};
|
||||
let describe_response = encryption_service.describe_key(describe_request).await?;
|
||||
let metadata = describe_response.key_metadata;
|
||||
println!(" ✓ Key details:");
|
||||
println!(" - Key ID: {}", metadata.key_id);
|
||||
println!(" - Description: {:?}", metadata.description);
|
||||
println!(" - Key Usage: {:?}", metadata.key_usage);
|
||||
println!(" - Key State: {:?}", metadata.key_state);
|
||||
println!(" - Tags: {:?}\n", metadata.tags);
|
||||
|
||||
// Step 8: Generate a data encryption key (OPTIONAL - for demonstration only)
|
||||
// NOTE: This step is OPTIONAL and only for educational purposes!
|
||||
// In real usage, you can skip this step and go directly to Step 9.
|
||||
// encrypt_object() will automatically generate a data key internally.
|
||||
println!("8. [OPTIONAL] Generating a data encryption key (for demonstration)...");
|
||||
println!(" ⚠️ This step is OPTIONAL - only for understanding the two-layer key architecture:");
|
||||
println!(" - Master Key (CMK): Used to encrypt/decrypt data keys");
|
||||
println!(" - Data Key (DEK): Used to encrypt/decrypt actual data");
|
||||
println!(" In production, you can skip this and use encrypt_object() directly!\n");
|
||||
|
||||
let data_key_request = GenerateDataKeyRequest {
|
||||
key_id: master_key_id.clone(),
|
||||
key_spec: KeySpec::Aes256,
|
||||
encryption_context: {
|
||||
let mut context = HashMap::new();
|
||||
context.insert("bucket".to_string(), "demo-bucket".to_string());
|
||||
context.insert("object_key".to_string(), "demo-object.txt".to_string());
|
||||
context
|
||||
},
|
||||
};
|
||||
|
||||
let data_key_response = encryption_service.generate_data_key(data_key_request).await?;
|
||||
println!(" ✓ Data key generated (for demonstration):");
|
||||
println!(" - Master Key ID: {}", data_key_response.key_id);
|
||||
println!(" - Data Key (plaintext) length: {} bytes", data_key_response.plaintext_key.len());
|
||||
println!(
|
||||
" - Encrypted Data Key (ciphertext blob) length: {} bytes",
|
||||
data_key_response.ciphertext_blob.len()
|
||||
);
|
||||
println!(" - Note: This data key is NOT used in Step 9 - encrypt_object() generates its own!\n");
|
||||
|
||||
// Step 9: Encrypt some data using high-level API
|
||||
// This is the RECOMMENDED way to encrypt data - everything is handled automatically!
|
||||
println!("9. Encrypting data using object encryption service (RECOMMENDED)...");
|
||||
println!(" ✅ This is all you need! encrypt_object() handles everything:");
|
||||
println!(" 1. Validates/creates the master key (if needed)");
|
||||
println!(" 2. Generates a NEW data key using the master key (independent of Step 8)");
|
||||
println!(" 3. Uses the data key to encrypt the actual data");
|
||||
println!(" 4. Stores the encrypted data key (ciphertext blob) in metadata");
|
||||
println!(" You only need to provide the master_key_id - everything else is handled!\n");
|
||||
|
||||
let plaintext = b"Hello, RustFS KMS! This is a test message for encryption.";
|
||||
println!(" Plaintext: {}", String::from_utf8_lossy(plaintext));
|
||||
|
||||
let reader = Cursor::new(plaintext);
|
||||
// Just provide the master_key_id - encrypt_object() handles everything internally!
|
||||
let encryption_result = encryption_service
|
||||
.encrypt_object(
|
||||
"demo-bucket",
|
||||
"demo-object.txt",
|
||||
reader,
|
||||
&EncryptionAlgorithm::Aes256,
|
||||
Some(&master_key_id), // Only need to provide master key ID
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
println!(" ✓ Data encrypted:");
|
||||
println!(" - Encrypted data length: {} bytes", encryption_result.ciphertext.len());
|
||||
println!(" - Algorithm: {}", encryption_result.metadata.algorithm);
|
||||
println!(
|
||||
" - Master Key ID: {} (used to encrypt the data key)",
|
||||
encryption_result.metadata.key_id
|
||||
);
|
||||
println!(
|
||||
" - Encrypted Data Key length: {} bytes (stored in metadata)",
|
||||
encryption_result.metadata.encrypted_data_key.len()
|
||||
);
|
||||
println!(" - Original size: {} bytes\n", encryption_result.metadata.original_size);
|
||||
|
||||
// Step 10: Decrypt the data using high-level API
|
||||
println!("10. Decrypting data...");
|
||||
println!(" Note: decrypt_object() has the ENTIRE decryption flow built-in:");
|
||||
println!(" 1. Extracts the encrypted data key from metadata");
|
||||
println!(" 2. Uses master key to decrypt the data key");
|
||||
println!(" 3. Uses the decrypted data key to decrypt the actual data");
|
||||
println!(" You only need to provide the encrypted data and metadata!\n");
|
||||
|
||||
let mut decrypted_reader = encryption_service
|
||||
.decrypt_object(
|
||||
"demo-bucket",
|
||||
"demo-object.txt",
|
||||
encryption_result.ciphertext.clone(),
|
||||
&encryption_result.metadata, // Contains everything needed for decryption
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut decrypted_data = Vec::new();
|
||||
decrypted_reader.read_to_end(&mut decrypted_data).await?;
|
||||
|
||||
println!(" ✓ Data decrypted:");
|
||||
println!(" - Decrypted text: {}\n", String::from_utf8_lossy(&decrypted_data));
|
||||
|
||||
// Verify decryption
|
||||
assert_eq!(plaintext, decrypted_data.as_slice());
|
||||
println!(" ✓ Decryption verified: plaintext matches original\n");
|
||||
|
||||
// Step 11: List all keys
|
||||
println!("11. Listing all keys...");
|
||||
let list_request = ListKeysRequest {
|
||||
limit: Some(10),
|
||||
marker: None,
|
||||
usage_filter: None,
|
||||
status_filter: None,
|
||||
};
|
||||
let list_response = encryption_service.list_keys(list_request).await?;
|
||||
println!(" ✓ Keys found: {}", list_response.keys.len());
|
||||
for (idx, key_info) in list_response.keys.iter().enumerate() {
|
||||
println!(" {}. {} ({:?})", idx + 1, key_info.key_id, key_info.status);
|
||||
}
|
||||
println!();
|
||||
|
||||
// Step 12: Check cache statistics
|
||||
println!("12. Checking cache statistics...");
|
||||
if let Some((hits, misses)) = encryption_service.cache_stats().await {
|
||||
println!(" ✓ Cache statistics:");
|
||||
println!(" - Cache hits: {}", hits);
|
||||
println!(" - Cache misses: {}\n", misses);
|
||||
} else {
|
||||
println!(" - Cache is disabled\n");
|
||||
}
|
||||
|
||||
// Step 13: Health check
|
||||
println!("13. Performing health check...");
|
||||
let is_healthy = encryption_service.health_check().await?;
|
||||
println!(" ✓ KMS backend is healthy: {}\n", is_healthy);
|
||||
|
||||
// Step 14: Stop the service
|
||||
println!("14. Stopping KMS service...");
|
||||
service_manager.stop().await?;
|
||||
println!(" ✓ KMS service stopped\n");
|
||||
|
||||
println!("=== Demo completed successfully! ===");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,292 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! KMS Demo 2 - Comprehensive example demonstrating RustFS KMS with Vault backend
|
||||
//!
|
||||
//! This example demonstrates:
|
||||
//! - Initializing and configuring KMS service with Vault backend
|
||||
//! - Creating master keys stored in Vault
|
||||
//! - Generating data encryption keys
|
||||
//! - Encrypting and decrypting data using high-level APIs
|
||||
//! - Key management operations with Vault
|
||||
//! - Cache statistics
|
||||
//!
|
||||
//! Prerequisites:
|
||||
//! - Vault server running at http://127.0.0.1:8200 (or set RUSTFS_KMS_VAULT_ADDRESS)
|
||||
//! - Vault token (set RUSTFS_KMS_VAULT_TOKEN environment variable, or use default "dev-token" for dev mode)
|
||||
//!
|
||||
//! Run with: `cargo run --example demo2`
|
||||
//! Or with custom Vault settings:
|
||||
//! RUSTFS_KMS_VAULT_ADDRESS=http://127.0.0.1:8200 RUSTFS_KMS_VAULT_TOKEN=your-token cargo run --example demo2
|
||||
|
||||
use rustfs_kms::{
|
||||
CreateKeyRequest, DescribeKeyRequest, EncryptionAlgorithm, GenerateDataKeyRequest, KeySpec, KeyUsage, KmsConfig, KmsError,
|
||||
ListKeysRequest, init_global_kms_service_manager,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::io::Cursor;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use url::Url;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Note: Tracing is optional - if tracing-subscriber is not available,
|
||||
// the example will still work but with less detailed logging
|
||||
|
||||
println!("=== RustFS KMS Demo 2 (Vault Backend) ===\n");
|
||||
|
||||
// Step 1: Initialize global KMS service manager
|
||||
println!("1. Initializing KMS service manager...");
|
||||
let service_manager = init_global_kms_service_manager();
|
||||
println!(" ✓ Service manager initialized\n");
|
||||
|
||||
// Step 2: Get Vault configuration from environment or use defaults
|
||||
println!("2. Configuring Vault backend...");
|
||||
let vault_address = std::env::var("RUSTFS_KMS_VAULT_ADDRESS").unwrap_or_else(|_| "http://127.0.0.1:8200".to_string());
|
||||
let vault_token = std::env::var("RUSTFS_KMS_VAULT_TOKEN").unwrap_or_else(|_| {
|
||||
println!(" ⚠️ No RUSTFS_KMS_VAULT_TOKEN found, using default 'dev-token'");
|
||||
println!(" For production, set RUSTFS_KMS_VAULT_TOKEN environment variable");
|
||||
"dev-token".to_string()
|
||||
});
|
||||
|
||||
let vault_url = Url::parse(&vault_address).map_err(|e| format!("Invalid Vault address '{}': {}", vault_address, e))?;
|
||||
|
||||
println!(" ✓ Vault address: {}", vault_address);
|
||||
println!(" ✓ Using token authentication\n");
|
||||
|
||||
// Step 3: Configure KMS with Vault backend
|
||||
println!("3. Configuring KMS with Vault backend...");
|
||||
let config = KmsConfig::vault(vault_url, vault_token)
|
||||
.with_default_key("demo-key-master-1".to_string())
|
||||
.with_cache(true);
|
||||
service_manager.configure(config).await?;
|
||||
println!(" ✓ KMS configured with Vault backend\n");
|
||||
|
||||
// Step 4: Start the KMS service
|
||||
println!("4. Starting KMS service...");
|
||||
service_manager.start().await?;
|
||||
println!(" ✓ KMS service started\n");
|
||||
|
||||
// Step 5: Get the encryption service
|
||||
println!("5. Getting encryption service...");
|
||||
let encryption_service = rustfs_kms::get_global_encryption_service()
|
||||
.await
|
||||
.ok_or("Encryption service not available")?;
|
||||
println!(" ✓ Encryption service obtained\n");
|
||||
|
||||
// Step 6: Create a master key (stored in Vault) or use existing one
|
||||
println!("6. Checking for existing master key in Vault...");
|
||||
let master_key_id = "demo-key-master-1".to_string();
|
||||
let describe_request = DescribeKeyRequest {
|
||||
key_id: master_key_id.clone(),
|
||||
};
|
||||
|
||||
let master_key_id = match encryption_service.describe_key(describe_request).await {
|
||||
Ok(describe_response) => {
|
||||
// Key already exists, use it
|
||||
println!(" ✓ Master key already exists in Vault:");
|
||||
println!(" - Key ID: {}", describe_response.key_metadata.key_id);
|
||||
println!(" - Key State: {:?}", describe_response.key_metadata.key_state);
|
||||
println!(" - Key Usage: {:?}", describe_response.key_metadata.key_usage);
|
||||
println!(" - Created: {}\n", describe_response.key_metadata.creation_date);
|
||||
describe_response.key_metadata.key_id
|
||||
}
|
||||
Err(KmsError::KeyNotFound { .. }) => {
|
||||
// Key doesn't exist, create it
|
||||
println!(" Key not found, creating new master key in Vault...");
|
||||
let create_request = CreateKeyRequest {
|
||||
key_name: Some(master_key_id.clone()),
|
||||
key_usage: KeyUsage::EncryptDecrypt,
|
||||
description: Some("Demo master key for encryption (stored in Vault)".to_string()),
|
||||
policy: None,
|
||||
tags: {
|
||||
let mut tags = HashMap::new();
|
||||
tags.insert("environment".to_string(), "demo".to_string());
|
||||
tags.insert("purpose".to_string(), "testing".to_string());
|
||||
tags.insert("backend".to_string(), "vault".to_string());
|
||||
tags
|
||||
},
|
||||
origin: Some("demo2.rs".to_string()),
|
||||
};
|
||||
|
||||
let create_response = encryption_service.create_key(create_request).await?;
|
||||
println!(" ✓ Master key created in Vault:");
|
||||
println!(" - Key ID: {}", create_response.key_id);
|
||||
println!(" - Key State: {:?}", create_response.key_metadata.key_state);
|
||||
println!(" - Key Usage: {:?}", create_response.key_metadata.key_usage);
|
||||
println!(" - Created: {}\n", create_response.key_metadata.creation_date);
|
||||
create_response.key_id
|
||||
}
|
||||
Err(e) => {
|
||||
// Other error, return it
|
||||
return Err(Box::new(e) as Box<dyn std::error::Error>);
|
||||
}
|
||||
};
|
||||
|
||||
// Step 7: Describe the key (retrieved from Vault)
|
||||
println!("7. Describing the master key (from Vault)...");
|
||||
let describe_request = DescribeKeyRequest {
|
||||
key_id: master_key_id.clone(),
|
||||
};
|
||||
let describe_response = encryption_service.describe_key(describe_request).await?;
|
||||
let metadata = describe_response.key_metadata;
|
||||
println!(" ✓ Key details (from Vault):");
|
||||
println!(" - Key ID: {}", metadata.key_id);
|
||||
println!(" - Description: {:?}", metadata.description);
|
||||
println!(" - Key Usage: {:?}", metadata.key_usage);
|
||||
println!(" - Key State: {:?}", metadata.key_state);
|
||||
println!(" - Tags: {:?}\n", metadata.tags);
|
||||
|
||||
// Step 8: Generate a data encryption key (OPTIONAL - for demonstration only)
|
||||
// NOTE: This step is OPTIONAL and only for educational purposes!
|
||||
// In real usage, you can skip this step and go directly to Step 9.
|
||||
// encrypt_object() will automatically generate a data key internally.
|
||||
println!("8. [OPTIONAL] Generating a data encryption key (for demonstration)...");
|
||||
println!(" ⚠️ This step is OPTIONAL - only for understanding the two-layer key architecture:");
|
||||
println!(" - Master Key (CMK): Stored in Vault, used to encrypt/decrypt data keys");
|
||||
println!(" - Data Key (DEK): Generated per object, encrypted by master key");
|
||||
println!(" In production, you can skip this and use encrypt_object() directly!\n");
|
||||
|
||||
let data_key_request = GenerateDataKeyRequest {
|
||||
key_id: master_key_id.clone(),
|
||||
key_spec: KeySpec::Aes256,
|
||||
encryption_context: {
|
||||
let mut context = HashMap::new();
|
||||
context.insert("bucket".to_string(), "demo-bucket".to_string());
|
||||
context.insert("object_key".to_string(), "demo-object.txt".to_string());
|
||||
context
|
||||
},
|
||||
};
|
||||
|
||||
let data_key_response = encryption_service.generate_data_key(data_key_request).await?;
|
||||
println!(" ✓ Data key generated (for demonstration):");
|
||||
println!(" - Master Key ID: {}", data_key_response.key_id);
|
||||
println!(" - Data Key (plaintext) length: {} bytes", data_key_response.plaintext_key.len());
|
||||
println!(
|
||||
" - Encrypted Data Key (ciphertext blob) length: {} bytes",
|
||||
data_key_response.ciphertext_blob.len()
|
||||
);
|
||||
println!(" - Note: This data key is NOT used in Step 9 - encrypt_object() generates its own!\n");
|
||||
|
||||
// Step 9: Encrypt some data using high-level API
|
||||
// This is the RECOMMENDED way to encrypt data - everything is handled automatically!
|
||||
println!("9. Encrypting data using object encryption service (RECOMMENDED)...");
|
||||
println!(" ✅ This is all you need! encrypt_object() handles everything:");
|
||||
println!(" 1. Validates/creates the master key in Vault (if needed)");
|
||||
println!(" 2. Generates a NEW data key using the master key from Vault (independent of Step 8)");
|
||||
println!(" 3. Uses the data key to encrypt the actual data");
|
||||
println!(" 4. Stores the encrypted data key (ciphertext blob) in metadata");
|
||||
println!(" You only need to provide the master_key_id - everything else is handled!\n");
|
||||
|
||||
let plaintext = b"Hello, RustFS KMS with Vault! This is a test message for encryption.";
|
||||
println!(" Plaintext: {}", String::from_utf8_lossy(plaintext));
|
||||
|
||||
let reader = Cursor::new(plaintext);
|
||||
// Just provide the master_key_id - encrypt_object() handles everything internally!
|
||||
let encryption_result = encryption_service
|
||||
.encrypt_object(
|
||||
"demo-bucket",
|
||||
"demo-object.txt",
|
||||
reader,
|
||||
&EncryptionAlgorithm::Aes256,
|
||||
Some(&master_key_id), // Only need to provide master key ID
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
println!(" ✓ Data encrypted:");
|
||||
println!(" - Encrypted data length: {} bytes", encryption_result.ciphertext.len());
|
||||
println!(" - Algorithm: {}", encryption_result.metadata.algorithm);
|
||||
println!(
|
||||
" - Master Key ID: {} (stored in Vault, used to encrypt the data key)",
|
||||
encryption_result.metadata.key_id
|
||||
);
|
||||
println!(
|
||||
" - Encrypted Data Key length: {} bytes (stored in metadata)",
|
||||
encryption_result.metadata.encrypted_data_key.len()
|
||||
);
|
||||
println!(" - Original size: {} bytes\n", encryption_result.metadata.original_size);
|
||||
|
||||
// Step 10: Decrypt the data using high-level API
|
||||
println!("10. Decrypting data...");
|
||||
println!(" Note: decrypt_object() has the ENTIRE decryption flow built-in:");
|
||||
println!(" 1. Extracts the encrypted data key from metadata");
|
||||
println!(" 2. Uses master key from Vault to decrypt the data key");
|
||||
println!(" 3. Uses the decrypted data key to decrypt the actual data");
|
||||
println!(" You only need to provide the encrypted data and metadata!\n");
|
||||
|
||||
let mut decrypted_reader = encryption_service
|
||||
.decrypt_object(
|
||||
"demo-bucket",
|
||||
"demo-object.txt",
|
||||
encryption_result.ciphertext.clone(),
|
||||
&encryption_result.metadata, // Contains everything needed for decryption
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut decrypted_data = Vec::new();
|
||||
decrypted_reader.read_to_end(&mut decrypted_data).await?;
|
||||
|
||||
println!(" ✓ Data decrypted:");
|
||||
println!(" - Decrypted text: {}\n", String::from_utf8_lossy(&decrypted_data));
|
||||
|
||||
// Verify decryption
|
||||
assert_eq!(plaintext, decrypted_data.as_slice());
|
||||
println!(" ✓ Decryption verified: plaintext matches original\n");
|
||||
|
||||
// Step 11: List all keys (from Vault)
|
||||
println!("11. Listing all keys (from Vault)...");
|
||||
let list_request = ListKeysRequest {
|
||||
limit: Some(10),
|
||||
marker: None,
|
||||
usage_filter: None,
|
||||
status_filter: None,
|
||||
};
|
||||
let list_response = encryption_service.list_keys(list_request).await?;
|
||||
println!(" ✓ Keys found in Vault: {}", list_response.keys.len());
|
||||
for (idx, key_info) in list_response.keys.iter().enumerate() {
|
||||
println!(" {}. {} ({:?})", idx + 1, key_info.key_id, key_info.status);
|
||||
}
|
||||
println!();
|
||||
|
||||
// Step 12: Check cache statistics
|
||||
println!("12. Checking cache statistics...");
|
||||
if let Some((hits, misses)) = encryption_service.cache_stats().await {
|
||||
println!(" ✓ Cache statistics:");
|
||||
println!(" - Cache hits: {}", hits);
|
||||
println!(" - Cache misses: {}\n", misses);
|
||||
} else {
|
||||
println!(" - Cache is disabled\n");
|
||||
}
|
||||
|
||||
// Step 13: Health check (verifies Vault connectivity)
|
||||
println!("13. Performing health check (Vault connectivity)...");
|
||||
let is_healthy = encryption_service.health_check().await?;
|
||||
println!(" ✓ KMS backend (Vault) is healthy: {}\n", is_healthy);
|
||||
|
||||
// Step 14: Stop the service
|
||||
println!("14. Stopping KMS service...");
|
||||
service_manager.stop().await?;
|
||||
println!(" ✓ KMS service stopped\n");
|
||||
|
||||
println!("=== Demo 2 (Vault Backend) completed successfully! ===");
|
||||
println!("\n💡 Tips:");
|
||||
println!(" - Keys are now stored in Vault at: {}/v1/secret/data/rustfs/kms/keys/", vault_address);
|
||||
println!(" - You can verify keys in Vault using: vault kv list secret/rustfs/kms/keys/");
|
||||
println!(" - For production, use proper Vault authentication (AppRole, etc.)");
|
||||
println!(" - See examples/VAULT_SETUP.md for detailed Vault configuration guide");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -17,7 +17,6 @@
|
||||
use crate::backends::{BackendInfo, KmsBackend, KmsClient};
|
||||
use crate::config::KmsConfig;
|
||||
use crate::config::LocalConfig;
|
||||
use crate::encryption::{AesDekCrypto, DataKeyEnvelope, DekCrypto, generate_key_material};
|
||||
use crate::error::{KmsError, Result};
|
||||
use crate::types::*;
|
||||
use aes_gcm::{
|
||||
@@ -25,7 +24,6 @@ use aes_gcm::{
|
||||
aead::{Aead, KeyInit},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64};
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
@@ -38,11 +36,9 @@ use tracing::{debug, info, warn};
|
||||
pub struct LocalKmsClient {
|
||||
config: LocalConfig,
|
||||
/// In-memory cache of loaded keys for performance
|
||||
key_cache: RwLock<HashMap<String, MasterKeyInfo>>,
|
||||
key_cache: RwLock<HashMap<String, MasterKey>>,
|
||||
/// Master encryption key for encrypting stored keys
|
||||
master_cipher: Option<Aes256Gcm>,
|
||||
/// DEK encryption implementation
|
||||
dek_crypto: AesDekCrypto,
|
||||
}
|
||||
|
||||
/// Serializable representation of a master key stored on disk
|
||||
@@ -58,12 +54,24 @@ struct StoredMasterKey {
|
||||
created_at: chrono::DateTime<chrono::Utc>,
|
||||
rotated_at: Option<chrono::DateTime<chrono::Utc>>,
|
||||
created_by: Option<String>,
|
||||
/// Encrypted key material (32 bytes encoded in base64 for AES-256)
|
||||
encrypted_key_material: String,
|
||||
/// Encrypted key material (32 bytes for AES-256)
|
||||
encrypted_key_material: Vec<u8>,
|
||||
/// Nonce used for encryption
|
||||
nonce: Vec<u8>,
|
||||
}
|
||||
|
||||
/// Data key envelope stored with each data key generation
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct DataKeyEnvelope {
|
||||
key_id: String,
|
||||
master_key_id: String,
|
||||
key_spec: String,
|
||||
encrypted_key: Vec<u8>,
|
||||
nonce: Vec<u8>,
|
||||
encryption_context: HashMap<String, String>,
|
||||
created_at: chrono::DateTime<chrono::Utc>,
|
||||
}
|
||||
|
||||
impl LocalKmsClient {
|
||||
/// Create a new local KMS client
|
||||
pub async fn new(config: LocalConfig) -> Result<Self> {
|
||||
@@ -86,7 +94,6 @@ impl LocalKmsClient {
|
||||
config,
|
||||
key_cache: RwLock::new(HashMap::new()),
|
||||
master_cipher,
|
||||
dek_crypto: AesDekCrypto::new(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -108,8 +115,8 @@ impl LocalKmsClient {
|
||||
self.config.key_dir.join(format!("{key_id}.key"))
|
||||
}
|
||||
|
||||
/// Decode and decrypt a stored key file, returning both the metadata and decrypted key material
|
||||
async fn decode_stored_key(&self, key_id: &str) -> Result<(StoredMasterKey, Vec<u8>)> {
|
||||
/// Load a master key from disk
|
||||
async fn load_master_key(&self, key_id: &str) -> Result<MasterKey> {
|
||||
let key_path = self.master_key_path(key_id);
|
||||
if !key_path.exists() {
|
||||
return Err(KmsError::key_not_found(key_id));
|
||||
@@ -119,7 +126,7 @@ impl LocalKmsClient {
|
||||
let stored_key: StoredMasterKey = serde_json::from_slice(&content)?;
|
||||
|
||||
// Decrypt key material if master cipher is available
|
||||
let key_material = if let Some(ref cipher) = self.master_cipher {
|
||||
let _key_material = if let Some(ref cipher) = self.master_cipher {
|
||||
if stored_key.nonce.len() != 12 {
|
||||
return Err(KmsError::cryptographic_error("nonce", "Invalid nonce length"));
|
||||
}
|
||||
@@ -128,29 +135,14 @@ impl LocalKmsClient {
|
||||
nonce_array.copy_from_slice(&stored_key.nonce);
|
||||
let nonce = Nonce::from(nonce_array);
|
||||
|
||||
// Decode base64 string to bytes
|
||||
let encrypted_bytes = BASE64
|
||||
.decode(&stored_key.encrypted_key_material)
|
||||
.map_err(|e| KmsError::cryptographic_error("base64_decode", e.to_string()))?;
|
||||
|
||||
cipher
|
||||
.decrypt(&nonce, encrypted_bytes.as_ref())
|
||||
.decrypt(&nonce, stored_key.encrypted_key_material.as_ref())
|
||||
.map_err(|e| KmsError::cryptographic_error("decrypt", e.to_string()))?
|
||||
} else {
|
||||
// Decode base64 string to bytes when no encryption
|
||||
BASE64
|
||||
.decode(&stored_key.encrypted_key_material)
|
||||
.map_err(|e| KmsError::cryptographic_error("base64_decode", e.to_string()))?
|
||||
stored_key.encrypted_key_material
|
||||
};
|
||||
|
||||
Ok((stored_key, key_material))
|
||||
}
|
||||
|
||||
/// Load a master key from disk
|
||||
async fn load_master_key(&self, key_id: &str) -> Result<MasterKeyInfo> {
|
||||
let (stored_key, _key_material) = self.decode_stored_key(key_id).await?;
|
||||
|
||||
Ok(MasterKeyInfo {
|
||||
Ok(MasterKey {
|
||||
key_id: stored_key.key_id,
|
||||
version: stored_key.version,
|
||||
algorithm: stored_key.algorithm,
|
||||
@@ -165,7 +157,7 @@ impl LocalKmsClient {
|
||||
}
|
||||
|
||||
/// Save a master key to disk
|
||||
async fn save_master_key(&self, master_key: &MasterKeyInfo, key_material: &[u8]) -> Result<()> {
|
||||
async fn save_master_key(&self, master_key: &MasterKey, key_material: &[u8]) -> Result<()> {
|
||||
let key_path = self.master_key_path(&master_key.key_id);
|
||||
|
||||
// Encrypt key material if master cipher is available
|
||||
@@ -177,11 +169,9 @@ impl LocalKmsClient {
|
||||
let encrypted = cipher
|
||||
.encrypt(&nonce, key_material)
|
||||
.map_err(|e| KmsError::cryptographic_error("encrypt", e.to_string()))?;
|
||||
// Encode encrypted bytes to base64 string
|
||||
(BASE64.encode(&encrypted), nonce.to_vec())
|
||||
(encrypted, nonce.to_vec())
|
||||
} else {
|
||||
// Encode key material to base64 string when no encryption
|
||||
(BASE64.encode(key_material), Vec::new())
|
||||
(key_material.to_vec(), Vec::new())
|
||||
};
|
||||
|
||||
let stored_key = StoredMasterKey {
|
||||
@@ -219,9 +209,39 @@ impl LocalKmsClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Generate a random 256-bit key
|
||||
fn generate_key_material() -> Vec<u8> {
|
||||
let mut key_material = vec![0u8; 32]; // 256 bits
|
||||
rand::rng().fill(&mut key_material[..]);
|
||||
key_material
|
||||
}
|
||||
|
||||
/// Get the actual key material for a master key
|
||||
async fn get_key_material(&self, key_id: &str) -> Result<Vec<u8>> {
|
||||
let (_stored_key, key_material) = self.decode_stored_key(key_id).await?;
|
||||
let key_path = self.master_key_path(key_id);
|
||||
|
||||
if !key_path.exists() {
|
||||
return Err(KmsError::key_not_found(key_id));
|
||||
}
|
||||
|
||||
let content = fs::read(&key_path).await?;
|
||||
let stored_key: StoredMasterKey = serde_json::from_slice(&content)?;
|
||||
|
||||
// Decrypt key material if master cipher is available
|
||||
let key_material = if let Some(ref cipher) = self.master_cipher {
|
||||
if stored_key.nonce.len() != 12 {
|
||||
return Err(KmsError::cryptographic_error("nonce", "Invalid nonce length"));
|
||||
}
|
||||
let mut nonce_array = [0u8; 12];
|
||||
nonce_array.copy_from_slice(&stored_key.nonce);
|
||||
let nonce = Nonce::from(nonce_array);
|
||||
cipher
|
||||
.decrypt(&nonce, stored_key.encrypted_key_material.as_ref())
|
||||
.map_err(|e| KmsError::cryptographic_error("decrypt", e.to_string()))?
|
||||
} else {
|
||||
stored_key.encrypted_key_material
|
||||
};
|
||||
|
||||
Ok(key_material)
|
||||
}
|
||||
|
||||
@@ -229,22 +249,53 @@ impl LocalKmsClient {
|
||||
async fn encrypt_with_master_key(&self, key_id: &str, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> {
|
||||
// Load the actual master key material
|
||||
let key_material = self.get_key_material(key_id).await?;
|
||||
self.dek_crypto.encrypt(&key_material, plaintext).await
|
||||
let key = Key::<Aes256Gcm>::try_from(key_material.as_slice())
|
||||
.map_err(|_| KmsError::cryptographic_error("key", "Invalid key length"))?;
|
||||
let cipher = Aes256Gcm::new(&key);
|
||||
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::rng().fill(&mut nonce_bytes[..]);
|
||||
|
||||
let nonce = Nonce::from(nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(&nonce, plaintext)
|
||||
.map_err(|e| KmsError::cryptographic_error("encrypt", e.to_string()))?;
|
||||
|
||||
Ok((ciphertext, nonce_bytes.to_vec()))
|
||||
}
|
||||
|
||||
/// Decrypt data using a master key
|
||||
async fn decrypt_with_master_key(&self, key_id: &str, ciphertext: &[u8], nonce: &[u8]) -> Result<Vec<u8>> {
|
||||
if nonce.len() != 12 {
|
||||
return Err(KmsError::cryptographic_error("nonce", "Invalid nonce length"));
|
||||
}
|
||||
// Load the actual master key material
|
||||
let key_material = self.get_key_material(key_id).await?;
|
||||
self.dek_crypto.decrypt(&key_material, ciphertext, nonce).await
|
||||
let key = Key::<Aes256Gcm>::try_from(key_material.as_slice())
|
||||
.map_err(|_| KmsError::cryptographic_error("key", "Invalid key length"))?;
|
||||
let cipher = Aes256Gcm::new(&key);
|
||||
|
||||
let mut nonce_array = [0u8; 12];
|
||||
nonce_array.copy_from_slice(nonce);
|
||||
let nonce_ref = Nonce::from(nonce_array);
|
||||
|
||||
let plaintext = cipher
|
||||
.decrypt(&nonce_ref, ciphertext)
|
||||
.map_err(|e| KmsError::cryptographic_error("decrypt", e.to_string()))?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl KmsClient for LocalKmsClient {
|
||||
async fn generate_data_key(&self, request: &GenerateKeyRequest, _context: Option<&OperationContext>) -> Result<DataKeyInfo> {
|
||||
async fn generate_data_key(&self, request: &GenerateKeyRequest, context: Option<&OperationContext>) -> Result<DataKey> {
|
||||
debug!("Generating data key for master key: {}", request.master_key_id);
|
||||
|
||||
// Verify master key exists
|
||||
let _master_key = self.describe_key(&request.master_key_id, context).await?;
|
||||
|
||||
// Generate random data key material
|
||||
let key_length = match request.key_spec.as_str() {
|
||||
"AES_256" => 32,
|
||||
@@ -258,7 +309,7 @@ impl KmsClient for LocalKmsClient {
|
||||
// Encrypt the data key with the master key
|
||||
let (encrypted_key, nonce) = self.encrypt_with_master_key(&request.master_key_id, &plaintext_key).await?;
|
||||
|
||||
// Create data key envelope with master key version for rotation support
|
||||
// Create data key envelope
|
||||
let envelope = DataKeyEnvelope {
|
||||
key_id: uuid::Uuid::new_v4().to_string(),
|
||||
master_key_id: request.master_key_id.clone(),
|
||||
@@ -272,7 +323,7 @@ impl KmsClient for LocalKmsClient {
|
||||
// Serialize the envelope as the ciphertext
|
||||
let ciphertext = serde_json::to_vec(&envelope)?;
|
||||
|
||||
let data_key = DataKeyInfo::new(envelope.key_id, 1, Some(plaintext_key), ciphertext, request.key_spec.clone());
|
||||
let data_key = DataKey::new(envelope.key_id, 1, Some(plaintext_key), ciphertext, request.key_spec.clone());
|
||||
|
||||
info!("Generated data key for master key: {}", request.master_key_id);
|
||||
Ok(data_key)
|
||||
@@ -307,19 +358,15 @@ impl KmsClient for LocalKmsClient {
|
||||
let envelope: DataKeyEnvelope = serde_json::from_slice(&request.ciphertext)?;
|
||||
|
||||
// Verify encryption context matches
|
||||
// Check that all keys in envelope.encryption_context are present in request.encryption_context
|
||||
// and their values match. This ensures the context used for decryption matches what was used for encryption.
|
||||
for (key, expected_value) in &envelope.encryption_context {
|
||||
if let Some(actual_value) = request.encryption_context.get(key) {
|
||||
if actual_value != expected_value {
|
||||
return Err(KmsError::context_mismatch(format!(
|
||||
"Context mismatch for key '{key}': expected '{expected_value}', got '{actual_value}'"
|
||||
)));
|
||||
}
|
||||
} else {
|
||||
// If request.encryption_context is empty, allow decryption (backward compatibility)
|
||||
// Otherwise, require all envelope context keys to be present
|
||||
if !request.encryption_context.is_empty() {
|
||||
if !request.encryption_context.is_empty() {
|
||||
for (key, expected_value) in &request.encryption_context {
|
||||
if let Some(actual_value) = envelope.encryption_context.get(key) {
|
||||
if actual_value != expected_value {
|
||||
return Err(KmsError::context_mismatch(format!(
|
||||
"Context mismatch for key '{key}': expected '{expected_value}', got '{actual_value}'"
|
||||
)));
|
||||
}
|
||||
} else {
|
||||
return Err(KmsError::context_mismatch(format!("Missing context key '{key}'")));
|
||||
}
|
||||
}
|
||||
@@ -334,7 +381,7 @@ impl KmsClient for LocalKmsClient {
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
async fn create_key(&self, key_id: &str, algorithm: &str, context: Option<&OperationContext>) -> Result<MasterKeyInfo> {
|
||||
async fn create_key(&self, key_id: &str, algorithm: &str, context: Option<&OperationContext>) -> Result<MasterKey> {
|
||||
debug!("Creating master key: {}", key_id);
|
||||
|
||||
// Check if key already exists
|
||||
@@ -348,13 +395,13 @@ impl KmsClient for LocalKmsClient {
|
||||
}
|
||||
|
||||
// Generate key material
|
||||
let key_material = generate_key_material(algorithm)?;
|
||||
let key_material = Self::generate_key_material();
|
||||
|
||||
let created_by = context
|
||||
.map(|ctx| ctx.principal.clone())
|
||||
.unwrap_or_else(|| "local-kms".to_string());
|
||||
|
||||
let master_key = MasterKeyInfo::new_with_description(key_id.to_string(), algorithm.to_string(), Some(created_by), None);
|
||||
let master_key = MasterKey::new_with_description(key_id.to_string(), algorithm.to_string(), Some(created_by), None);
|
||||
|
||||
// Save to disk
|
||||
self.save_master_key(&master_key, &key_material).await?;
|
||||
@@ -442,7 +489,7 @@ impl KmsClient for LocalKmsClient {
|
||||
|
||||
// For simplicity, we'll regenerate key material
|
||||
// In a real implementation, we'd preserve the original key material
|
||||
let key_material = generate_key_material(&master_key.algorithm)?;
|
||||
let key_material = Self::generate_key_material();
|
||||
self.save_master_key(&master_key, &key_material).await?;
|
||||
|
||||
// Update cache
|
||||
@@ -459,7 +506,7 @@ impl KmsClient for LocalKmsClient {
|
||||
let mut master_key = self.load_master_key(key_id).await?;
|
||||
master_key.status = KeyStatus::Disabled;
|
||||
|
||||
let key_material = generate_key_material(&master_key.algorithm)?;
|
||||
let key_material = Self::generate_key_material();
|
||||
self.save_master_key(&master_key, &key_material).await?;
|
||||
|
||||
// Update cache
|
||||
@@ -481,7 +528,7 @@ impl KmsClient for LocalKmsClient {
|
||||
let mut master_key = self.load_master_key(key_id).await?;
|
||||
master_key.status = KeyStatus::PendingDeletion;
|
||||
|
||||
let key_material = generate_key_material(&master_key.algorithm)?;
|
||||
let key_material = Self::generate_key_material();
|
||||
self.save_master_key(&master_key, &key_material).await?;
|
||||
|
||||
// Update cache
|
||||
@@ -498,7 +545,7 @@ impl KmsClient for LocalKmsClient {
|
||||
let mut master_key = self.load_master_key(key_id).await?;
|
||||
master_key.status = KeyStatus::Active;
|
||||
|
||||
let key_material = generate_key_material(&master_key.algorithm)?;
|
||||
let key_material = Self::generate_key_material();
|
||||
self.save_master_key(&master_key, &key_material).await?;
|
||||
|
||||
// Update cache
|
||||
@@ -509,7 +556,7 @@ impl KmsClient for LocalKmsClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn rotate_key(&self, key_id: &str, _context: Option<&OperationContext>) -> Result<MasterKeyInfo> {
|
||||
async fn rotate_key(&self, key_id: &str, _context: Option<&OperationContext>) -> Result<MasterKey> {
|
||||
debug!("Rotating key: {}", key_id);
|
||||
|
||||
let mut master_key = self.load_master_key(key_id).await?;
|
||||
@@ -517,7 +564,7 @@ impl KmsClient for LocalKmsClient {
|
||||
master_key.rotated_at = Some(chrono::Utc::now());
|
||||
|
||||
// Generate new key material
|
||||
let key_material = generate_key_material(&master_key.algorithm)?;
|
||||
let key_material = Self::generate_key_material();
|
||||
self.save_master_key(&master_key, &key_material).await?;
|
||||
|
||||
// Update cache
|
||||
@@ -577,13 +624,12 @@ impl KmsBackend for LocalKmsBackend {
|
||||
|
||||
// Create master key with description directly
|
||||
let _master_key = {
|
||||
let algorithm = "AES_256";
|
||||
// Generate key material
|
||||
let key_material = generate_key_material(algorithm)?;
|
||||
let key_material = LocalKmsClient::generate_key_material();
|
||||
|
||||
let master_key = MasterKeyInfo::new_with_description(
|
||||
let master_key = MasterKey::new_with_description(
|
||||
key_id.clone(),
|
||||
algorithm.to_string(),
|
||||
"AES_256".to_string(),
|
||||
Some("local-kms".to_string()),
|
||||
request.description.clone(),
|
||||
);
|
||||
@@ -747,12 +793,28 @@ impl KmsBackend for LocalKmsBackend {
|
||||
};
|
||||
|
||||
// Save the updated key to disk - preserve existing key material!
|
||||
// Load and decode the stored key to get the existing key material
|
||||
let (_stored_key, existing_key_material) = self
|
||||
.client
|
||||
.decode_stored_key(key_id)
|
||||
// Load the stored key from disk to get the existing key material
|
||||
let key_path = self.client.master_key_path(key_id);
|
||||
let content = tokio::fs::read(&key_path)
|
||||
.await
|
||||
.map_err(|e| KmsError::internal_error(format!("Failed to decode key: {e}")))?;
|
||||
.map_err(|e| KmsError::internal_error(format!("Failed to read key file: {e}")))?;
|
||||
let stored_key: StoredMasterKey =
|
||||
serde_json::from_slice(&content).map_err(|e| KmsError::internal_error(format!("Failed to parse stored key: {e}")))?;
|
||||
|
||||
// Decrypt the existing key material to preserve it
|
||||
let existing_key_material = if let Some(ref cipher) = self.client.master_cipher {
|
||||
if stored_key.nonce.len() != 12 {
|
||||
return Err(KmsError::cryptographic_error("nonce", "Invalid nonce length"));
|
||||
}
|
||||
let mut nonce_array = [0u8; 12];
|
||||
nonce_array.copy_from_slice(&stored_key.nonce);
|
||||
let nonce = Nonce::from(nonce_array);
|
||||
cipher
|
||||
.decrypt(&nonce, stored_key.encrypted_key_material.as_ref())
|
||||
.map_err(|e| KmsError::cryptographic_error("decrypt", e.to_string()))?
|
||||
} else {
|
||||
stored_key.encrypted_key_material
|
||||
};
|
||||
|
||||
self.client.save_master_key(&master_key, &existing_key_material).await?;
|
||||
|
||||
@@ -798,14 +860,8 @@ impl KmsBackend for LocalKmsBackend {
|
||||
master_key.status = KeyStatus::Active;
|
||||
|
||||
// Save the updated key to disk - this is the missing critical step!
|
||||
// Preserve existing key material instead of generating new one
|
||||
let (_stored_key, existing_key_material) = self
|
||||
.client
|
||||
.decode_stored_key(key_id)
|
||||
.await
|
||||
.map_err(|e| KmsError::internal_error(format!("Failed to decode key: {e}")))?;
|
||||
|
||||
self.client.save_master_key(&master_key, &existing_key_material).await?;
|
||||
let key_material = LocalKmsClient::generate_key_material();
|
||||
self.client.save_master_key(&master_key, &key_material).await?;
|
||||
|
||||
// Update cache
|
||||
let mut cache = self.client.key_cache.write().await;
|
||||
|
||||
@@ -36,7 +36,7 @@ pub trait KmsClient: Send + Sync {
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns a DataKey containing both plaintext and encrypted key material
|
||||
async fn generate_data_key(&self, request: &GenerateKeyRequest, context: Option<&OperationContext>) -> Result<DataKeyInfo>;
|
||||
async fn generate_data_key(&self, request: &GenerateKeyRequest, context: Option<&OperationContext>) -> Result<DataKey>;
|
||||
|
||||
/// Encrypt data directly using a master key
|
||||
///
|
||||
@@ -67,7 +67,7 @@ pub trait KmsClient: Send + Sync {
|
||||
/// * `key_id` - Unique identifier for the new key
|
||||
/// * `algorithm` - Key algorithm (e.g., "AES_256")
|
||||
/// * `context` - Optional operation context for auditing
|
||||
async fn create_key(&self, key_id: &str, algorithm: &str, context: Option<&OperationContext>) -> Result<MasterKeyInfo>;
|
||||
async fn create_key(&self, key_id: &str, algorithm: &str, context: Option<&OperationContext>) -> Result<MasterKey>;
|
||||
|
||||
/// Get information about a specific key
|
||||
///
|
||||
@@ -139,7 +139,7 @@ pub trait KmsClient: Send + Sync {
|
||||
/// # Arguments
|
||||
/// * `key_id` - The key identifier
|
||||
/// * `context` - Optional operation context for auditing
|
||||
async fn rotate_key(&self, key_id: &str, context: Option<&OperationContext>) -> Result<MasterKeyInfo>;
|
||||
async fn rotate_key(&self, key_id: &str, context: Option<&OperationContext>) -> Result<MasterKey>;
|
||||
|
||||
/// Health check
|
||||
///
|
||||
|
||||
@@ -16,11 +16,11 @@
|
||||
|
||||
use crate::backends::{BackendInfo, KmsBackend, KmsClient};
|
||||
use crate::config::{KmsConfig, VaultConfig};
|
||||
use crate::encryption::{AesDekCrypto, DataKeyEnvelope, DekCrypto, generate_key_material};
|
||||
use crate::error::{KmsError, Result};
|
||||
use crate::types::*;
|
||||
use async_trait::async_trait;
|
||||
use base64::{Engine as _, engine::general_purpose};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use tracing::{debug, info, warn};
|
||||
@@ -37,8 +37,6 @@ pub struct VaultKmsClient {
|
||||
kv_mount: String,
|
||||
/// Path prefix for storing keys
|
||||
key_path_prefix: String,
|
||||
/// DEK encryption implementation
|
||||
dek_crypto: AesDekCrypto,
|
||||
}
|
||||
|
||||
/// Key data stored in Vault
|
||||
@@ -103,7 +101,6 @@ impl VaultKmsClient {
|
||||
kv_mount: config.kv_mount.clone(),
|
||||
key_path_prefix: config.key_path_prefix.clone(),
|
||||
config,
|
||||
dek_crypto: AesDekCrypto::new(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -112,6 +109,19 @@ impl VaultKmsClient {
|
||||
format!("{}/{}", self.key_path_prefix, key_id)
|
||||
}
|
||||
|
||||
/// Generate key material for the given algorithm
|
||||
fn generate_key_material(algorithm: &str) -> Result<Vec<u8>> {
|
||||
let key_size = match algorithm {
|
||||
"AES_256" => 32,
|
||||
"AES_128" => 16,
|
||||
_ => return Err(KmsError::unsupported_algorithm(algorithm)),
|
||||
};
|
||||
|
||||
let mut key_material = vec![0u8; key_size];
|
||||
rand::rng().fill_bytes(&mut key_material);
|
||||
Ok(key_material)
|
||||
}
|
||||
|
||||
/// Encrypt key material using Vault's transit engine
|
||||
async fn encrypt_key_material(&self, key_material: &[u8]) -> Result<String> {
|
||||
// For simplicity, we'll base64 encode the key material
|
||||
@@ -128,64 +138,6 @@ impl VaultKmsClient {
|
||||
.map_err(|e| KmsError::cryptographic_error("decrypt", e.to_string()))
|
||||
}
|
||||
|
||||
/// Get the actual key material for a master key
|
||||
async fn get_key_material(&self, key_id: &str) -> Result<Vec<u8>> {
|
||||
let mut key_data = self.get_key_data(key_id).await?;
|
||||
|
||||
// If encrypted_key_material is empty, generate and store it (fix for old keys)
|
||||
if key_data.encrypted_key_material.is_empty() {
|
||||
warn!("Key {} has empty encrypted_key_material, generating and storing new key material", key_id);
|
||||
let key_material = generate_key_material(&key_data.algorithm)?;
|
||||
key_data.encrypted_key_material = self.encrypt_key_material(&key_material).await?;
|
||||
// Store the updated key data back to Vault
|
||||
self.store_key_data(key_id, &key_data).await?;
|
||||
return Ok(key_material);
|
||||
}
|
||||
|
||||
let key_material = match self.decrypt_key_material(&key_data.encrypted_key_material).await {
|
||||
Ok(km) => km,
|
||||
Err(e) => {
|
||||
warn!("Failed to decrypt key material for key {}: {}, generating new key material", key_id, e);
|
||||
let new_key_material = generate_key_material(&key_data.algorithm)?;
|
||||
key_data.encrypted_key_material = self.encrypt_key_material(&new_key_material).await?;
|
||||
// Store the updated key data back to Vault
|
||||
self.store_key_data(key_id, &key_data).await?;
|
||||
return Ok(new_key_material);
|
||||
}
|
||||
};
|
||||
|
||||
// Validate key material length (should be 32 bytes for AES-256)
|
||||
if key_material.len() != 32 {
|
||||
// Try to fix: generate new key material if length is wrong
|
||||
warn!(
|
||||
"Key {} has invalid key material length ({} bytes), generating new key material",
|
||||
key_id,
|
||||
key_material.len()
|
||||
);
|
||||
let new_key_material = generate_key_material(&key_data.algorithm)?;
|
||||
key_data.encrypted_key_material = self.encrypt_key_material(&new_key_material).await?;
|
||||
// Store the updated key data back to Vault
|
||||
self.store_key_data(key_id, &key_data).await?;
|
||||
return Ok(new_key_material);
|
||||
}
|
||||
|
||||
Ok(key_material)
|
||||
}
|
||||
|
||||
/// Encrypt data using a master key
|
||||
async fn encrypt_with_master_key(&self, key_id: &str, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> {
|
||||
// Load the actual master key material
|
||||
let key_material = self.get_key_material(key_id).await?;
|
||||
self.dek_crypto.encrypt(&key_material, plaintext).await
|
||||
}
|
||||
|
||||
/// Decrypt data using a master key
|
||||
async fn decrypt_with_master_key(&self, key_id: &str, ciphertext: &[u8], nonce: &[u8]) -> Result<Vec<u8>> {
|
||||
// Load the actual master key material
|
||||
let key_material = self.get_key_material(key_id).await?;
|
||||
self.dek_crypto.decrypt(&key_material, ciphertext, nonce).await
|
||||
}
|
||||
|
||||
/// Store key data in Vault
|
||||
async fn store_key_data(&self, key_id: &str, key_data: &VaultKeyData) -> Result<()> {
|
||||
let path = self.key_path(key_id);
|
||||
@@ -201,36 +153,19 @@ impl VaultKmsClient {
|
||||
async fn store_key_metadata(&self, key_id: &str, request: &CreateKeyRequest) -> Result<()> {
|
||||
debug!("Storing key metadata for {}, input tags: {:?}", key_id, request.tags);
|
||||
|
||||
// Get existing key data to preserve encrypted_key_material and other fields
|
||||
// This is called after create_key, so the key should already exist
|
||||
let mut existing_key_data = self.get_key_data(key_id).await?;
|
||||
|
||||
// If encrypted_key_material is empty, generate it (this handles the case where
|
||||
// an old key was created without proper key material)
|
||||
if existing_key_data.encrypted_key_material.is_empty() {
|
||||
warn!("Key {} has empty encrypted_key_material, generating new key material", key_id);
|
||||
let key_material = generate_key_material(&existing_key_data.algorithm)?;
|
||||
existing_key_data.encrypted_key_material = self.encrypt_key_material(&key_material).await?;
|
||||
}
|
||||
|
||||
// Update only the metadata fields, preserving the encrypted_key_material
|
||||
let key_data = VaultKeyData {
|
||||
algorithm: existing_key_data.algorithm.clone(),
|
||||
algorithm: "AES_256".to_string(),
|
||||
usage: request.key_usage.clone(),
|
||||
created_at: existing_key_data.created_at,
|
||||
status: existing_key_data.status,
|
||||
version: existing_key_data.version,
|
||||
created_at: chrono::Utc::now(),
|
||||
status: KeyStatus::Active,
|
||||
version: 1,
|
||||
description: request.description.clone(),
|
||||
metadata: existing_key_data.metadata.clone(),
|
||||
metadata: HashMap::new(),
|
||||
tags: request.tags.clone(),
|
||||
encrypted_key_material: existing_key_data.encrypted_key_material.clone(), // Preserve the key material
|
||||
encrypted_key_material: String::new(), // Not used for transit keys
|
||||
};
|
||||
|
||||
debug!(
|
||||
"VaultKeyData tags before storage: {:?}, encrypted_key_material length: {}",
|
||||
key_data.tags,
|
||||
key_data.encrypted_key_material.len()
|
||||
);
|
||||
debug!("VaultKeyData tags before storage: {:?}", key_data.tags);
|
||||
self.store_key_data(key_id, &key_data).await
|
||||
}
|
||||
|
||||
@@ -289,33 +224,36 @@ impl VaultKmsClient {
|
||||
|
||||
#[async_trait]
|
||||
impl KmsClient for VaultKmsClient {
|
||||
async fn generate_data_key(&self, request: &GenerateKeyRequest, _context: Option<&OperationContext>) -> Result<DataKeyInfo> {
|
||||
async fn generate_data_key(&self, request: &GenerateKeyRequest, context: Option<&OperationContext>) -> Result<DataKey> {
|
||||
debug!("Generating data key for master key: {}", request.master_key_id);
|
||||
|
||||
// Generate random data key material using the existing method
|
||||
let plaintext_key = generate_key_material(&request.key_spec)?;
|
||||
// Verify master key exists
|
||||
let _master_key = self.describe_key(&request.master_key_id, context).await?;
|
||||
|
||||
// Encrypt the data key with the master key
|
||||
let (encrypted_key, nonce) = self.encrypt_with_master_key(&request.master_key_id, &plaintext_key).await?;
|
||||
|
||||
// Create data key envelope with master key version for rotation support
|
||||
let envelope = DataKeyEnvelope {
|
||||
key_id: uuid::Uuid::new_v4().to_string(),
|
||||
master_key_id: request.master_key_id.clone(),
|
||||
key_spec: request.key_spec.clone(),
|
||||
encrypted_key: encrypted_key.clone(),
|
||||
nonce,
|
||||
encryption_context: request.encryption_context.clone(),
|
||||
created_at: chrono::Utc::now(),
|
||||
// Generate data key material
|
||||
let key_length = match request.key_spec.as_str() {
|
||||
"AES_256" => 32,
|
||||
"AES_128" => 16,
|
||||
_ => return Err(KmsError::unsupported_algorithm(&request.key_spec)),
|
||||
};
|
||||
|
||||
// Serialize the envelope as the ciphertext
|
||||
let ciphertext = serde_json::to_vec(&envelope)?;
|
||||
let mut plaintext_key = vec![0u8; key_length];
|
||||
rand::rng().fill_bytes(&mut plaintext_key);
|
||||
|
||||
let data_key = DataKeyInfo::new(envelope.key_id, 1, Some(plaintext_key), ciphertext, request.key_spec.clone());
|
||||
// Encrypt the data key with the master key
|
||||
let encrypted_key = self.encrypt_key_material(&plaintext_key).await?;
|
||||
|
||||
info!("Generated data key for master key: {}", request.master_key_id);
|
||||
Ok(data_key)
|
||||
Ok(DataKey {
|
||||
key_id: request.master_key_id.clone(),
|
||||
version: 1,
|
||||
plaintext: Some(plaintext_key),
|
||||
ciphertext: general_purpose::STANDARD
|
||||
.decode(&encrypted_key)
|
||||
.map_err(|e| KmsError::cryptographic_error("decode", e.to_string()))?,
|
||||
key_spec: request.key_spec.clone(),
|
||||
metadata: request.encryption_context.clone(),
|
||||
created_at: chrono::Utc::now(),
|
||||
})
|
||||
}
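A note for reviewers of the hunk above: the envelope pattern it implements — wrap a freshly generated DEK under a master key, then serialize the envelope and return it as the opaque ciphertext — can be sketched independently of the Vault client. The following is a minimal, illustrative round trip only, assuming the aes-gcm, rand 0.9, serde and serde_json crates already used in this diff; the `Envelope` struct, helper names and key id below are hypothetical, not this crate's API.

```rust
use aes_gcm::{
    Aes256Gcm, Key, Nonce,
    aead::{Aead, KeyInit},
};
use rand::RngCore;
use serde::{Deserialize, Serialize};

// Hypothetical envelope shape: which KEK wrapped the DEK, the wrapped DEK, and the GCM nonce.
#[derive(Serialize, Deserialize)]
struct Envelope {
    master_key_id: String,
    encrypted_key: Vec<u8>,
    nonce: Vec<u8>,
}

/// Generate a fresh 256-bit DEK, wrap it under the master key, and serialize the envelope.
fn wrap_dek(master_key: &[u8; 32], master_key_id: &str) -> (Vec<u8>, Vec<u8>) {
    let mut dek = vec![0u8; 32];
    rand::rng().fill_bytes(&mut dek);

    let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(master_key));
    let mut nonce_bytes = [0u8; 12];
    rand::rng().fill_bytes(&mut nonce_bytes);
    let encrypted_key = cipher
        .encrypt(&Nonce::from(nonce_bytes), dek.as_slice())
        .expect("wrap DEK");

    let envelope = Envelope {
        master_key_id: master_key_id.to_string(),
        encrypted_key,
        nonce: nonce_bytes.to_vec(),
    };
    // The serialized envelope is what callers treat as the opaque "ciphertext".
    (dek, serde_json::to_vec(&envelope).expect("serialize envelope"))
}

/// Parse the stored envelope and unwrap the DEK with the same master key.
fn unwrap_dek(master_key: &[u8; 32], stored: &[u8]) -> Vec<u8> {
    let envelope: Envelope = serde_json::from_slice(stored).expect("parse envelope");
    let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(master_key));
    cipher
        .decrypt(Nonce::from_slice(&envelope.nonce), envelope.encrypted_key.as_slice())
        .expect("unwrap DEK")
}

fn main() {
    let master_key = [7u8; 32];
    let (dek, stored) = wrap_dek(&master_key, "example-master-key");
    assert_eq!(unwrap_dek(&master_key, &stored), dek);
}
```

In the backend above, the master key material is instead loaded via get_key_material and the AES-GCM step is delegated to AesDekCrypto; the sketch only shows the shape of the envelope round trip.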
async fn encrypt(&self, request: &EncryptRequest, _context: Option<&OperationContext>) -> Result<EncryptResponse> {
|
||||
@@ -340,42 +278,15 @@ impl KmsClient for VaultKmsClient {
|
||||
})
|
||||
}
|
||||
|
||||
async fn decrypt(&self, request: &DecryptRequest, _context: Option<&OperationContext>) -> Result<Vec<u8>> {
|
||||
async fn decrypt(&self, _request: &DecryptRequest, _context: Option<&OperationContext>) -> Result<Vec<u8>> {
|
||||
debug!("Decrypting data");
|
||||
|
||||
// Parse the data key envelope from ciphertext
|
||||
let envelope: DataKeyEnvelope = serde_json::from_slice(&request.ciphertext)
|
||||
.map_err(|e| KmsError::cryptographic_error("parse", format!("Failed to parse data key envelope: {e}")))?;
|
||||
|
||||
// Verify encryption context matches
|
||||
// Check that all keys in envelope.encryption_context are present in request.encryption_context
|
||||
// and their values match. This ensures the context used for decryption matches what was used for encryption.
|
||||
for (key, expected_value) in &envelope.encryption_context {
|
||||
if let Some(actual_value) = request.encryption_context.get(key) {
|
||||
if actual_value != expected_value {
|
||||
return Err(KmsError::context_mismatch(format!(
|
||||
"Context mismatch for key '{key}': expected '{expected_value}', got '{actual_value}'"
|
||||
)));
|
||||
}
|
||||
} else {
|
||||
// If request.encryption_context is empty, allow decryption (backward compatibility)
|
||||
// Otherwise, require all envelope context keys to be present
|
||||
if !request.encryption_context.is_empty() {
|
||||
return Err(KmsError::context_mismatch(format!("Missing context key '{key}'")));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Decrypt the data key
|
||||
let plaintext = self
|
||||
.decrypt_with_master_key(&envelope.master_key_id, &envelope.encrypted_key, &envelope.nonce)
|
||||
.await?;
|
||||
|
||||
info!("Successfully decrypted data");
|
||||
Ok(plaintext)
|
||||
// For this simple implementation, we assume the key ID is embedded in the ciphertext metadata
|
||||
// In practice, you'd extract this from the ciphertext envelope
|
||||
Err(KmsError::invalid_operation("Decrypt not fully implemented for Vault backend"))
|
||||
}
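The context-verification rule spelled out in this decrypt hunk — every context key recorded at encryption time must be present with the same value at decryption time, while an empty request context is tolerated for backward compatibility — reduces to a small predicate. A self-contained sketch using only the standard library; the function name is illustrative, not part of the crate:

```rust
use std::collections::HashMap;

/// Returns true when the request context satisfies the envelope context:
/// an empty request context is accepted (legacy callers), otherwise every
/// envelope key must be present in the request with an identical value.
fn context_matches(envelope: &HashMap<String, String>, request: &HashMap<String, String>) -> bool {
    if request.is_empty() {
        return true;
    }
    envelope.iter().all(|(k, v)| request.get(k) == Some(v))
}

fn main() {
    let mut env = HashMap::new();
    env.insert("bucket".to_string(), "photos".to_string());

    let mut ok = HashMap::new();
    ok.insert("bucket".to_string(), "photos".to_string());
    let mut bad = HashMap::new();
    bad.insert("bucket".to_string(), "videos".to_string());

    assert!(context_matches(&env, &ok));
    assert!(context_matches(&env, &HashMap::new())); // backward compatibility
    assert!(!context_matches(&env, &bad));
}
```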
async fn create_key(&self, key_id: &str, algorithm: &str, _context: Option<&OperationContext>) -> Result<MasterKeyInfo> {
|
||||
async fn create_key(&self, key_id: &str, algorithm: &str, _context: Option<&OperationContext>) -> Result<MasterKey> {
|
||||
debug!("Creating master key: {} with algorithm: {}", key_id, algorithm);
|
||||
|
||||
// Check if key already exists
|
||||
@@ -384,7 +295,7 @@ impl KmsClient for VaultKmsClient {
|
||||
}
|
||||
|
||||
// Generate key material
|
||||
let key_material = generate_key_material(algorithm)?;
|
||||
let key_material = Self::generate_key_material(algorithm)?;
|
||||
let encrypted_material = self.encrypt_key_material(&key_material).await?;
|
||||
|
||||
// Create key data
|
||||
@@ -403,7 +314,7 @@ impl KmsClient for VaultKmsClient {
|
||||
// Store in Vault
|
||||
self.store_key_data(key_id, &key_data).await?;
|
||||
|
||||
let master_key = MasterKeyInfo {
|
||||
let master_key = MasterKey {
|
||||
key_id: key_id.to_string(),
|
||||
version: key_data.version,
|
||||
algorithm: key_data.algorithm.clone(),
|
||||
@@ -526,19 +437,19 @@ impl KmsClient for VaultKmsClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn rotate_key(&self, key_id: &str, _context: Option<&OperationContext>) -> Result<MasterKeyInfo> {
|
||||
async fn rotate_key(&self, key_id: &str, _context: Option<&OperationContext>) -> Result<MasterKey> {
|
||||
debug!("Rotating key: {}", key_id);
|
||||
|
||||
let mut key_data = self.get_key_data(key_id).await?;
|
||||
key_data.version += 1;
|
||||
|
||||
// Generate new key material
|
||||
let key_material = generate_key_material(&key_data.algorithm)?;
|
||||
let key_material = Self::generate_key_material(&key_data.algorithm)?;
|
||||
key_data.encrypted_key_material = self.encrypt_key_material(&key_material).await?;
|
||||
|
||||
self.store_key_data(key_id, &key_data).await?;
|
||||
|
||||
let master_key = MasterKeyInfo {
|
||||
let master_key = MasterKey {
|
||||
key_id: key_id.to_string(),
|
||||
version: key_data.version,
|
||||
algorithm: key_data.algorithm,
|
||||
|
||||
@@ -1,313 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Data Encryption Key (DEK) encryption interface and implementations
|
||||
//!
|
||||
//! This module provides a unified interface for encrypting and decrypting
|
||||
//! data encryption keys using master keys. It abstracts the encryption
|
||||
//! operations so that different backends can share the same encryption logic.
|
||||
|
||||
#![allow(dead_code)] // Trait methods may be used by implementations
|
||||
|
||||
use crate::error::{KmsError, Result};
|
||||
use async_trait::async_trait;
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Data key envelope for encrypting/decrypting data keys
|
||||
///
|
||||
/// This structure stores the encrypted DEK along with metadata needed for decryption.
|
||||
/// The `master_key_version` field records which version of the KEK (Key Encryption Key)
|
||||
/// was used to encrypt this DEK, enabling proper key rotation support.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DataKeyEnvelope {
|
||||
pub key_id: String,
|
||||
pub master_key_id: String,
|
||||
pub key_spec: String,
|
||||
pub encrypted_key: Vec<u8>,
|
||||
pub nonce: Vec<u8>,
|
||||
pub encryption_context: HashMap<String, String>,
|
||||
pub created_at: chrono::DateTime<chrono::Utc>,
|
||||
}
|
||||
|
||||
/// Trait for encrypting and decrypting data encryption keys (DEK)
|
||||
///
|
||||
/// This trait abstracts the encryption operations used to protect
|
||||
/// data encryption keys with master keys. Different implementations
|
||||
/// can use different encryption algorithms (e.g., AES-256-GCM).
|
||||
#[async_trait]
|
||||
pub trait DekCrypto: Send + Sync {
|
||||
/// Encrypt plaintext data using a master key material
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `key_material` - The master key material (raw bytes)
|
||||
/// * `plaintext` - The data to encrypt
|
||||
///
|
||||
/// # Returns
|
||||
/// A tuple of (ciphertext, nonce) where:
|
||||
/// - `ciphertext` - The encrypted data
|
||||
/// - `nonce` - The nonce used for encryption (should be stored with ciphertext)
|
||||
async fn encrypt(&self, key_material: &[u8], plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>)>;
|
||||
|
||||
/// Decrypt ciphertext data using a master key material
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `key_material` - The master key material (raw bytes)
|
||||
/// * `ciphertext` - The encrypted data
|
||||
/// * `nonce` - The nonce used for encryption
|
||||
///
|
||||
/// # Returns
|
||||
/// The decrypted plaintext data
|
||||
async fn decrypt(&self, key_material: &[u8], ciphertext: &[u8], nonce: &[u8]) -> Result<Vec<u8>>;
|
||||
|
||||
/// Get the algorithm name used by this implementation
|
||||
#[allow(dead_code)] // May be used by implementations or for debugging
|
||||
fn algorithm(&self) -> &'static str;
|
||||
|
||||
/// Get the required key material size in bytes
|
||||
#[allow(dead_code)] // May be used by implementations or for debugging
|
||||
fn key_size(&self) -> usize;
|
||||
}
|
||||
|
||||
/// AES-256-GCM implementation of DEK encryption
|
||||
pub struct AesDekCrypto;
|
||||
|
||||
impl AesDekCrypto {
|
||||
/// Create a new AES-256-GCM DEK crypto instance
|
||||
pub fn new() -> Self {
|
||||
Self
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl DekCrypto for AesDekCrypto {
|
||||
async fn encrypt(&self, key_material: &[u8], plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> {
|
||||
use aes_gcm::{
|
||||
Aes256Gcm, Key, Nonce,
|
||||
aead::{Aead, KeyInit},
|
||||
};
|
||||
|
||||
// Validate key material length
|
||||
if key_material.len() != 32 {
|
||||
return Err(KmsError::cryptographic_error(
|
||||
"key",
|
||||
format!("Invalid key length: expected 32 bytes, got {}", key_material.len()),
|
||||
));
|
||||
}
|
||||
|
||||
// Create cipher from key material
|
||||
let key =
|
||||
Key::<Aes256Gcm>::try_from(key_material).map_err(|_| KmsError::cryptographic_error("key", "Invalid key length"))?;
|
||||
let cipher = Aes256Gcm::new(&key);
|
||||
|
||||
// Generate random nonce (12 bytes for GCM)
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
rand::rng().fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from(nonce_bytes);
|
||||
|
||||
// Encrypt plaintext
|
||||
let ciphertext = cipher
|
||||
.encrypt(&nonce, plaintext)
|
||||
.map_err(|e| KmsError::cryptographic_error("encrypt", e.to_string()))?;
|
||||
|
||||
Ok((ciphertext, nonce_bytes.to_vec()))
|
||||
}
|
||||
|
||||
async fn decrypt(&self, key_material: &[u8], ciphertext: &[u8], nonce: &[u8]) -> Result<Vec<u8>> {
|
||||
use aes_gcm::{
|
||||
Aes256Gcm, Key, Nonce,
|
||||
aead::{Aead, KeyInit},
|
||||
};
|
||||
|
||||
// Validate nonce length
|
||||
if nonce.len() != 12 {
|
||||
return Err(KmsError::cryptographic_error("nonce", "Invalid nonce length: expected 12 bytes"));
|
||||
}
|
||||
|
||||
// Validate key material length
|
||||
if key_material.len() != 32 {
|
||||
return Err(KmsError::cryptographic_error(
|
||||
"key",
|
||||
format!("Invalid key length: expected 32 bytes, got {}", key_material.len()),
|
||||
));
|
||||
}
|
||||
|
||||
// Create cipher from key material
|
||||
let key =
|
||||
Key::<Aes256Gcm>::try_from(key_material).map_err(|_| KmsError::cryptographic_error("key", "Invalid key length"))?;
|
||||
let cipher = Aes256Gcm::new(&key);
|
||||
|
||||
// Convert nonce
|
||||
let mut nonce_array = [0u8; 12];
|
||||
nonce_array.copy_from_slice(nonce);
|
||||
let nonce_ref = Nonce::from(nonce_array);
|
||||
|
||||
// Decrypt ciphertext
|
||||
let plaintext = cipher
|
||||
.decrypt(&nonce_ref, ciphertext)
|
||||
.map_err(|e| KmsError::cryptographic_error("decrypt", e.to_string()))?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
#[allow(dead_code)] // Trait method, may be used by implementations
|
||||
fn algorithm(&self) -> &'static str {
|
||||
"AES-256-GCM"
|
||||
}
|
||||
|
||||
#[allow(dead_code)] // Trait method, may be used by implementations
|
||||
fn key_size(&self) -> usize {
|
||||
32 // 256 bits
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for AesDekCrypto {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate random key material for the given algorithm
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `algorithm` - The key algorithm (e.g., "AES_256", "AES_128")
|
||||
///
|
||||
/// # Returns
|
||||
/// A vector containing the generated key material
|
||||
pub fn generate_key_material(algorithm: &str) -> Result<Vec<u8>> {
|
||||
let key_size = match algorithm {
|
||||
"AES_256" => 32,
|
||||
"AES_128" => 16,
|
||||
_ => return Err(KmsError::unsupported_algorithm(algorithm)),
|
||||
};
|
||||
|
||||
let mut key_material = vec![0u8; key_size];
|
||||
rand::rng().fill_bytes(&mut key_material);
|
||||
Ok(key_material)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_aes_dek_crypto_encrypt_decrypt() {
|
||||
let crypto = AesDekCrypto::new();
|
||||
|
||||
// Generate test key material
|
||||
let key_material = generate_key_material("AES_256").expect("Failed to generate key material");
|
||||
let plaintext = b"Hello, World! This is a test message.";
|
||||
|
||||
// Test encryption
|
||||
let (ciphertext, nonce) = crypto
|
||||
.encrypt(&key_material, plaintext)
|
||||
.await
|
||||
.expect("Encryption should succeed");
|
||||
|
||||
assert!(!ciphertext.is_empty());
|
||||
assert_eq!(nonce.len(), 12);
|
||||
assert_ne!(ciphertext, plaintext);
|
||||
|
||||
// Test decryption
|
||||
let decrypted = crypto
|
||||
.decrypt(&key_material, &ciphertext, &nonce)
|
||||
.await
|
||||
.expect("Decryption should succeed");
|
||||
|
||||
assert_eq!(decrypted, plaintext);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_aes_dek_crypto_invalid_key_size() {
|
||||
let crypto = AesDekCrypto::new();
|
||||
let invalid_key = vec![0u8; 16]; // Too short
|
||||
let plaintext = b"test";
|
||||
|
||||
let result = crypto.encrypt(&invalid_key, plaintext).await;
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_aes_dek_crypto_invalid_nonce() {
|
||||
let crypto = AesDekCrypto::new();
|
||||
let key_material = generate_key_material("AES_256").expect("Failed to generate key material");
|
||||
let ciphertext = vec![0u8; 16];
|
||||
let invalid_nonce = vec![0u8; 8]; // Too short
|
||||
|
||||
let result = crypto.decrypt(&key_material, &ciphertext, &invalid_nonce).await;
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_generate_key_material() {
|
||||
let key_256 = generate_key_material("AES_256").expect("Should generate AES_256 key");
|
||||
assert_eq!(key_256.len(), 32);
|
||||
|
||||
let key_128 = generate_key_material("AES_128").expect("Should generate AES_128 key");
|
||||
assert_eq!(key_128.len(), 16);
|
||||
|
||||
// Keys should be different
|
||||
let key_256_2 = generate_key_material("AES_256").expect("Should generate AES_256 key");
|
||||
assert_ne!(key_256, key_256_2);
|
||||
|
||||
// Invalid algorithm
|
||||
assert!(generate_key_material("INVALID").is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_data_key_envelope_serialization() {
|
||||
let envelope = DataKeyEnvelope {
|
||||
key_id: "test-key-id".to_string(),
|
||||
master_key_id: "master-key-id".to_string(),
|
||||
key_spec: "AES_256".to_string(),
|
||||
encrypted_key: vec![1, 2, 3, 4],
|
||||
nonce: vec![5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
|
||||
encryption_context: {
|
||||
let mut map = HashMap::new();
|
||||
map.insert("bucket".to_string(), "test-bucket".to_string());
|
||||
map
|
||||
},
|
||||
created_at: chrono::Utc::now(),
|
||||
};
|
||||
|
||||
// Test serialization
|
||||
let serialized = serde_json::to_vec(&envelope).expect("Serialization should succeed");
|
||||
assert!(!serialized.is_empty());
|
||||
|
||||
// Test deserialization
|
||||
let deserialized: DataKeyEnvelope = serde_json::from_slice(&serialized).expect("Deserialization should succeed");
|
||||
assert_eq!(deserialized.key_id, envelope.key_id);
|
||||
assert_eq!(deserialized.master_key_id, envelope.master_key_id);
|
||||
assert_eq!(deserialized.encrypted_key, envelope.encrypted_key);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_data_key_envelope_backward_compatibility() {
|
||||
// Test that old envelopes without master_key_version can still be deserialized
|
||||
let old_envelope_json = r#"{
|
||||
"key_id": "test-key-id",
|
||||
"master_key_id": "master-key-id",
|
||||
"key_spec": "AES_256",
|
||||
"encrypted_key": [1, 2, 3, 4],
|
||||
"nonce": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
|
||||
"encryption_context": {"bucket": "test-bucket"},
|
||||
"created_at": "2024-01-01T00:00:00Z"
|
||||
}"#;
|
||||
|
||||
let deserialized: DataKeyEnvelope = serde_json::from_str(old_envelope_json).expect("Should deserialize old format");
|
||||
assert_eq!(deserialized.key_id, "test-key-id");
|
||||
assert_eq!(deserialized.master_key_id, "master-key-id");
|
||||
}
|
||||
}
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
//! Object encryption service implementation
|
||||
|
||||
pub mod ciphers;
|
||||
pub mod dek;
|
||||
mod ciphers;
|
||||
pub mod service;
|
||||
|
||||
pub use dek::{AesDekCrypto, DataKeyEnvelope, DekCrypto, generate_key_material};
|
||||
pub use service::ObjectEncryptionService;
|
||||
|
||||
@@ -273,8 +273,6 @@ impl ObjectEncryptionService {
|
||||
// Build encryption context
|
||||
let mut context = encryption_context.cloned().unwrap_or_default();
|
||||
context.insert("bucket".to_string(), bucket.to_string());
|
||||
context.insert("object_key".to_string(), object_key.to_string());
|
||||
// Backward compatibility: also include legacy "object" context key
|
||||
context.insert("object".to_string(), object_key.to_string());
|
||||
context.insert("algorithm".to_string(), algorithm.as_str().to_string());
|
||||
|
||||
@@ -63,7 +63,6 @@ pub mod config;
|
||||
mod encryption;
|
||||
mod error;
|
||||
pub mod manager;
|
||||
pub mod service;
|
||||
pub mod service_manager;
|
||||
pub mod types;
|
||||
|
||||
@@ -74,9 +73,10 @@ pub use api_types::{
|
||||
UntagKeyRequest, UntagKeyResponse, UpdateKeyDescriptionRequest, UpdateKeyDescriptionResponse,
|
||||
};
|
||||
pub use config::*;
|
||||
pub use encryption::ObjectEncryptionService;
|
||||
pub use encryption::service::DataKey;
|
||||
pub use error::{KmsError, Result};
|
||||
pub use manager::KmsManager;
|
||||
pub use service::{DataKey, ObjectEncryptionService};
|
||||
pub use service_manager::{
|
||||
KmsServiceManager, KmsServiceStatus, get_global_encryption_service, get_global_kms_service_manager,
|
||||
init_global_kms_service_manager,
|
||||
@@ -112,7 +112,6 @@ pub fn shutdown_global_services() {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::sync::Arc;
|
||||
use tempfile::TempDir;
|
||||
|
||||
#[tokio::test]
|
||||
@@ -140,91 +139,4 @@ mod tests {
|
||||
// Test stop
|
||||
manager.stop().await.expect("Stop should succeed");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_versioned_service_reconfiguration() {
|
||||
// Test versioned service reconfiguration for zero-downtime
|
||||
let manager = KmsServiceManager::new();
|
||||
|
||||
// Initial state: no version
|
||||
assert!(manager.get_service_version().await.is_none());
|
||||
|
||||
// Start first service
|
||||
let temp_dir1 = TempDir::new().expect("Failed to create temp dir");
|
||||
let config1 = KmsConfig::local(temp_dir1.path().to_path_buf());
|
||||
manager
|
||||
.configure(config1.clone())
|
||||
.await
|
||||
.expect("Configuration should succeed");
|
||||
manager.start().await.expect("Start should succeed");
|
||||
|
||||
// Verify version 1
|
||||
let version1 = manager.get_service_version().await.expect("Service should have version");
|
||||
assert_eq!(version1, 1);
|
||||
|
||||
// Get service reference (simulating ongoing operation)
|
||||
let service1 = manager.get_encryption_service().await.expect("Service should be available");
|
||||
|
||||
// Reconfigure to new service (zero-downtime)
|
||||
let temp_dir2 = TempDir::new().expect("Failed to create temp dir");
|
||||
let config2 = KmsConfig::local(temp_dir2.path().to_path_buf());
|
||||
manager.reconfigure(config2).await.expect("Reconfiguration should succeed");
|
||||
|
||||
// Verify version 2
|
||||
let version2 = manager.get_service_version().await.expect("Service should have version");
|
||||
assert_eq!(version2, 2);
|
||||
|
||||
// Old service reference should still be valid (Arc keeps it alive)
|
||||
// New requests should get version 2
|
||||
let service2 = manager.get_encryption_service().await.expect("Service should be available");
|
||||
|
||||
// Verify they are different instances
|
||||
assert!(!Arc::ptr_eq(&service1, &service2));
|
||||
|
||||
// Old service should still work (simulating long-running operation)
|
||||
// This demonstrates zero-downtime: old operations continue, new operations use new service
|
||||
assert!(service1.health_check().await.is_ok());
|
||||
assert!(service2.health_check().await.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_concurrent_reconfiguration() {
|
||||
// Test that concurrent reconfiguration requests are serialized
|
||||
let manager = Arc::new(KmsServiceManager::new());
|
||||
|
||||
let temp_dir = TempDir::new().expect("Failed to create temp dir");
|
||||
let base_path = temp_dir.path().to_path_buf();
|
||||
|
||||
// Initial configuration
|
||||
let config1 = KmsConfig::local(base_path.clone());
|
||||
manager.configure(config1).await.expect("Configuration should succeed");
|
||||
manager.start().await.expect("Start should succeed");
|
||||
|
||||
// Spawn multiple concurrent reconfiguration requests
|
||||
let mut handles = Vec::new();
|
||||
for _i in 0..5 {
|
||||
let manager_clone = manager.clone();
|
||||
let path = base_path.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
let config = KmsConfig::local(path);
|
||||
manager_clone.reconfigure(config).await
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// Wait for all reconfigurations to complete
|
||||
let mut results = Vec::new();
|
||||
for handle in handles {
|
||||
results.push(handle.await);
|
||||
}
|
||||
|
||||
// All should succeed (serialized by mutex)
|
||||
for result in results {
|
||||
assert!(result.expect("Task should complete").is_ok());
|
||||
}
|
||||
|
||||
// Final version should be 6 (1 initial + 5 reconfigurations)
|
||||
let final_version = manager.get_service_version().await.expect("Service should have version");
|
||||
assert_eq!(final_version, 6);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,15 +16,11 @@
|
||||
|
||||
use crate::backends::{KmsBackend, local::LocalKmsBackend};
|
||||
use crate::config::{BackendConfig, KmsConfig};
|
||||
use crate::encryption::service::ObjectEncryptionService;
|
||||
use crate::error::{KmsError, Result};
|
||||
use crate::manager::KmsManager;
|
||||
use crate::service::ObjectEncryptionService;
|
||||
use arc_swap::ArcSwap;
|
||||
use std::sync::{
|
||||
Arc, OnceLock,
|
||||
atomic::{AtomicU64, Ordering},
|
||||
};
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
/// KMS service status
|
||||
@@ -40,43 +36,26 @@ pub enum KmsServiceStatus {
|
||||
Error(String),
|
||||
}
|
||||
|
||||
/// Service version information for zero-downtime reconfiguration
|
||||
#[derive(Clone)]
|
||||
struct ServiceVersion {
|
||||
/// Service version number (monotonically increasing)
|
||||
version: u64,
|
||||
/// The encryption service instance
|
||||
service: Arc<ObjectEncryptionService>,
|
||||
/// The KMS manager instance
|
||||
manager: Arc<KmsManager>,
|
||||
}
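For readers who have not used arc-swap: the zero-downtime behaviour attributed to ServiceVersion above comes from publishing a whole new version with a single atomic pointer store, while callers that already hold an Arc to the old service keep it alive until they finish. A minimal, illustrative sketch of that pattern with placeholder Service and Manager types (not the real KmsServiceManager):

```rust
use arc_swap::ArcSwap;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};

/// Stand-in for the real encryption service.
struct Service;

#[derive(Clone)]
struct ServiceVersion {
    version: u64,
    service: Arc<Service>,
}

struct Manager {
    current: ArcSwap<Option<ServiceVersion>>,
    counter: AtomicU64,
}

impl Manager {
    fn new() -> Self {
        Self {
            current: ArcSwap::from_pointee(None),
            counter: AtomicU64::new(0),
        }
    }

    /// Lock-free read: callers always observe the latest published version.
    fn get(&self) -> Option<ServiceVersion> {
        self.current.load().as_ref().as_ref().cloned()
    }

    /// Build a new version and publish it atomically; in-flight holders of the
    /// old `Arc<Service>` keep it alive until their operations finish.
    fn reconfigure(&self) -> u64 {
        let version = self.counter.fetch_add(1, Ordering::Relaxed) + 1;
        let next = ServiceVersion {
            version,
            service: Arc::new(Service),
        };
        self.current.store(Arc::new(Some(next)));
        version
    }
}

fn main() {
    let m = Manager::new();
    assert!(m.get().is_none());
    m.reconfigure();
    let v1 = m.get().expect("running");
    m.reconfigure();
    // Old handle is still usable; new lookups see the new version.
    assert_eq!(v1.version, 1);
    assert_eq!(m.get().expect("running").version, 2);
}
```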
/// Dynamic KMS service manager with versioned services for zero-downtime reconfiguration
|
||||
/// Dynamic KMS service manager
|
||||
pub struct KmsServiceManager {
|
||||
/// Current service version (if running)
|
||||
/// Uses ArcSwap for atomic, lock-free service switching
|
||||
/// This allows instant atomic updates without blocking readers
|
||||
current_service: ArcSwap<Option<ServiceVersion>>,
|
||||
/// Current KMS manager (if running)
|
||||
manager: Arc<RwLock<Option<Arc<KmsManager>>>>,
|
||||
/// Current encryption service (if running)
|
||||
encryption_service: Arc<RwLock<Option<Arc<ObjectEncryptionService>>>>,
|
||||
/// Current configuration
|
||||
config: Arc<RwLock<Option<KmsConfig>>>,
|
||||
/// Current status
|
||||
status: Arc<RwLock<KmsServiceStatus>>,
|
||||
/// Version counter (monotonically increasing)
|
||||
version_counter: Arc<AtomicU64>,
|
||||
/// Mutex to protect lifecycle operations (start, stop, reconfigure)
|
||||
/// This ensures only one lifecycle operation happens at a time
|
||||
lifecycle_mutex: Arc<Mutex<()>>,
|
||||
}
|
||||
|
||||
impl KmsServiceManager {
|
||||
/// Create a new KMS service manager (not configured)
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
current_service: ArcSwap::from_pointee(None),
|
||||
manager: Arc::new(RwLock::new(None)),
|
||||
encryption_service: Arc::new(RwLock::new(None)),
|
||||
config: Arc::new(RwLock::new(None)),
|
||||
status: Arc::new(RwLock::new(KmsServiceStatus::NotConfigured)),
|
||||
version_counter: Arc::new(AtomicU64::new(0)),
|
||||
lifecycle_mutex: Arc::new(Mutex::new(())),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,12 +89,6 @@ impl KmsServiceManager {
|
||||
|
||||
/// Start KMS service with current configuration
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
let _guard = self.lifecycle_mutex.lock().await;
|
||||
self.start_internal().await
|
||||
}
|
||||
|
||||
/// Internal start implementation (called within lifecycle mutex)
|
||||
async fn start_internal(&self) -> Result<()> {
|
||||
let config = {
|
||||
let config_guard = self.config.read().await;
|
||||
match config_guard.as_ref() {
|
||||
@@ -132,11 +105,23 @@ impl KmsServiceManager {
|
||||
|
||||
info!("Starting KMS service with backend: {:?}", config.backend);
|
||||
|
||||
match self.create_service_version(&config).await {
|
||||
Ok(service_version) => {
|
||||
// Atomically update to new service version (lock-free, instant)
|
||||
// ArcSwap::store() is a true atomic operation using CAS
|
||||
self.current_service.store(Arc::new(Some(service_version)));
|
||||
match self.create_backend(&config).await {
|
||||
Ok(backend) => {
|
||||
// Create KMS manager
|
||||
let kms_manager = Arc::new(KmsManager::new(backend, config));
|
||||
|
||||
// Create encryption service
|
||||
let encryption_service = Arc::new(ObjectEncryptionService::new((*kms_manager).clone()));
|
||||
|
||||
// Update manager and service
|
||||
{
|
||||
let mut manager = self.manager.write().await;
|
||||
*manager = Some(kms_manager);
|
||||
}
|
||||
{
|
||||
let mut service = self.encryption_service.write().await;
|
||||
*service = Some(encryption_service);
|
||||
}
|
||||
|
||||
// Update status
|
||||
{
|
||||
@@ -158,21 +143,18 @@ impl KmsServiceManager {
|
||||
}
|
||||
|
||||
/// Stop KMS service
|
||||
///
|
||||
/// Note: This stops accepting new operations, but existing operations using
|
||||
/// the service will continue until they complete (due to Arc reference counting).
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
let _guard = self.lifecycle_mutex.lock().await;
|
||||
self.stop_internal().await
|
||||
}
|
||||
|
||||
/// Internal stop implementation (called within lifecycle mutex)
|
||||
async fn stop_internal(&self) -> Result<()> {
|
||||
info!("Stopping KMS service");
|
||||
|
||||
// Atomically clear current service version (lock-free, instant)
|
||||
// Note: Existing Arc references will keep the service alive until operations complete
|
||||
self.current_service.store(Arc::new(None));
|
||||
// Clear manager and service
|
||||
{
|
||||
let mut manager = self.manager.write().await;
|
||||
*manager = None;
|
||||
}
|
||||
{
|
||||
let mut service = self.encryption_service.write().await;
|
||||
*service = None;
|
||||
}
|
||||
|
||||
// Update status (keep configuration)
|
||||
{
|
||||
@@ -182,96 +164,37 @@ impl KmsServiceManager {
|
||||
}
|
||||
}
|
||||
|
||||
info!("KMS service stopped successfully (existing operations may continue)");
|
||||
info!("KMS service stopped successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reconfigure and restart KMS service with zero-downtime
|
||||
///
|
||||
/// This method implements versioned service switching:
|
||||
/// 1. Creates a new service version without stopping the old one
|
||||
/// 2. Atomically switches to the new version
|
||||
/// 3. Old operations continue using the old service (via Arc reference counting)
|
||||
/// 4. New operations automatically use the new service
|
||||
///
|
||||
/// This ensures zero downtime during reconfiguration, even for long-running
|
||||
/// operations like encrypting large files.
|
||||
/// Reconfigure and restart KMS service
|
||||
pub async fn reconfigure(&self, new_config: KmsConfig) -> Result<()> {
|
||||
let _guard = self.lifecycle_mutex.lock().await;
|
||||
info!("Reconfiguring KMS service");
|
||||
|
||||
info!("Reconfiguring KMS service (zero-downtime)");
|
||||
// Stop current service if running
|
||||
if matches!(self.get_status().await, KmsServiceStatus::Running) {
|
||||
self.stop().await?;
|
||||
}
|
||||
|
||||
// Configure with new config
|
||||
{
|
||||
let mut config = self.config.write().await;
|
||||
*config = Some(new_config.clone());
|
||||
}
|
||||
self.configure(new_config).await?;
|
||||
|
||||
// Create new service version without stopping old one
|
||||
// This allows existing operations to continue while new operations use new service
|
||||
match self.create_service_version(&new_config).await {
|
||||
Ok(new_service_version) => {
|
||||
// Get old version for logging (lock-free read)
|
||||
let old_version = self.current_service.load().as_ref().as_ref().map(|sv| sv.version);
|
||||
// Start with new configuration
|
||||
self.start().await?;
|
||||
|
||||
// Atomically switch to new service version (lock-free, instant CAS operation)
|
||||
// This is a true atomic operation - no waiting for locks, instant switch
|
||||
// Old service will be dropped when no more Arc references exist
|
||||
self.current_service.store(Arc::new(Some(new_service_version.clone())));
|
||||
|
||||
// Update status
|
||||
{
|
||||
let mut status = self.status.write().await;
|
||||
*status = KmsServiceStatus::Running;
|
||||
}
|
||||
|
||||
if let Some(old_ver) = old_version {
|
||||
info!(
|
||||
"KMS service reconfigured successfully: version {} -> {} (old service will be cleaned up when operations complete)",
|
||||
old_ver, new_service_version.version
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"KMS service reconfigured successfully: version {} (service started)",
|
||||
new_service_version.version
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
let err_msg = format!("Failed to reconfigure KMS: {e}");
|
||||
error!("{}", err_msg);
|
||||
let mut status = self.status.write().await;
|
||||
*status = KmsServiceStatus::Error(err_msg.clone());
|
||||
Err(KmsError::backend_error(&err_msg))
|
||||
}
|
||||
}
|
||||
info!("KMS service reconfigured successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get KMS manager (if running)
|
||||
///
|
||||
/// Returns the manager from the current service version.
|
||||
/// Uses lock-free atomic load for optimal performance.
|
||||
pub async fn get_manager(&self) -> Option<Arc<KmsManager>> {
|
||||
self.current_service.load().as_ref().as_ref().map(|sv| sv.manager.clone())
|
||||
self.manager.read().await.clone()
|
||||
}
|
||||
|
||||
/// Get encryption service (if running)
|
||||
///
|
||||
/// Returns the service from the current service version.
|
||||
/// Uses lock-free atomic load - no blocking, instant access.
|
||||
/// This ensures new operations always use the latest service version,
|
||||
/// while existing operations continue using their Arc references.
|
||||
/// Get encryption service (if running)
|
||||
pub async fn get_encryption_service(&self) -> Option<Arc<ObjectEncryptionService>> {
|
||||
self.current_service.load().as_ref().as_ref().map(|sv| sv.service.clone())
|
||||
}
|
||||
|
||||
/// Get current service version number
|
||||
///
|
||||
/// Useful for monitoring and debugging.
|
||||
/// Uses lock-free atomic load.
|
||||
pub async fn get_service_version(&self) -> Option<u64> {
|
||||
self.current_service.load().as_ref().as_ref().map(|sv| sv.version)
|
||||
self.encryption_service.read().await.clone()
|
||||
}
|
||||
|
||||
/// Health check for the KMS service
|
||||
@@ -303,40 +226,20 @@ impl KmsServiceManager {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new service version from configuration
|
||||
///
|
||||
/// This creates a new backend, manager, and service, and assigns it a new version number.
|
||||
async fn create_service_version(&self, config: &KmsConfig) -> Result<ServiceVersion> {
|
||||
// Increment version counter
|
||||
let version = self.version_counter.fetch_add(1, Ordering::Relaxed) + 1;
|
||||
|
||||
info!("Creating KMS service version {} with backend: {:?}", version, config.backend);
|
||||
|
||||
// Create backend
|
||||
let backend = match &config.backend_config {
|
||||
/// Create backend from configuration
|
||||
async fn create_backend(&self, config: &KmsConfig) -> Result<Arc<dyn KmsBackend>> {
|
||||
match &config.backend_config {
|
||||
BackendConfig::Local(_) => {
|
||||
info!("Creating Local KMS backend for version {}", version);
|
||||
info!("Creating Local KMS backend");
|
||||
let backend = LocalKmsBackend::new(config.clone()).await?;
|
||||
Arc::new(backend) as Arc<dyn KmsBackend>
|
||||
Ok(Arc::new(backend))
|
||||
}
|
||||
BackendConfig::Vault(_) => {
|
||||
info!("Creating Vault KMS backend for version {}", version);
|
||||
info!("Creating Vault KMS backend");
|
||||
let backend = crate::backends::vault::VaultKmsBackend::new(config.clone()).await?;
|
||||
Arc::new(backend) as Arc<dyn KmsBackend>
|
||||
Ok(Arc::new(backend))
|
||||
}
|
||||
};
|
||||
|
||||
// Create KMS manager
|
||||
let kms_manager = Arc::new(KmsManager::new(backend, config.clone()));
|
||||
|
||||
// Create encryption service
|
||||
let encryption_service = Arc::new(ObjectEncryptionService::new((*kms_manager).clone()));
|
||||
|
||||
Ok(ServiceVersion {
|
||||
version,
|
||||
service: encryption_service,
|
||||
manager: kms_manager,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ use zeroize::Zeroize;
|
||||
|
||||
/// Data encryption key (DEK) used for encrypting object data
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DataKeyInfo {
|
||||
pub struct DataKey {
|
||||
/// Key identifier
|
||||
pub key_id: String,
|
||||
/// Key version
|
||||
@@ -40,7 +40,7 @@ pub struct DataKeyInfo {
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
impl DataKeyInfo {
|
||||
impl DataKey {
|
||||
/// Create a new data key
|
||||
///
|
||||
/// # Arguments
|
||||
@@ -96,7 +96,7 @@ impl DataKeyInfo {
|
||||
|
||||
/// Master key stored in KMS backend
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MasterKeyInfo {
|
||||
pub struct MasterKey {
|
||||
/// Unique key identifier
|
||||
pub key_id: String,
|
||||
/// Key version
|
||||
@@ -119,7 +119,7 @@ pub struct MasterKeyInfo {
|
||||
pub created_by: Option<String>,
|
||||
}
|
||||
|
||||
impl MasterKeyInfo {
|
||||
impl MasterKey {
|
||||
/// Create a new master key
|
||||
///
|
||||
/// # Arguments
|
||||
@@ -226,8 +226,8 @@ pub struct KeyInfo {
|
||||
pub created_by: Option<String>,
|
||||
}
|
||||
|
||||
impl From<MasterKeyInfo> for KeyInfo {
|
||||
fn from(master_key: MasterKeyInfo) -> Self {
|
||||
impl From<MasterKey> for KeyInfo {
|
||||
fn from(master_key: MasterKey) -> Self {
|
||||
Self {
|
||||
key_id: master_key.key_id,
|
||||
description: master_key.description,
|
||||
@@ -913,7 +913,7 @@ pub struct CancelKeyDeletionResponse {
|
||||
}
|
||||
|
||||
// SECURITY: Implement Drop to automatically zero sensitive data when DataKey is dropped
|
||||
impl Drop for DataKeyInfo {
|
||||
impl Drop for DataKey {
|
||||
fn drop(&mut self) {
|
||||
self.clear_plaintext();
|
||||
}
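The Drop implementation above depends on clear_plaintext wiping the DEK before the struct is freed. A minimal sketch of the same idea with the zeroize crate, using a hypothetical SensitiveKey type in place of DataKey:

```rust
use zeroize::Zeroize;

struct SensitiveKey {
    plaintext: Option<Vec<u8>>, // stands in for the plaintext DEK field
}

impl SensitiveKey {
    fn clear_plaintext(&mut self) {
        if let Some(ref mut bytes) = self.plaintext {
            bytes.zeroize(); // overwrite the secret bytes in place
        }
        self.plaintext = None; // then release the (now zeroed) buffer
    }
}

impl Drop for SensitiveKey {
    fn drop(&mut self) {
        // Best-effort hygiene: the secret is wiped before memory is freed.
        self.clear_plaintext();
    }
}

fn main() {
    let mut key = SensitiveKey {
        plaintext: Some(vec![0xAB; 32]),
    };
    key.clear_plaintext();
    assert!(key.plaintext.is_none());
}
```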
@@ -13,7 +13,6 @@
|
||||
// limitations under the License.
|
||||
|
||||
use crate::notification_system_subscriber::NotificationSystemSubscriberView;
|
||||
use crate::notifier::TargetList;
|
||||
use crate::{
|
||||
Event, error::NotificationError, notifier::EventNotifier, registry::TargetRegistry, rules::BucketNotificationConfig, stream,
|
||||
};
|
||||
@@ -192,22 +191,6 @@ impl NotificationSystem {
|
||||
self.notifier.target_list().read().await.keys()
|
||||
}
|
||||
|
||||
/// Gets the complete Target list, including both active and inactive Targets.
|
||||
///
|
||||
/// # Return
|
||||
/// An `Arc<RwLock<TargetList>>` containing all Targets.
|
||||
pub async fn get_all_targets(&self) -> Arc<RwLock<TargetList>> {
|
||||
self.notifier.target_list()
|
||||
}
|
||||
|
||||
/// Gets all Target values, including both active and inactive Targets.
|
||||
///
|
||||
/// # Return
|
||||
/// A Vec containing all Targets.
|
||||
pub async fn get_target_values(&self) -> Vec<Arc<dyn Target<Event> + Send + Sync>> {
|
||||
self.notifier.target_list().read().await.values()
|
||||
}
|
||||
|
||||
/// Checks if there are active subscribers for the given bucket and event name.
|
||||
pub async fn has_subscriber(&self, bucket: &str, event: &EventName) -> bool {
|
||||
if !self.subscriber_view.has_subscriber(bucket, event) {