merge main

weisd
2026-01-12 13:35:28 +08:00
56 changed files with 3743 additions and 804 deletions

View File

@@ -44,7 +44,6 @@ jobs:
set -x
old_version=$(grep "^appVersion:" helm/rustfs/Chart.yaml | awk '{print $2}')
sed -i "s/$old_version/$new_version/g" helm/rustfs/Chart.yaml
sed -i "/^image:/,/^[^ ]/ s/tag:.*/tag: "$new_version"/" helm/rustfs/values.yaml
- name: Set up Helm
uses: azure/setup-helm@v4.3.0

Cargo.lock (generated, 502 changes): file diff suppressed because it is too large.

View File

@@ -51,7 +51,7 @@ resolver = "2"
edition = "2024"
license = "Apache-2.0"
repository = "https://github.com/rustfs/rustfs"
rust-version = "1.88"
rust-version = "1.90"
version = "0.0.5"
homepage = "https://rustfs.com"
description = "RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. "
@@ -132,27 +132,27 @@ bytesize = "2.3.1"
byteorder = "1.5.0"
flatbuffers = "25.12.19"
form_urlencoded = "1.2.2"
prost = "0.14.1"
quick-xml = "0.38.4"
prost = "0.14.3"
quick-xml = "0.39.0"
rmcp = { version = "0.12.0" }
rmp = { version = "0.8.15" }
rmp-serde = { version = "1.3.1" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.148", features = ["raw_value"] }
serde_json = { version = "1.0.149", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
schemars = "1.2.0"
# Cryptography and Security
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
argon2 = { version = "0.6.0-rc.5" }
blake3 = { version = "1.8.2", features = ["rayon", "mmap"] }
blake3 = { version = "1.8.3", features = ["rayon", "mmap"] }
chacha20poly1305 = { version = "0.11.0-rc.2" }
crc-fast = "1.6.0"
hmac = { version = "0.13.0-rc.3" }
jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
pbkdf2 = "0.13.0-rc.5"
jsonwebtoken = { version = "10.2.0", features = ["aws_lc_rs"] }
pbkdf2 = "0.13.0-rc.6"
rsa = { version = "0.10.0-rc.11" }
rustls = { version = "0.23.35" }
rustls = { version = "0.23.36", default-features = false, features = ["aws-lc-rs", "logging", "tls12", "prefer-post-quantum", "std"] }
rustls-pemfile = "2.2.0"
rustls-pki-types = "1.13.2"
sha1 = "0.11.0-rc.3"
@@ -186,6 +186,7 @@ criterion = { version = "0.8", features = ["html_reports"] }
crossbeam-queue = "0.3.12"
datafusion = "51.0.0"
derive_builder = "0.20.2"
dunce = "1.0.5"
enumset = "1.1.10"
faster-hex = "0.10.0"
flate2 = "1.1.5"
@@ -199,7 +200,7 @@ hex-simd = "0.8.0"
highway = { version = "1.3.0" }
ipnetwork = { version = "0.21.1", features = ["serde"] }
lazy_static = "1.5.0"
libc = "0.2.179"
libc = "0.2.180"
libsystemd = "0.7.2"
local-ip-address = "0.6.8"
lz4 = "1.28.1"
@@ -247,13 +248,13 @@ tracing-error = "0.2.1"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
url = "2.5.7"
url = "2.5.8"
urlencoding = "2.1.3"
uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] }
vaultrs = { version = "0.7.4" }
walkdir = "2.5.0"
wildmatch = { version = "2.6.1", features = ["serde"] }
winapi = { version = "0.3.9" }
windows = { version = "0.62.2" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "7.0.0"
zstd = "0.13.3"
@@ -272,7 +273,7 @@ libunftp = "0.21.0"
russh = { version = "0.56.0", features = ["aws-lc-rs", "rsa"], default-features = false }
russh-sftp = "2.1.1"
ssh-key = { version = "0.7.0-rc.4", features = ["std", "rsa", "ed25519"] }
suppaftp = { version = "7.0.7", features = ["tokio", "tokio-rustls", "rustls"] }
suppaftp = { version = "7.1.0", features = ["tokio", "tokio-rustls", "rustls"] }
rcgen = "0.14.6"
# Performance Analysis and Memory Profiling

View File

@@ -1,4 +1,4 @@
FROM alpine:3.22 AS build
FROM alpine:3.23 AS build
ARG TARGETARCH
ARG RELEASE=latest
@@ -40,7 +40,7 @@ RUN set -eux; \
rm -rf rustfs.zip /build/.tmp || true
FROM alpine:3.22
FROM alpine:3.23
ARG RELEASE=latest
ARG BUILD_DATE

View File

@@ -16,7 +16,7 @@ ARG BUILDPLATFORM
# -----------------------------
# Build stage
# -----------------------------
FROM rust:1.88-bookworm AS builder
FROM rust:1.91-trixie AS builder
# Re-declare args after FROM
ARG TARGETPLATFORM

View File

@@ -83,6 +83,13 @@ Unlike other storage systems, RustFS is released under the permissive Apache 2.
| **Edge & IoT** | **Strong Edge Support**<br>Ideal for secure, innovative edge devices. | **Weak Edge Support**<br>Often too heavy for edge gateways. |
| **Risk Profile** | **Enterprise Risk Mitigation**<br>Clear IP rights and safe for commercial use. | **Legal Risks**<br>Intellectual property ambiguity and usage restrictions. |
## Staying ahead
Star RustFS on GitHub and be instantly notified of new releases.
<img src="https://github.com/user-attachments/assets/7ee40bb4-3e46-4eac-b0d0-5fbeb85ff8f3" />
## Quickstart
To get started with RustFS, follow these steps:

View File

@@ -86,6 +86,15 @@ RustFS is a high-performance distributed object storage system built with Rust. Rust
| **Cost** | **Stable and Free**<br>Free community support and stable commercial pricing. | **High Cost**<br>1 PiB can cost up to $250,000. |
| **Risk Profile** | **Enterprise Risk Mitigation**<br>Clear IP rights and safe for commercial use. | **Legal Risks**<br>Intellectual property ambiguity and usage restrictions. |
## Staying ahead
Star RustFS on GitHub and be instantly notified of new releases.
<img src="https://github.com/user-attachments/assets/7ee40bb4-3e46-4eac-b0d0-5fbeb85ff8f3" />
## Quickstart
To get started with RustFS, follow these steps:

View File

@@ -21,6 +21,7 @@ use futures::stream::FuturesUnordered;
use hashbrown::{HashMap, HashSet};
use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, EnableState, audit::AUDIT_ROUTE_PREFIX};
use rustfs_ecstore::config::{Config, KVS};
use rustfs_targets::arn::TargetID;
use rustfs_targets::{Target, TargetError, target::ChannelTargetType};
use std::str::FromStr;
use std::sync::Arc;
@@ -392,4 +393,80 @@ impl AuditRegistry {
Ok(())
}
/// Creates a unique key for a target based on its type and ID
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
///
/// # Returns
/// * `String` - The unique key for the target.
pub fn create_key(&self, target_type: &str, target_id: &str) -> String {
let key = TargetID::new(target_id.to_string(), target_type.to_string());
info!(target_type = %target_type, "Create key for {}", key);
key.to_string()
}
/// Enables a target (placeholder, assumes target exists)
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub fn enable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> {
let key = self.create_key(target_type, target_id);
if self.get_target(&key).is_some() {
info!("Target {}-{} enabled", target_type, target_id);
Ok(())
} else {
Err(AuditError::Configuration(
format!("Target not found: {}-{}", target_type, target_id),
None,
))
}
}
/// Disables a target (placeholder, assumes target exists)
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub fn disable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> {
let key = self.create_key(target_type, target_id);
if self.get_target(&key).is_some() {
info!("Target {}-{} disabled", target_type, target_id);
Ok(())
} else {
Err(AuditError::Configuration(
format!("Target not found: {}-{}", target_type, target_id),
None,
))
}
}
/// Upserts a target into the registry
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
/// * `target` - The target instance to be upserted.
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub fn upsert_target(
&mut self,
target_type: &str,
target_id: &str,
target: Box<dyn Target<AuditEntry> + Send + Sync>,
) -> AuditResult<()> {
let key = self.create_key(target_type, target_id);
self.targets.insert(key, target);
Ok(())
}
}
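All of the registry methods above address targets through one composed key: a TargetID built from the id and type, rendered to a string, and used as the map index that upsert populates and enable/disable merely check. A minimal standalone sketch of that key-as-index contract, using plain std types instead of the rustfs Target trait (the key format here is illustrative; the real one is whatever TargetID's Display produces):

use std::collections::HashMap;

// Simplified stand-in: the real registry stores Box<dyn Target<AuditEntry> + Send + Sync>.
struct ToyRegistry {
    targets: HashMap<String, String>,
}

impl ToyRegistry {
    fn create_key(&self, target_type: &str, target_id: &str) -> String {
        // Hypothetical format; rustfs derives this from TargetID::new(id, type).to_string().
        format!("{target_id}:{target_type}")
    }
    fn upsert(&mut self, target_type: &str, target_id: &str, value: String) {
        let key = self.create_key(target_type, target_id);
        self.targets.insert(key, value);
    }
    fn enable(&self, target_type: &str, target_id: &str) -> Result<(), String> {
        // Mirrors enable_target above: an existence check against the same map.
        let key = self.create_key(target_type, target_id);
        if self.targets.contains_key(&key) {
            Ok(())
        } else {
            Err(format!("Target not found: {target_type}-{target_id}"))
        }
    }
}

fn main() {
    let mut reg = ToyRegistry { targets: HashMap::new() };
    assert!(reg.enable("webhook", "primary").is_err()); // unknown until upserted
    reg.upsert("webhook", "primary", "https://example.invalid/audit".to_string());
    assert!(reg.enable("webhook", "primary").is_ok());
}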

View File

@@ -274,9 +274,9 @@ impl AuditSystem {
drop(state);
let registry = self.registry.lock().await;
let target_ids = registry.list_targets();
let target_keys = registry.list_targets();
if target_ids.is_empty() {
if target_keys.is_empty() {
warn!("No audit targets configured for dispatch");
return Ok(());
}
@@ -284,22 +284,22 @@ impl AuditSystem {
// Dispatch to all targets concurrently
let mut tasks = Vec::new();
for target_id in target_ids {
if let Some(target) = registry.get_target(&target_id) {
for target_key in target_keys {
if let Some(target) = registry.get_target(&target_key) {
let entry_clone = Arc::clone(&entry);
let target_id_clone = target_id.clone();
let target_key_clone = target_key.clone();
// Create EntityTarget for the audit log entry
let entity_target = EntityTarget {
object_name: entry.api.name.clone().unwrap_or_default(),
bucket_name: entry.api.bucket.clone().unwrap_or_default(),
event_name: rustfs_targets::EventName::ObjectCreatedPut, // Default, should be derived from entry
event_name: entry.event, // Event name carried by the audit entry
data: (*entry_clone).clone(),
};
let task = async move {
let result = target.save(Arc::new(entity_target)).await;
(target_id_clone, result)
(target_key_clone, result)
};
tasks.push(task);
@@ -312,14 +312,14 @@ impl AuditSystem {
let mut errors = Vec::new();
let mut success_count = 0;
for (target_id, result) in results {
for (target_key, result) in results {
match result {
Ok(_) => {
success_count += 1;
observability::record_target_success();
}
Err(e) => {
error!(target_id = %target_id, error = %e, "Failed to dispatch audit log to target");
error!(target_id = %target_key, error = %e, "Failed to dispatch audit log to target");
errors.push(e);
observability::record_target_failure();
}
@@ -360,18 +360,18 @@ impl AuditSystem {
drop(state);
let registry = self.registry.lock().await;
let target_ids = registry.list_targets();
let target_keys = registry.list_targets();
if target_ids.is_empty() {
if target_keys.is_empty() {
warn!("No audit targets configured for batch dispatch");
return Ok(());
}
let mut tasks = Vec::new();
for target_id in target_ids {
if let Some(target) = registry.get_target(&target_id) {
for target_key in target_keys {
if let Some(target) = registry.get_target(&target_key) {
let entries_clone: Vec<_> = entries.iter().map(Arc::clone).collect();
let target_id_clone = target_id.clone();
let target_key_clone = target_key.clone();
let task = async move {
let mut success_count = 0;
@@ -380,7 +380,7 @@ impl AuditSystem {
let entity_target = EntityTarget {
object_name: entry.api.name.clone().unwrap_or_default(),
bucket_name: entry.api.bucket.clone().unwrap_or_default(),
event_name: rustfs_targets::EventName::ObjectCreatedPut,
event_name: entry.event,
data: (*entry).clone(),
};
match target.save(Arc::new(entity_target)).await {
@@ -388,7 +388,7 @@ impl AuditSystem {
Err(e) => errors.push(e),
}
}
(target_id_clone, success_count, errors)
(target_key_clone, success_count, errors)
};
tasks.push(task);
}
@@ -418,6 +418,7 @@ impl AuditSystem {
}
/// Starts the audit stream processing for a target with batching and retry logic
///
/// # Arguments
/// * `store` - The store from which to read audit entries
/// * `target` - The target to which audit entries will be sent
@@ -501,7 +502,7 @@ impl AuditSystem {
/// Enables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to enable
/// * `target_id` - The ID of the target to enable, given as the string form of a `TargetID`
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
@@ -520,7 +521,7 @@ impl AuditSystem {
/// Disables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to disable
/// * `target_id` - The ID of the target to disable, given as the string form of a `TargetID`
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
@@ -539,7 +540,7 @@ impl AuditSystem {
/// Removes a target from the system
///
/// # Arguments
/// * `target_id` - The ID of the target to remove
/// * `target_id` - The ID of the target to remove, given as the string form of a `TargetID`
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
@@ -559,7 +560,7 @@ impl AuditSystem {
/// Updates or inserts a target
///
/// # Arguments
/// * `target_id` - The ID of the target to upsert
/// * `target_id` - The ID of the target to upsert, given as the string form of a `TargetID`
/// * `target` - The target instance to insert or update
///
/// # Returns
@@ -596,7 +597,7 @@ impl AuditSystem {
/// Gets information about a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to retrieve
/// * `target_id` - The ID of the target to retrieve, given as the string form of a `TargetID`
///
/// # Returns
/// * `Option<String>` - Target ID if found

View File

@@ -96,6 +96,11 @@ pub enum Metric {
ApplyNonCurrent,
HealAbandonedVersion,
// Quota metrics:
QuotaCheck,
QuotaViolation,
QuotaSync,
// START Trace metrics:
StartTrace,
ScanObject, // Scan object. All operations included.
@@ -131,6 +136,9 @@ impl Metric {
Self::CleanAbandoned => "clean_abandoned",
Self::ApplyNonCurrent => "apply_non_current",
Self::HealAbandonedVersion => "heal_abandoned_version",
Self::QuotaCheck => "quota_check",
Self::QuotaViolation => "quota_violation",
Self::QuotaSync => "quota_sync",
Self::StartTrace => "start_trace",
Self::ScanObject => "scan_object",
Self::HealAbandonedObject => "heal_abandoned_object",
@@ -163,15 +171,18 @@ impl Metric {
10 => Some(Self::CleanAbandoned),
11 => Some(Self::ApplyNonCurrent),
12 => Some(Self::HealAbandonedVersion),
13 => Some(Self::StartTrace),
14 => Some(Self::ScanObject),
15 => Some(Self::HealAbandonedObject),
16 => Some(Self::LastRealtime),
17 => Some(Self::ScanFolder),
18 => Some(Self::ScanCycle),
19 => Some(Self::ScanBucketDrive),
20 => Some(Self::CompactFolder),
21 => Some(Self::Last),
13 => Some(Self::QuotaCheck),
14 => Some(Self::QuotaViolation),
15 => Some(Self::QuotaSync),
16 => Some(Self::StartTrace),
17 => Some(Self::ScanObject),
18 => Some(Self::HealAbandonedObject),
19 => Some(Self::LastRealtime),
20 => Some(Self::ScanFolder),
21 => Some(Self::ScanCycle),
22 => Some(Self::ScanBucketDrive),
23 => Some(Self::CompactFolder),
24 => Some(Self::Last),
_ => None,
}
}

View File

@@ -21,6 +21,7 @@ pub(crate) mod heal;
pub(crate) mod object;
pub(crate) mod profiler;
pub(crate) mod protocols;
pub(crate) mod quota;
pub(crate) mod runtime;
pub(crate) mod scanner;
pub(crate) mod targets;

View File

@@ -0,0 +1,26 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub const QUOTA_CONFIG_FILE: &str = "quota.json";
pub const QUOTA_TYPE_HARD: &str = "HARD";
pub const QUOTA_EXCEEDED_ERROR_CODE: &str = "XRustfsQuotaExceeded";
pub const QUOTA_INVALID_CONFIG_ERROR_CODE: &str = "InvalidArgument";
pub const QUOTA_NOT_FOUND_ERROR_CODE: &str = "NoSuchBucket";
pub const QUOTA_INTERNAL_ERROR_CODE: &str = "InternalError";
pub const QUOTA_API_PATH: &str = "/rustfs/admin/v3/quota/{bucket}";
pub const QUOTA_INVALID_TYPE_ERROR_MSG: &str = "Only HARD quota type is supported";
pub const QUOTA_METADATA_SYSTEM_ERROR_MSG: &str = "Bucket metadata system not initialized";

View File

@@ -33,6 +33,8 @@ pub use constants::profiler::*;
#[cfg(feature = "constants")]
pub use constants::protocols::*;
#[cfg(feature = "constants")]
pub use constants::quota::*;
#[cfg(feature = "constants")]
pub use constants::runtime::*;
#[cfg(feature = "constants")]
pub use constants::scanner::*;

View File

@@ -0,0 +1,155 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Regression test for Issue #1423
//! Verifies that Bucket Policies are honored for Authenticated Users.
use crate::common::{RustFSTestEnvironment, init_logging};
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::{Client, Config};
use serial_test::serial;
use tracing::info;
async fn create_user(
env: &RustFSTestEnvironment,
username: &str,
password: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let create_user_body = serde_json::json!({
"secretKey": password,
"status": "enabled"
})
.to_string();
let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, username);
crate::common::awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?;
Ok(())
}
fn create_user_client(env: &RustFSTestEnvironment, access_key: &str, secret_key: &str) -> Client {
let credentials = Credentials::new(access_key, secret_key, None, None, "test-user");
let config = Config::builder()
.credentials_provider(credentials)
.region(Region::new("us-east-1"))
.endpoint_url(&env.url)
.force_path_style(true)
.behavior_version_latest()
.build();
Client::from_conf(config)
}
#[tokio::test]
#[serial]
async fn test_bucket_policy_authenticated_user() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("Starting test_bucket_policy_authenticated_user...");
let mut env = RustFSTestEnvironment::new().await?;
env.start_rustfs_server(vec![]).await?;
let admin_client = env.create_s3_client();
let bucket_name = "bucket-policy-auth-test";
let object_key = "test-object.txt";
let user_access = "testuser";
let user_secret = "testpassword";
// 1. Create Bucket (Admin)
admin_client.create_bucket().bucket(bucket_name).send().await?;
// 2. Create User (Admin API)
create_user(&env, user_access, user_secret).await?;
// 3. Create User Client
let user_client = create_user_client(&env, user_access, user_secret);
// 4. Verify Access Denied initially (No Policy)
let result = user_client.list_objects_v2().bucket(bucket_name).send().await;
if result.is_ok() {
return Err("Should be Access Denied initially".into());
}
// 5. Apply Bucket Policy Allowed User
let policy_json = serde_json::json!({
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowTestUser",
"Effect": "Allow",
"Principal": {
"AWS": [user_access]
},
"Action": [
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
],
"Resource": [
format!("arn:aws:s3:::{}", bucket_name),
format!("arn:aws:s3:::{}/*", bucket_name)
]
}
]
})
.to_string();
admin_client
.put_bucket_policy()
.bucket(bucket_name)
.policy(&policy_json)
.send()
.await?;
// 6. Verify Access Allowed (With Bucket Policy)
info!("Verifying PutObject...");
user_client
.put_object()
.bucket(bucket_name)
.key(object_key)
.body(aws_sdk_s3::primitives::ByteStream::from_static(b"hello world"))
.send()
.await
.map_err(|e| format!("PutObject failed: {}", e))?;
info!("Verifying ListObjects...");
let list_res = user_client
.list_objects_v2()
.bucket(bucket_name)
.send()
.await
.map_err(|e| format!("ListObjects failed: {}", e))?;
assert_eq!(list_res.contents().len(), 1);
info!("Verifying GetObject...");
user_client
.get_object()
.bucket(bucket_name)
.key(object_key)
.send()
.await
.map_err(|e| format!("GetObject failed: {}", e))?;
info!("Verifying DeleteObject...");
user_client
.delete_object()
.bucket(bucket_name)
.key(object_key)
.send()
.await
.map_err(|e| format!("DeleteObject failed: {}", e))?;
info!("Test Passed!");
Ok(())
}

View File

@@ -176,12 +176,14 @@ impl RustFSTestEnvironment {
/// Kill any existing RustFS processes
pub async fn cleanup_existing_processes(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
info!("Cleaning up any existing RustFS processes");
let output = Command::new("pkill").args(["-f", "rustfs"]).output();
let binary_path = rustfs_binary_path();
let binary_name = binary_path.to_string_lossy();
let output = Command::new("pkill").args(["-f", &binary_name]).output();
if let Ok(output) = output
&& output.status.success()
{
info!("Killed existing RustFS processes");
info!("Killed existing RustFS processes: {}", binary_name);
sleep(Duration::from_millis(1000)).await;
}
Ok(())
@@ -363,3 +365,12 @@ pub async fn awscurl_put(
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
execute_awscurl(url, "PUT", Some(body), access_key, secret_key).await
}
/// Helper function for DELETE requests
pub async fn awscurl_delete(
url: &str,
access_key: &str,
secret_key: &str,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
execute_awscurl(url, "DELETE", None, access_key, secret_key).await
}

View File

@@ -29,6 +29,13 @@ mod data_usage_test;
#[cfg(test)]
mod kms;
// Quota tests
#[cfg(test)]
mod quota_test;
#[cfg(test)]
mod bucket_policy_check_test;
// Special characters in path test modules
#[cfg(test)]
mod special_chars_test;

View File

@@ -0,0 +1,798 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::common::{RustFSTestEnvironment, awscurl_delete, awscurl_get, awscurl_post, awscurl_put, init_logging};
use aws_sdk_s3::Client;
use serial_test::serial;
use tracing::{debug, info};
/// Test environment setup for quota tests
pub struct QuotaTestEnv {
pub env: RustFSTestEnvironment,
pub client: Client,
pub bucket_name: String,
}
impl QuotaTestEnv {
pub async fn new() -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
let bucket_name = format!("quota-test-{}", uuid::Uuid::new_v4());
let mut env = RustFSTestEnvironment::new().await?;
env.start_rustfs_server(vec![]).await?;
let client = env.create_s3_client();
Ok(Self {
env,
client,
bucket_name,
})
}
pub async fn create_bucket(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
self.env.create_test_bucket(&self.bucket_name).await?;
Ok(())
}
pub async fn cleanup_bucket(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let objects = self.client.list_objects_v2().bucket(&self.bucket_name).send().await?;
for object in objects.contents() {
self.client
.delete_object()
.bucket(&self.bucket_name)
.key(object.key().unwrap_or_default())
.send()
.await?;
}
self.env.delete_test_bucket(&self.bucket_name).await?;
Ok(())
}
pub async fn set_bucket_quota(&self, quota_bytes: u64) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
let quota_config = serde_json::json!({
"quota": quota_bytes,
"quota_type": "HARD"
});
let response = awscurl_put(&url, &quota_config.to_string(), &self.env.access_key, &self.env.secret_key).await?;
if response.contains("error") {
Err(format!("Failed to set quota: {}", response).into())
} else {
Ok(())
}
}
pub async fn get_bucket_quota(&self) -> Result<Option<u64>, Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;
if response.contains("error") {
Err(format!("Failed to get quota: {}", response).into())
} else {
let quota_info: serde_json::Value = serde_json::from_str(&response)?;
Ok(quota_info.get("quota").and_then(|v| v.as_u64()))
}
}
pub async fn clear_bucket_quota(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
let response = awscurl_delete(&url, &self.env.access_key, &self.env.secret_key).await?;
if response.contains("error") {
Err(format!("Failed to clear quota: {}", response).into())
} else {
Ok(())
}
}
pub async fn get_bucket_quota_stats(&self) -> Result<serde_json::Value, Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota-stats/{}", self.env.url, self.bucket_name);
let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;
if response.contains("error") {
Err(format!("Failed to get quota stats: {}", response).into())
} else {
Ok(serde_json::from_str(&response)?)
}
}
pub async fn check_bucket_quota(
&self,
operation_type: &str,
operation_size: u64,
) -> Result<serde_json::Value, Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota-check/{}", self.env.url, self.bucket_name);
let check_request = serde_json::json!({
"operation_type": operation_type,
"operation_size": operation_size
});
let response = awscurl_post(&url, &check_request.to_string(), &self.env.access_key, &self.env.secret_key).await?;
if response.contains("error") {
Err(format!("Failed to check quota: {}", response).into())
} else {
Ok(serde_json::from_str(&response)?)
}
}
pub async fn upload_object(&self, key: &str, size_bytes: usize) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let data = vec![0u8; size_bytes];
self.client
.put_object()
.bucket(&self.bucket_name)
.key(key)
.body(aws_sdk_s3::primitives::ByteStream::from(data))
.send()
.await?;
Ok(())
}
pub async fn object_exists(&self, key: &str) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> {
match self.client.head_object().bucket(&self.bucket_name).key(key).send().await {
Ok(_) => Ok(true),
Err(e) => {
// Check for any 404-related errors and return false instead of propagating
let error_str = e.to_string();
if error_str.contains("404") || error_str.contains("Not Found") || error_str.contains("NotFound") {
Ok(false)
} else {
// Also check the error code directly
if let Some(service_err) = e.as_service_error()
&& service_err.is_not_found()
{
return Ok(false);
}
Err(e.into())
}
}
}
}
pub async fn get_bucket_usage(&self) -> Result<u64, Box<dyn std::error::Error + Send + Sync>> {
let stats = self.get_bucket_quota_stats().await?;
Ok(stats.get("current_usage").and_then(|v| v.as_u64()).unwrap_or(0))
}
pub async fn set_bucket_quota_for(
&self,
bucket: &str,
quota_bytes: u64,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, bucket);
let quota_config = serde_json::json!({
"quota": quota_bytes,
"quota_type": "HARD"
});
let response = awscurl_put(&url, &quota_config.to_string(), &self.env.access_key, &self.env.secret_key).await?;
if response.contains("error") {
Err(format!("Failed to set quota: {}", response).into())
} else {
Ok(())
}
}
/// Get bucket quota statistics for specific bucket
pub async fn get_bucket_quota_stats_for(
&self,
bucket: &str,
) -> Result<serde_json::Value, Box<dyn std::error::Error + Send + Sync>> {
debug!("Getting quota stats for bucket: {}", bucket);
let url = format!("{}/rustfs/admin/v3/quota-stats/{}", self.env.url, bucket);
let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;
if response.contains("error") {
Err(format!("Failed to get quota stats: {}", response).into())
} else {
let stats: serde_json::Value = serde_json::from_str(&response)?;
Ok(stats)
}
}
/// Upload an object to specific bucket
pub async fn upload_object_to_bucket(
&self,
bucket: &str,
key: &str,
size_bytes: usize,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
debug!("Uploading object {} with size {} bytes to bucket {}", key, size_bytes, bucket);
let data = vec![0u8; size_bytes];
self.client
.put_object()
.bucket(bucket)
.key(key)
.body(aws_sdk_s3::primitives::ByteStream::from(data))
.send()
.await?;
info!("Successfully uploaded object: {} ({} bytes) to bucket: {}", key, size_bytes, bucket);
Ok(())
}
}
#[cfg(test)]
mod integration_tests {
use super::*;
#[tokio::test]
#[serial]
async fn test_quota_basic_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
// Create test bucket
env.create_bucket().await?;
// Set quota of 1MB
env.set_bucket_quota(1024 * 1024).await?;
// Verify quota is set
let quota = env.get_bucket_quota().await?;
assert_eq!(quota, Some(1024 * 1024));
// Upload a 512KB object (should succeed)
env.upload_object("test1.txt", 512 * 1024).await?;
assert!(env.object_exists("test1.txt").await?);
// Upload another 512KB object (should succeed, total 1MB)
env.upload_object("test2.txt", 512 * 1024).await?;
assert!(env.object_exists("test2.txt").await?);
// Try to upload 1KB more (should fail due to quota)
let upload_result = env.upload_object("test3.txt", 1024).await;
assert!(upload_result.is_err());
assert!(!env.object_exists("test3.txt").await?);
// Clean up
env.clear_bucket_quota().await?;
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_update_and_clear() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Set initial quota
env.set_bucket_quota(512 * 1024).await?;
assert_eq!(env.get_bucket_quota().await?, Some(512 * 1024));
// Update quota to larger size
env.set_bucket_quota(2 * 1024 * 1024).await?;
assert_eq!(env.get_bucket_quota().await?, Some(2 * 1024 * 1024));
// Upload 1MB object (should succeed with new quota)
env.upload_object("large_file.txt", 1024 * 1024).await?;
assert!(env.object_exists("large_file.txt").await?);
// Clear quota
env.clear_bucket_quota().await?;
assert_eq!(env.get_bucket_quota().await?, None);
// Upload another large object (should succeed with no quota)
env.upload_object("unlimited_file.txt", 5 * 1024 * 1024).await?;
assert!(env.object_exists("unlimited_file.txt").await?);
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_delete_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Set quota of 1MB
env.set_bucket_quota(1024 * 1024).await?;
// Fill up to quota limit
env.upload_object("file1.txt", 512 * 1024).await?;
env.upload_object("file2.txt", 512 * 1024).await?;
// Delete one file
env.client
.delete_object()
.bucket(&env.bucket_name)
.key("file1.txt")
.send()
.await?;
assert!(!env.object_exists("file1.txt").await?);
// Now we should be able to upload again (quota freed up)
env.upload_object("file3.txt", 256 * 1024).await?;
assert!(env.object_exists("file3.txt").await?);
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_usage_tracking() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Set quota
env.set_bucket_quota(2 * 1024 * 1024).await?;
// Upload some files
env.upload_object("file1.txt", 512 * 1024).await?;
env.upload_object("file2.txt", 256 * 1024).await?;
// Check usage
let usage = env.get_bucket_usage().await?;
assert_eq!(usage, (512 + 256) * 1024);
// Delete a file
env.client
.delete_object()
.bucket(&env.bucket_name)
.key("file1.txt")
.send()
.await?;
// Check updated usage
let updated_usage = env.get_bucket_usage().await?;
assert_eq!(updated_usage, 256 * 1024);
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_statistics() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Set quota of 2MB
env.set_bucket_quota(2 * 1024 * 1024).await?;
// Upload files to use 1.5MB
env.upload_object("file1.txt", 1024 * 1024).await?;
env.upload_object("file2.txt", 512 * 1024).await?;
// Get detailed quota statistics
let stats = env.get_bucket_quota_stats().await?;
assert_eq!(stats.get("bucket").unwrap().as_str().unwrap(), env.bucket_name);
assert_eq!(stats.get("quota_limit").unwrap().as_u64().unwrap(), 2 * 1024 * 1024);
assert_eq!(stats.get("current_usage").unwrap().as_u64().unwrap(), (1024 + 512) * 1024);
assert_eq!(stats.get("remaining_quota").unwrap().as_u64().unwrap(), 512 * 1024);
let usage_percentage = stats.get("usage_percentage").unwrap().as_f64().unwrap();
assert!((usage_percentage - 75.0).abs() < 0.1);
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_check_api() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Set quota of 1MB
env.set_bucket_quota(1024 * 1024).await?;
// Upload 512KB file
env.upload_object("existing_file.txt", 512 * 1024).await?;
// Check if we can upload another 512KB (should succeed, exactly fill quota)
let check_result = env.check_bucket_quota("PUT", 512 * 1024).await?;
assert!(check_result.get("allowed").unwrap().as_bool().unwrap());
assert_eq!(check_result.get("remaining_quota").unwrap().as_u64().unwrap(), 0);
// Note: we haven't actually uploaded the second file yet, so current_usage is still 512KB
// Check if we can upload 1KB (should succeed - we haven't used the full quota yet)
let check_result = env.check_bucket_quota("PUT", 1024).await?;
assert!(check_result.get("allowed").unwrap().as_bool().unwrap());
assert_eq!(check_result.get("remaining_quota").unwrap().as_u64().unwrap(), 512 * 1024 - 1024);
// Check if we can upload 600KB (should fail - would exceed quota)
let check_result = env.check_bucket_quota("PUT", 600 * 1024).await?;
assert!(!check_result.get("allowed").unwrap().as_bool().unwrap());
// Check delete operation (should always be allowed)
let check_result = env.check_bucket_quota("DELETE", 512 * 1024).await?;
assert!(check_result.get("allowed").unwrap().as_bool().unwrap());
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_multiple_buckets() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
// Create two buckets in the same environment
let bucket1 = format!("quota-test-{}-1", uuid::Uuid::new_v4());
let bucket2 = format!("quota-test-{}-2", uuid::Uuid::new_v4());
env.env.create_test_bucket(&bucket1).await?;
env.env.create_test_bucket(&bucket2).await?;
// Set different quotas for each bucket
env.set_bucket_quota_for(&bucket1, 1024 * 1024).await?; // 1MB
env.set_bucket_quota_for(&bucket2, 2 * 1024 * 1024).await?; // 2MB
// Fill first bucket to quota
env.upload_object_to_bucket(&bucket1, "big_file.txt", 1024 * 1024).await?;
// Should still be able to upload to second bucket
env.upload_object_to_bucket(&bucket2, "big_file.txt", 1024 * 1024).await?;
env.upload_object_to_bucket(&bucket2, "another_file.txt", 512 * 1024).await?;
// Verify statistics are independent
let stats1 = env.get_bucket_quota_stats_for(&bucket1).await?;
let stats2 = env.get_bucket_quota_stats_for(&bucket2).await?;
assert_eq!(stats1.get("current_usage").unwrap().as_u64().unwrap(), 1024 * 1024);
assert_eq!(stats2.get("current_usage").unwrap().as_u64().unwrap(), (1024 + 512) * 1024);
// Clean up
env.env.delete_test_bucket(&bucket1).await?;
env.env.delete_test_bucket(&bucket2).await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_error_handling() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Test invalid quota type
let url = format!("{}/rustfs/admin/v3/quota/{}", env.env.url, env.bucket_name);
let invalid_config = serde_json::json!({
"quota": 1024,
"quota_type": "SOFT" // Invalid type
});
let response = awscurl_put(&url, &invalid_config.to_string(), &env.env.access_key, &env.env.secret_key).await;
assert!(response.is_err());
let error_msg = response.unwrap_err().to_string();
assert!(error_msg.contains("InvalidArgument"));
// Test operations on non-existent bucket
let url = format!("{}/rustfs/admin/v3/quota/non-existent-bucket", env.env.url);
let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await;
assert!(response.is_err());
let error_msg = response.unwrap_err().to_string();
assert!(error_msg.contains("NoSuchBucket"));
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_http_endpoints() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Test 1: GET quota for bucket without quota config
let url = format!("{}/rustfs/admin/v3/quota/{}", env.env.url, env.bucket_name);
let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
assert!(response.contains("quota") && response.contains("null"));
// Test 2: PUT quota - valid config
let quota_config = serde_json::json!({
"quota": 1048576,
"quota_type": "HARD"
});
let response = awscurl_put(&url, &quota_config.to_string(), &env.env.access_key, &env.env.secret_key).await?;
assert!(response.contains("success") || !response.contains("error"));
// Test 3: GET quota after setting
let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
assert!(response.contains("1048576"));
// Test 4: GET quota stats
let stats_url = format!("{}/rustfs/admin/v3/quota-stats/{}", env.env.url, env.bucket_name);
let response = awscurl_get(&stats_url, &env.env.access_key, &env.env.secret_key).await?;
assert!(response.contains("quota_limit") && response.contains("current_usage"));
// Test 5: POST quota check
let check_url = format!("{}/rustfs/admin/v3/quota-check/{}", env.env.url, env.bucket_name);
let check_request = serde_json::json!({
"operation_type": "PUT",
"operation_size": 1024
});
let response = awscurl_post(&check_url, &check_request.to_string(), &env.env.access_key, &env.env.secret_key).await?;
assert!(response.contains("allowed"));
// Test 6: DELETE quota
let response = awscurl_delete(&url, &env.env.access_key, &env.env.secret_key).await?;
assert!(!response.contains("error"));
// Test 7: GET quota after deletion
let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
assert!(response.contains("quota") && response.contains("null"));
// Test 8: Invalid quota type
let invalid_config = serde_json::json!({
"quota": 1024,
"quota_type": "SOFT"
});
let response = awscurl_put(&url, &invalid_config.to_string(), &env.env.access_key, &env.env.secret_key).await;
assert!(response.is_err());
let error_msg = response.unwrap_err().to_string();
assert!(error_msg.contains("InvalidArgument"));
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_copy_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Set quota of 2MB
env.set_bucket_quota(2 * 1024 * 1024).await?;
// Upload initial file
env.upload_object("original.txt", 1024 * 1024).await?;
// Copy file - should succeed (1MB each, total 2MB)
env.client
.copy_object()
.bucket(&env.bucket_name)
.key("copy1.txt")
.copy_source(format!("{}/{}", env.bucket_name, "original.txt"))
.send()
.await?;
assert!(env.object_exists("copy1.txt").await?);
// Try to copy again - should fail (1MB each, total 3MB > 2MB quota)
let copy_result = env
.client
.copy_object()
.bucket(&env.bucket_name)
.key("copy2.txt")
.copy_source(format!("{}/{}", env.bucket_name, "original.txt"))
.send()
.await;
assert!(copy_result.is_err());
assert!(!env.object_exists("copy2.txt").await?);
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_batch_delete() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Set quota of 2MB
env.set_bucket_quota(2 * 1024 * 1024).await?;
// Upload files to fill quota
env.upload_object("file1.txt", 1024 * 1024).await?;
env.upload_object("file2.txt", 1024 * 1024).await?;
// Verify quota is full
let upload_result = env.upload_object("file3.txt", 1024).await;
assert!(upload_result.is_err());
// Delete multiple objects using batch delete
let objects = vec![
aws_sdk_s3::types::ObjectIdentifier::builder()
.key("file1.txt")
.build()
.unwrap(),
aws_sdk_s3::types::ObjectIdentifier::builder()
.key("file2.txt")
.build()
.unwrap(),
];
let delete_result = env
.client
.delete_objects()
.bucket(&env.bucket_name)
.delete(
aws_sdk_s3::types::Delete::builder()
.set_objects(Some(objects))
.quiet(true)
.build()
.unwrap(),
)
.send()
.await?;
assert_eq!(delete_result.deleted().len(), 2);
// Now should be able to upload again (quota freed up)
env.upload_object("file3.txt", 256 * 1024).await?;
assert!(env.object_exists("file3.txt").await?);
env.cleanup_bucket().await?;
Ok(())
}
#[tokio::test]
#[serial]
async fn test_quota_multipart_upload() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let env = QuotaTestEnv::new().await?;
env.create_bucket().await?;
// Set quota of 10MB
env.set_bucket_quota(10 * 1024 * 1024).await?;
let key = "multipart_test.txt";
let part_size = 5 * 1024 * 1024; // 5MB minimum per part (S3 requirement)
// Test 1: Multipart upload within quota (single 5MB part)
let create_result = env
.client
.create_multipart_upload()
.bucket(&env.bucket_name)
.key(key)
.send()
.await?;
let upload_id = create_result.upload_id().unwrap();
// Upload a single 5MB part (the 5MB minimum applies only to non-final parts; a sole part may be any size)
let part_data = vec![1u8; part_size];
let part_result = env
.client
.upload_part()
.bucket(&env.bucket_name)
.key(key)
.upload_id(upload_id)
.part_number(1)
.body(aws_sdk_s3::primitives::ByteStream::from(part_data))
.send()
.await?;
let uploaded_parts = vec![
aws_sdk_s3::types::CompletedPart::builder()
.part_number(1)
.e_tag(part_result.e_tag().unwrap())
.build(),
];
env.client
.complete_multipart_upload()
.bucket(&env.bucket_name)
.key(key)
.upload_id(upload_id)
.multipart_upload(
aws_sdk_s3::types::CompletedMultipartUpload::builder()
.set_parts(Some(uploaded_parts))
.build(),
)
.send()
.await?;
assert!(env.object_exists(key).await?);
// Test 2: Multipart upload exceeds quota (should fail)
// Upload 6MB filler (total now: 5MB + 6MB = 11MB > 10MB quota)
let upload_filler = env.upload_object("filler.txt", 6 * 1024 * 1024).await;
// This should fail due to quota
assert!(upload_filler.is_err());
// Verify filler doesn't exist
assert!(!env.object_exists("filler.txt").await?);
// Now try a multipart upload that exceeds quota
// Current usage: 5MB (from Test 1), quota: 10MB
// Trying to upload another 10MB (two 5MB parts) via multipart → should fail
let create_result2 = env
.client
.create_multipart_upload()
.bucket(&env.bucket_name)
.key("over_quota.txt")
.send()
.await?;
let upload_id2 = create_result2.upload_id().unwrap();
let mut uploaded_parts2 = vec![];
for part_num in 1..=2 {
let part_data = vec![part_num as u8; part_size];
let part_result = env
.client
.upload_part()
.bucket(&env.bucket_name)
.key("over_quota.txt")
.upload_id(upload_id2)
.part_number(part_num)
.body(aws_sdk_s3::primitives::ByteStream::from(part_data))
.send()
.await?;
uploaded_parts2.push(
aws_sdk_s3::types::CompletedPart::builder()
.part_number(part_num)
.e_tag(part_result.e_tag().unwrap())
.build(),
);
}
let complete_result = env
.client
.complete_multipart_upload()
.bucket(&env.bucket_name)
.key("over_quota.txt")
.upload_id(upload_id2)
.multipart_upload(
aws_sdk_s3::types::CompletedMultipartUpload::builder()
.set_parts(Some(uploaded_parts2))
.build(),
)
.send()
.await;
assert!(complete_result.is_err());
assert!(!env.object_exists("over_quota.txt").await?);
env.cleanup_bucket().await?;
Ok(())
}
}

View File

@@ -48,6 +48,7 @@ async-trait.workspace = true
bytes.workspace = true
byteorder = { workspace = true }
chrono.workspace = true
dunce.workspace = true
glob = { workspace = true }
thiserror.workspace = true
flatbuffers.workspace = true
@@ -109,7 +110,6 @@ google-cloud-auth = { workspace = true }
aws-config = { workspace = true }
faster-hex = { workspace = true }
[dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
criterion = { workspace = true, features = ["html_reports"] }

View File

@@ -355,7 +355,7 @@ impl BucketMetadata {
self.tagging_config = Some(deserialize::<Tagging>(&self.tagging_config_xml)?);
}
if !self.quota_config_json.is_empty() {
self.quota_config = Some(BucketQuota::unmarshal(&self.quota_config_json)?);
self.quota_config = Some(serde_json::from_slice(&self.quota_config_json)?);
}
if !self.replication_config_xml.is_empty() {
self.replication_config = Some(deserialize::<ReplicationConfiguration>(&self.replication_config_xml)?);
@@ -487,7 +487,8 @@ mod test {
bm.tagging_config_updated_at = OffsetDateTime::now_utc();
// Add quota configuration
let quota_json = r#"{"quota":1073741824,"quotaType":"hard"}"#; // 1GB quota
let quota_json =
r#"{"quota":1073741824,"quota_type":"Hard","created_at":"2024-01-01T00:00:00Z","updated_at":"2024-01-01T00:00:00Z"}"#; // 1GB quota
bm.quota_config_json = quota_json.as_bytes().to_vec();
bm.quota_config_updated_at = OffsetDateTime::now_utc();
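For reference, a standalone sketch of the new quota JSON shape using a lookalike struct (created_at is simplified to a plain string here; the real field is a time::OffsetDateTime whose serde format is not shown in this diff). It illustrates that serde accepts the payload and, by default, ignores the extra updated_at field rather than rejecting it:

use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
enum QuotaType { Hard }

#[derive(Debug, Deserialize)]
struct QuotaDoc { // lookalike of BucketQuota, for illustration only
    quota: Option<u64>,
    quota_type: QuotaType,
    created_at: Option<String>, // real field: time::OffsetDateTime
}

fn main() {
    let json = r#"{"quota":1073741824,"quota_type":"Hard","created_at":"2024-01-01T00:00:00Z","updated_at":"2024-01-01T00:00:00Z"}"#;
    let q: QuotaDoc = serde_json::from_str(json).expect("valid quota JSON");
    assert_eq!(q.quota, Some(1_073_741_824));   // 1 GiB
    assert_eq!(q.quota_type, QuotaType::Hard);  // matches the default variant
    assert!(q.created_at.is_some());            // unknown "updated_at" ignored unless deny_unknown_fields is set
}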

View File

@@ -0,0 +1,195 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{BucketQuota, QuotaCheckResult, QuotaError, QuotaOperation};
use crate::bucket::metadata_sys::{BucketMetadataSys, update};
use crate::data_usage::get_bucket_usage_memory;
use rustfs_common::metrics::Metric;
use rustfs_config::QUOTA_CONFIG_FILE;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
use tracing::{debug, warn};
pub struct QuotaChecker {
metadata_sys: Arc<RwLock<BucketMetadataSys>>,
}
impl QuotaChecker {
pub fn new(metadata_sys: Arc<RwLock<BucketMetadataSys>>) -> Self {
Self { metadata_sys }
}
pub async fn check_quota(
&self,
bucket: &str,
operation: QuotaOperation,
operation_size: u64,
) -> Result<QuotaCheckResult, QuotaError> {
let start_time = Instant::now();
let quota_config = self.get_quota_config(bucket).await?;
// If no quota limit is set, allow operation
let quota_limit = match quota_config.quota {
None => {
let current_usage = self.get_real_time_usage(bucket).await?;
return Ok(QuotaCheckResult {
allowed: true,
current_usage,
quota_limit: None,
operation_size,
remaining: None,
});
}
Some(q) => q,
};
let current_usage = self.get_real_time_usage(bucket).await?;
let expected_usage = match operation {
QuotaOperation::PutObject | QuotaOperation::CopyObject => current_usage + operation_size,
QuotaOperation::DeleteObject => current_usage.saturating_sub(operation_size),
};
let allowed = match operation {
QuotaOperation::PutObject | QuotaOperation::CopyObject => {
quota_config.check_operation_allowed(current_usage, operation_size)
}
QuotaOperation::DeleteObject => true,
};
let remaining = if quota_limit >= expected_usage {
Some(quota_limit - expected_usage)
} else {
Some(0)
};
if !allowed {
warn!(
"Quota exceeded for bucket: {}, current: {}, limit: {}, attempted: {}",
bucket, current_usage, quota_limit, operation_size
);
}
let result = QuotaCheckResult {
allowed,
current_usage,
quota_limit: Some(quota_limit),
operation_size,
remaining,
};
let duration = start_time.elapsed();
rustfs_common::metrics::Metrics::inc_time(Metric::QuotaCheck, duration).await;
if !allowed {
rustfs_common::metrics::Metrics::inc_time(Metric::QuotaViolation, duration).await;
}
Ok(result)
}
pub async fn get_quota_config(&self, bucket: &str) -> Result<BucketQuota, QuotaError> {
let meta = self
.metadata_sys
.read()
.await
.get(bucket)
.await
.map_err(QuotaError::StorageError)?;
if meta.quota_config_json.is_empty() {
debug!("No quota config found for bucket: {}, using default", bucket);
return Ok(BucketQuota::new(None));
}
let quota: BucketQuota = serde_json::from_slice(&meta.quota_config_json).map_err(|e| QuotaError::InvalidConfig {
reason: format!("Failed to parse quota config: {}", e),
})?;
Ok(quota)
}
pub async fn set_quota_config(&mut self, bucket: &str, quota: BucketQuota) -> Result<(), QuotaError> {
let json_data = serde_json::to_vec(&quota).map_err(|e| QuotaError::InvalidConfig {
reason: format!("Failed to serialize quota config: {}", e),
})?;
let start_time = Instant::now();
update(bucket, QUOTA_CONFIG_FILE, json_data)
.await
.map_err(QuotaError::StorageError)?;
rustfs_common::metrics::Metrics::inc_time(Metric::QuotaSync, start_time.elapsed()).await;
Ok(())
}
pub async fn get_quota_stats(&self, bucket: &str) -> Result<(BucketQuota, Option<u64>), QuotaError> {
// If bucket doesn't exist, return ConfigNotFound error
if !self.bucket_exists(bucket).await {
return Err(QuotaError::ConfigNotFound {
bucket: bucket.to_string(),
});
}
let quota = self.get_quota_config(bucket).await?;
let current_usage = self.get_real_time_usage(bucket).await.unwrap_or(0);
Ok((quota, Some(current_usage)))
}
pub async fn bucket_exists(&self, bucket: &str) -> bool {
self.metadata_sys.read().await.get(bucket).await.is_ok()
}
pub async fn get_real_time_usage(&self, bucket: &str) -> Result<u64, QuotaError> {
Ok(get_bucket_usage_memory(bucket).await.unwrap_or(0))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_quota_check_no_limit() {
let result = QuotaCheckResult {
allowed: true,
current_usage: 0,
quota_limit: None,
operation_size: 1024,
remaining: None,
};
assert!(result.allowed);
assert_eq!(result.quota_limit, None);
}
#[tokio::test]
async fn test_quota_check_within_limit() {
let quota = BucketQuota::new(Some(2048)); // 2KB
// Current usage 512, trying to add 1024
let allowed = quota.check_operation_allowed(512, 1024);
assert!(allowed);
}
#[tokio::test]
async fn test_quota_check_exceeds_limit() {
let quota = BucketQuota::new(Some(1024)); // 1KB
// Current usage 512, trying to add 1024
let allowed = quota.check_operation_allowed(512, 1024);
assert!(!allowed);
}
}

View File

@@ -12,36 +12,37 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod checker;
use crate::error::Result;
use rmp_serde::Serializer as rmpSerializer;
use rustfs_config::{
QUOTA_API_PATH, QUOTA_EXCEEDED_ERROR_CODE, QUOTA_INTERNAL_ERROR_CODE, QUOTA_INVALID_CONFIG_ERROR_CODE,
QUOTA_NOT_FOUND_ERROR_CODE,
};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use time::OffsetDateTime;
// Define the QuotaType enum
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum QuotaType {
/// Hard quota: reject immediately when exceeded
#[default]
Hard,
}
// Define the BucketQuota structure
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
#[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq)]
pub struct BucketQuota {
quota: Option<u64>, // Use Option to represent optional fields
size: u64,
rate: u64,
requests: u64,
quota_type: Option<QuotaType>,
pub quota: Option<u64>,
pub quota_type: QuotaType,
/// Timestamp when this quota configuration was set (for audit purposes)
pub created_at: Option<OffsetDateTime>,
}
impl BucketQuota {
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut buf = Vec::new();
self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;
Ok(buf)
}
@@ -49,4 +50,107 @@ impl BucketQuota {
let t: BucketQuota = rmp_serde::from_slice(buf)?;
Ok(t)
}
pub fn new(quota: Option<u64>) -> Self {
let now = OffsetDateTime::now_utc();
Self {
quota,
quota_type: QuotaType::Hard,
created_at: Some(now),
}
}
pub fn get_quota_limit(&self) -> Option<u64> {
self.quota
}
pub fn check_operation_allowed(&self, current_usage: u64, operation_size: u64) -> bool {
if let Some(quota_limit) = self.quota {
current_usage.saturating_add(operation_size) <= quota_limit
} else {
true // No quota limit
}
}
pub fn get_remaining_quota(&self, current_usage: u64) -> Option<u64> {
self.quota.map(|limit| limit.saturating_sub(current_usage))
}
}
#[derive(Debug)]
pub struct QuotaCheckResult {
pub allowed: bool,
pub current_usage: u64,
/// quota_limit: None means unlimited
pub quota_limit: Option<u64>,
pub operation_size: u64,
pub remaining: Option<u64>,
}
#[derive(Debug)]
pub enum QuotaOperation {
PutObject,
CopyObject,
DeleteObject,
}
#[derive(Debug, Error)]
pub enum QuotaError {
#[error("Bucket quota exceeded: current={current}, limit={limit}, operation={operation}")]
QuotaExceeded { current: u64, limit: u64, operation: u64 },
#[error("Quota configuration not found for bucket: {bucket}")]
ConfigNotFound { bucket: String },
#[error("Invalid quota configuration: {reason}")]
InvalidConfig { reason: String },
#[error("Storage error: {0}")]
StorageError(#[from] crate::error::StorageError),
}
#[derive(Debug, Serialize)]
pub struct QuotaErrorResponse {
#[serde(rename = "Code")]
pub code: String,
#[serde(rename = "Message")]
pub message: String,
#[serde(rename = "Resource")]
pub resource: String,
#[serde(rename = "RequestId")]
pub request_id: String,
#[serde(rename = "HostId")]
pub host_id: String,
}
impl QuotaErrorResponse {
pub fn new(quota_error: &QuotaError, request_id: &str, host_id: &str) -> Self {
match quota_error {
QuotaError::QuotaExceeded { .. } => Self {
code: QUOTA_EXCEEDED_ERROR_CODE.to_string(),
message: quota_error.to_string(),
resource: QUOTA_API_PATH.to_string(),
request_id: request_id.to_string(),
host_id: host_id.to_string(),
},
QuotaError::ConfigNotFound { .. } => Self {
code: QUOTA_NOT_FOUND_ERROR_CODE.to_string(),
message: quota_error.to_string(),
resource: QUOTA_API_PATH.to_string(),
request_id: request_id.to_string(),
host_id: host_id.to_string(),
},
QuotaError::InvalidConfig { .. } => Self {
code: QUOTA_INVALID_CONFIG_ERROR_CODE.to_string(),
message: quota_error.to_string(),
resource: QUOTA_API_PATH.to_string(),
request_id: request_id.to_string(),
host_id: host_id.to_string(),
},
QuotaError::StorageError(_) => Self {
code: QUOTA_INTERNAL_ERROR_CODE.to_string(),
message: quota_error.to_string(),
resource: QUOTA_API_PATH.to_string(),
request_id: request_id.to_string(),
host_id: host_id.to_string(),
},
}
}
}
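The admission rule in check_operation_allowed and the remaining-quota math above reduce to saturating integer arithmetic. A standalone sketch (plain Rust, function names illustrative rather than the rustfs API) walking a concrete 2 MiB hard quota:

// Illustrative restatement of the quota math; not the rustfs types.
fn allowed(limit: Option<u64>, current: u64, op: u64) -> bool {
    limit.map(|l| current.saturating_add(op) <= l).unwrap_or(true)
}
fn remaining(limit: Option<u64>, usage: u64) -> Option<u64> {
    limit.map(|l| l.saturating_sub(usage))
}
fn main() {
    let limit = Some(2 * 1024 * 1024); // 2 MiB hard quota
    assert!(allowed(limit, 512 * 1024, 1024 * 1024));        // 0.5 MiB used + 1 MiB put = 1.5 MiB, allowed
    assert!(!allowed(limit, 512 * 1024, 2 * 1024 * 1024));   // 0.5 MiB + 2 MiB would exceed the limit
    assert_eq!(remaining(limit, 1536 * 1024), Some(512 * 1024)); // 0.5 MiB left after the allowed put
    assert!(allowed(None, u64::MAX, 1024));                  // no limit configured: always allowed
}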

View File

@@ -14,7 +14,7 @@
use crate::disk::RUSTFS_META_BUCKET;
use crate::error::{Error, Result, StorageError};
use rustfs_utils::path::SLASH_SEPARATOR;
use rustfs_utils::path::SLASH_SEPARATOR_STR;
use s3s::xml;
pub fn is_meta_bucketname(name: &str) -> bool {
@@ -194,7 +194,7 @@ pub fn is_valid_object_name(object: &str) -> bool {
return false;
}
if object.ends_with(SLASH_SEPARATOR) {
if object.ends_with(SLASH_SEPARATOR_STR) {
return false;
}
@@ -206,7 +206,7 @@ pub fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Res
return Err(StorageError::ObjectNameTooLong(bucket.to_owned(), object.to_owned()));
}
if object.starts_with(SLASH_SEPARATOR) {
if object.starts_with(SLASH_SEPARATOR_STR) {
return Err(StorageError::ObjectNamePrefixAsSlash(bucket.to_owned(), object.to_owned()));
}

View File

@@ -18,7 +18,7 @@ use crate::error::{Error, Result};
use crate::store_api::{ObjectInfo, ObjectOptions, PutObjReader, StorageAPI};
use http::HeaderMap;
use rustfs_config::DEFAULT_DELIMITER;
use rustfs_utils::path::SLASH_SEPARATOR;
use rustfs_utils::path::SLASH_SEPARATOR_STR;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::LazyLock;
@@ -29,7 +29,7 @@ const CONFIG_FILE: &str = "config.json";
pub const STORAGE_CLASS_SUB_SYS: &str = "storage_class";
static CONFIG_BUCKET: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_META_BUCKET}{SLASH_SEPARATOR}{CONFIG_PREFIX}"));
static CONFIG_BUCKET: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_META_BUCKET}{SLASH_SEPARATOR_STR}{CONFIG_PREFIX}"));
static SUB_SYSTEMS_DYNAMIC: LazyLock<HashSet<String>> = LazyLock::new(|| {
let mut h = HashSet::new();
@@ -129,7 +129,7 @@ async fn new_and_save_server_config<S: StorageAPI>(api: Arc<S>) -> Result<Config
}
fn get_config_file() -> String {
format!("{CONFIG_PREFIX}{SLASH_SEPARATOR}{CONFIG_FILE}")
format!("{CONFIG_PREFIX}{SLASH_SEPARATOR_STR}{CONFIG_FILE}")
}
/// Handle the situation where the configuration file does not exist, create and save a new configuration

View File

@@ -15,8 +15,10 @@
use std::{
collections::{HashMap, hash_map::Entry},
sync::Arc,
time::SystemTime,
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tracing::debug;
pub mod local_snapshot;
pub use local_snapshot::{
@@ -31,33 +33,49 @@ use crate::{
use rustfs_common::data_usage::{
BucketTargetUsageInfo, BucketUsageInfo, DataUsageCache, DataUsageEntry, DataUsageInfo, DiskUsageStatus, SizeSummary,
};
use rustfs_utils::path::SLASH_SEPARATOR;
use rustfs_utils::path::SLASH_SEPARATOR_STR;
use std::sync::OnceLock;
use tokio::fs;
use tracing::{error, info, warn};
use crate::error::Error;
// Data usage storage constants
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR_STR;
const DATA_USAGE_OBJ_NAME: &str = ".usage.json";
const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin";
pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin";
const DATA_USAGE_CACHE_TTL_SECS: u64 = 30;
type UsageMemoryCache = Arc<RwLock<HashMap<String, (u64, SystemTime)>>>;
type CacheUpdating = Arc<RwLock<bool>>;
static USAGE_MEMORY_CACHE: OnceLock<UsageMemoryCache> = OnceLock::new();
static USAGE_CACHE_UPDATING: OnceLock<CacheUpdating> = OnceLock::new();
fn memory_cache() -> &'static UsageMemoryCache {
USAGE_MEMORY_CACHE.get_or_init(|| Arc::new(RwLock::new(HashMap::new())))
}
fn cache_updating() -> &'static CacheUpdating {
USAGE_CACHE_UPDATING.get_or_init(|| Arc::new(RwLock::new(false)))
}
// Data usage storage paths
lazy_static::lazy_static! {
pub static ref DATA_USAGE_BUCKET: String = format!("{}{}{}",
crate::disk::RUSTFS_META_BUCKET,
SLASH_SEPARATOR,
SLASH_SEPARATOR_STR,
crate::disk::BUCKET_META_PREFIX
);
pub static ref DATA_USAGE_OBJ_NAME_PATH: String = format!("{}{}{}",
crate::disk::BUCKET_META_PREFIX,
SLASH_SEPARATOR,
SLASH_SEPARATOR_STR,
DATA_USAGE_OBJ_NAME
);
pub static ref DATA_USAGE_BLOOM_NAME_PATH: String = format!("{}{}{}",
crate::disk::BUCKET_META_PREFIX,
SLASH_SEPARATOR,
SLASH_SEPARATOR_STR,
DATA_USAGE_BLOOM_NAME
);
}
@@ -364,8 +382,120 @@ pub async fn compute_bucket_usage(store: Arc<ECStore>, bucket_name: &str) -> Res
Ok(usage)
}
/// Fast in-memory increment for immediate quota consistency
pub async fn increment_bucket_usage_memory(bucket: &str, size_increment: u64) {
let mut cache = memory_cache().write().await;
let current = cache.entry(bucket.to_string()).or_insert_with(|| (0, SystemTime::now()));
current.0 += size_increment;
current.1 = SystemTime::now();
}
/// Fast in-memory decrement for immediate quota consistency
pub async fn decrement_bucket_usage_memory(bucket: &str, size_decrement: u64) {
let mut cache = memory_cache().write().await;
if let Some(current) = cache.get_mut(bucket) {
current.0 = current.0.saturating_sub(size_decrement);
current.1 = SystemTime::now();
}
}
/// Get bucket usage from in-memory cache
pub async fn get_bucket_usage_memory(bucket: &str) -> Option<u64> {
update_usage_cache_if_needed().await;
let cache = memory_cache().read().await;
cache.get(bucket).map(|(usage, _)| *usage)
}
async fn update_usage_cache_if_needed() {
let ttl = Duration::from_secs(DATA_USAGE_CACHE_TTL_SECS);
let double_ttl = ttl * 2;
let now = SystemTime::now();
let cache = memory_cache().read().await;
let earliest_timestamp = cache.values().map(|(_, ts)| *ts).min();
drop(cache);
let age = match earliest_timestamp {
Some(ts) => now.duration_since(ts).unwrap_or_default(),
None => double_ttl,
};
if age < ttl {
return;
}
let mut updating = cache_updating().write().await;
if age < double_ttl {
if *updating {
return;
}
*updating = true;
drop(updating);
let cache_clone = (*memory_cache()).clone();
let updating_clone = (*cache_updating()).clone();
tokio::spawn(async move {
if let Some(store) = crate::global::GLOBAL_OBJECT_API.get()
&& let Ok(data_usage_info) = load_data_usage_from_backend(store.clone()).await
{
let mut cache = cache_clone.write().await;
for (bucket_name, bucket_usage) in data_usage_info.buckets_usage.iter() {
cache.insert(bucket_name.clone(), (bucket_usage.size, SystemTime::now()));
}
}
let mut updating = updating_clone.write().await;
*updating = false;
});
return;
}
for retry in 0..10 {
if !*updating {
break;
}
drop(updating);
let delay = Duration::from_millis(1 << retry);
tokio::time::sleep(delay).await;
updating = cache_updating().write().await;
}
*updating = true;
drop(updating);
if let Some(store) = crate::global::GLOBAL_OBJECT_API.get()
&& let Ok(data_usage_info) = load_data_usage_from_backend(store.clone()).await
{
let mut cache = memory_cache().write().await;
for (bucket_name, bucket_usage) in data_usage_info.buckets_usage.iter() {
cache.insert(bucket_name.clone(), (bucket_usage.size, SystemTime::now()));
}
}
let mut updating = cache_updating().write().await;
*updating = false;
}
/// Sync memory cache with backend data (called by scanner)
pub async fn sync_memory_cache_with_backend() -> Result<(), Error> {
if let Some(store) = crate::global::GLOBAL_OBJECT_API.get() {
match load_data_usage_from_backend(store.clone()).await {
Ok(data_usage_info) => {
let mut cache = memory_cache().write().await;
for (bucket, bucket_usage) in data_usage_info.buckets_usage.iter() {
cache.insert(bucket.clone(), (bucket_usage.size, SystemTime::now()));
}
}
Err(e) => {
debug!("Failed to sync memory cache with backend: {}", e);
}
}
}
Ok(())
}
/// Build basic data usage info with real object counts
async fn build_basic_data_usage_info(store: Arc<ECStore>) -> Result<DataUsageInfo, Error> {
pub async fn build_basic_data_usage_info(store: Arc<ECStore>) -> Result<DataUsageInfo, Error> {
let mut data_usage_info = DataUsageInfo::default();
// Get bucket list

View File

@@ -12,39 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use super::error::{Error, Result};
use super::os::{is_root_disk, rename_all};
use super::{
BUCKET_META_PREFIX, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics,
FileInfoVersions, RUSTFS_META_BUCKET, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp,
STORAGE_FORMAT_FILE_BACKUP, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, os,
};
use super::{endpoint::Endpoint, error::DiskError, format::FormatV3};
use crate::config::storageclass::DEFAULT_INLINE_BLOCK;
use crate::data_usage::local_snapshot::ensure_data_usage_layout;
use crate::disk::error::FileAccessDeniedWithContext;
use crate::disk::error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error};
use crate::disk::fs::{
O_APPEND, O_CREATE, O_RDONLY, O_TRUNC, O_WRONLY, access, lstat, lstat_std, remove, remove_all_std, remove_std, rename,
};
use crate::disk::os::{check_path_length, is_empty_dir};
use crate::disk::{
CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN, CHECK_PART_VOLUME_NOT_FOUND,
FileReader, RUSTFS_META_TMP_DELETED_BUCKET, conv_part_err_to_int,
BUCKET_META_PREFIX, CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN,
CHECK_PART_VOLUME_NOT_FOUND, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics,
FileInfoVersions, FileReader, FileWriter, RUSTFS_META_BUCKET, RUSTFS_META_TMP_DELETED_BUCKET, ReadMultipleReq,
ReadMultipleResp, ReadOptions, RenameDataResp, STORAGE_FORMAT_FILE, STORAGE_FORMAT_FILE_BACKUP, UpdateMetadataOpts,
VolumeInfo, WalkDirOptions, conv_part_err_to_int,
endpoint::Endpoint,
error::{DiskError, Error, FileAccessDeniedWithContext, Result},
error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error},
format::FormatV3,
fs::{O_APPEND, O_CREATE, O_RDONLY, O_TRUNC, O_WRONLY, access, lstat, lstat_std, remove, remove_all_std, remove_std, rename},
os,
os::{check_path_length, is_empty_dir, is_root_disk, rename_all},
};
use crate::disk::{FileWriter, STORAGE_FORMAT_FILE};
use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold};
use rustfs_utils::path::{
GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR, clean, decode_dir_object, encode_dir_object, has_suffix,
path_join, path_join_buf,
};
use tokio::time::interval;
use crate::erasure_coding::bitrot_verify;
use bytes::Bytes;
// use path_absolutize::Absolutize; // Replaced with direct path operations for better performance
use crate::file_cache::{get_global_file_cache, prefetch_metadata_patterns, read_metadata_cached};
use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold};
use bytes::Bytes;
use parking_lot::RwLock as ParkingLotRwLock;
use rustfs_filemeta::{
Cache, FileInfo, FileInfoOpts, FileMeta, MetaCacheEntry, MetacacheWriter, ObjectPartInfo, Opts, RawFileInfo, UpdateFn,
@@ -52,6 +39,10 @@ use rustfs_filemeta::{
};
use rustfs_utils::HashAlgorithm;
use rustfs_utils::os::get_info;
use rustfs_utils::path::{
GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR_STR, clean, decode_dir_object, encode_dir_object,
has_suffix, path_join, path_join_buf,
};
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::Debug;
@@ -67,6 +58,7 @@ use time::OffsetDateTime;
use tokio::fs::{self, File};
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt, ErrorKind};
use tokio::sync::RwLock;
use tokio::time::interval;
use tracing::{debug, error, info, warn};
use uuid::Uuid;
@@ -129,7 +121,8 @@ impl LocalDisk {
pub async fn new(ep: &Endpoint, cleanup: bool) -> Result<Self> {
debug!("Creating local disk");
// Use optimized path resolution instead of absolutize() for better performance
let root = match std::fs::canonicalize(ep.get_file_path()) {
// Use dunce::canonicalize instead of std::fs::canonicalize to avoid UNC paths on Windows
let root = match dunce::canonicalize(ep.get_file_path()) {
Ok(path) => path,
Err(e) => {
if e.kind() == ErrorKind::NotFound {
@@ -483,7 +476,7 @@ impl LocalDisk {
// Async prefetch related files, don't block current read
if let Some(parent) = file_path.parent() {
prefetch_metadata_patterns(parent, &[super::STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await;
prefetch_metadata_patterns(parent, &[STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await;
}
// Main read logic
@@ -507,7 +500,7 @@ impl LocalDisk {
async fn read_metadata_batch(&self, requests: Vec<(String, String)>) -> Result<Vec<Option<Arc<FileMeta>>>> {
let paths: Vec<PathBuf> = requests
.iter()
.map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, super::STORAGE_FORMAT_FILE)))
.map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, STORAGE_FORMAT_FILE)))
.collect::<Result<Vec<_>>>()?;
let cache = get_global_file_cache();
@@ -544,7 +537,7 @@ impl LocalDisk {
// TODO: async notifications for disk space checks and trash cleanup
let trash_path = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
let trash_path = self.get_object_path(RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
// if let Some(parent) = trash_path.parent() {
// if !parent.exists() {
// fs::create_dir_all(parent).await?;
@@ -552,7 +545,7 @@ impl LocalDisk {
// }
let err = if recursive {
rename_all(delete_path, trash_path, self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?)
rename_all(delete_path, trash_path, self.get_bucket_path(RUSTFS_META_TMP_DELETED_BUCKET)?)
.await
.err()
} else {
@@ -562,12 +555,12 @@ impl LocalDisk {
.err()
};
if immediate_purge || delete_path.to_string_lossy().ends_with(SLASH_SEPARATOR) {
let trash_path2 = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
if immediate_purge || delete_path.to_string_lossy().ends_with(SLASH_SEPARATOR_STR) {
let trash_path2 = self.get_object_path(RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
let _ = rename_all(
encode_dir_object(delete_path.to_string_lossy().as_ref()),
trash_path2,
self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?,
self.get_bucket_path(RUSTFS_META_TMP_DELETED_BUCKET)?,
)
.await;
}
@@ -903,7 +896,7 @@ impl LocalDisk {
}
if let Some(parent) = path.as_ref().parent() {
super::os::make_dir_all(parent, skip_parent).await?;
os::make_dir_all(parent, skip_parent).await?;
}
let f = super::fs::open_file(path.as_ref(), mode).await.map_err(to_file_error)?;
@@ -929,7 +922,7 @@ impl LocalDisk {
let meta = file.metadata().await.map_err(to_file_error)?;
let file_size = meta.len() as usize;
bitrot_verify(Box::new(file), file_size, part_size, algo, bytes::Bytes::copy_from_slice(sum), shard_size)
bitrot_verify(Box::new(file), file_size, part_size, algo, Bytes::copy_from_slice(sum), shard_size)
.await
.map_err(to_file_error)?;
@@ -1025,15 +1018,16 @@ impl LocalDisk {
continue;
}
if entry.ends_with(SLASH_SEPARATOR) {
if entry.ends_with(SLASH_SEPARATOR_STR) {
if entry.ends_with(GLOBAL_DIR_SUFFIX_WITH_SLASH) {
let entry = format!("{}{}", entry.as_str().trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH), SLASH_SEPARATOR);
let entry =
format!("{}{}", entry.as_str().trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH), SLASH_SEPARATOR_STR);
dir_objes.insert(entry.clone());
*item = entry;
continue;
}
*item = entry.trim_end_matches(SLASH_SEPARATOR).to_owned();
*item = entry.trim_end_matches(SLASH_SEPARATOR_STR).to_owned();
continue;
}
@@ -1045,7 +1039,7 @@ impl LocalDisk {
.await?;
let entry = entry.strip_suffix(STORAGE_FORMAT_FILE).unwrap_or_default().to_owned();
let name = entry.trim_end_matches(SLASH_SEPARATOR);
let name = entry.trim_end_matches(SLASH_SEPARATOR_STR);
let name = decode_dir_object(format!("{}/{}", &current, &name).as_str());
// if opts.limit > 0
@@ -1128,7 +1122,7 @@ impl LocalDisk {
Ok(res) => {
if is_dir_obj {
meta.name = meta.name.trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH).to_owned();
meta.name.push_str(SLASH_SEPARATOR);
meta.name.push_str(SLASH_SEPARATOR_STR);
}
meta.metadata = res.to_vec();
@@ -1146,7 +1140,7 @@ impl LocalDisk {
// NOT an object, append to stack (with slash)
// If dirObject, but no metadata (which is unexpected) we skip it.
if !is_dir_obj && !is_empty_dir(self.get_object_path(&opts.bucket, &meta.name)?).await {
meta.name.push_str(SLASH_SEPARATOR);
meta.name.push_str(SLASH_SEPARATOR_STR);
dir_stack.push(meta.name);
}
}
@@ -1229,7 +1223,7 @@ async fn read_file_metadata(p: impl AsRef<Path>) -> Result<Metadata> {
fn skip_access_checks(p: impl AsRef<str>) -> bool {
let vols = [
super::RUSTFS_META_TMP_DELETED_BUCKET,
RUSTFS_META_TMP_DELETED_BUCKET,
super::RUSTFS_META_TMP_BUCKET,
super::RUSTFS_META_MULTIPART_BUCKET,
RUSTFS_META_BUCKET,
@@ -1623,8 +1617,8 @@ impl DiskAPI for LocalDisk {
super::fs::access_std(&dst_volume_dir).map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))?
}
let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR);
let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR_STR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR_STR);
if !src_is_dir && dst_is_dir || src_is_dir && !dst_is_dir {
warn!(
@@ -1690,8 +1684,8 @@ impl DiskAPI for LocalDisk {
.map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))?;
}
let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR);
let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR_STR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR_STR);
if (dst_is_dir || src_is_dir) && (!dst_is_dir || !src_is_dir) {
return Err(Error::from(DiskError::FileAccessDenied));
}
@@ -1842,12 +1836,12 @@ impl DiskAPI for LocalDisk {
}
let volume_dir = self.get_bucket_path(volume)?;
let dir_path_abs = self.get_object_path(volume, dir_path.trim_start_matches(SLASH_SEPARATOR))?;
let dir_path_abs = self.get_object_path(volume, dir_path.trim_start_matches(SLASH_SEPARATOR_STR))?;
let entries = match os::read_dir(&dir_path_abs, count).await {
Ok(res) => res,
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound
if e.kind() == ErrorKind::NotFound
&& !skip_access_checks(volume)
&& let Err(e) = access(&volume_dir).await
{
@@ -2115,7 +2109,7 @@ impl DiskAPI for LocalDisk {
let volume_dir = self.get_bucket_path(volume)?;
if let Err(e) = access(&volume_dir).await {
if e.kind() == std::io::ErrorKind::NotFound {
if e.kind() == ErrorKind::NotFound {
os::make_dir_all(&volume_dir, self.root.as_path()).await?;
return Ok(());
}
@@ -2133,7 +2127,7 @@ impl DiskAPI for LocalDisk {
let entries = os::read_dir(&self.root, -1).await.map_err(to_volume_error)?;
for entry in entries {
if !has_suffix(&entry, SLASH_SEPARATOR) || !Self::is_valid_volname(clean(&entry).as_str()) {
if !has_suffix(&entry, SLASH_SEPARATOR_STR) || !Self::is_valid_volname(clean(&entry).as_str()) {
continue;
}
@@ -2355,7 +2349,7 @@ impl DiskAPI for LocalDisk {
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
if path.starts_with(SLASH_SEPARATOR) {
if path.starts_with(SLASH_SEPARATOR_STR) {
return self
.delete(
volume,
@@ -2416,7 +2410,7 @@ impl DiskAPI for LocalDisk {
if !meta.versions.is_empty() {
let buf = meta.marshal_msg()?;
return self
.write_all_meta(volume, format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str(), &buf, true)
.write_all_meta(volume, format!("{path}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE}").as_str(), &buf, true)
.await;
}
@@ -2426,11 +2420,11 @@ impl DiskAPI for LocalDisk {
{
let src_path = path_join(&[
file_path.as_path(),
Path::new(format!("{old_data_dir}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()),
Path::new(format!("{old_data_dir}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()),
]);
let dst_path = path_join(&[
file_path.as_path(),
Path::new(format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str()),
Path::new(format!("{path}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE}").as_str()),
]);
return rename_all(src_path, dst_path, file_path).await;
}
@@ -2579,7 +2573,7 @@ async fn get_disk_info(drive_path: PathBuf) -> Result<(rustfs_utils::os::DiskInf
if root_disk_threshold > 0 {
disk_info.total <= root_disk_threshold
} else {
is_root_disk(&drive_path, SLASH_SEPARATOR).unwrap_or_default()
is_root_disk(&drive_path, SLASH_SEPARATOR_STR).unwrap_or_default()
}
} else {
false
@@ -2597,7 +2591,7 @@ mod test {
// let arr = Vec::new();
let vols = [
super::super::RUSTFS_META_TMP_DELETED_BUCKET,
RUSTFS_META_TMP_DELETED_BUCKET,
super::super::RUSTFS_META_TMP_BUCKET,
super::super::RUSTFS_META_MULTIPART_BUCKET,
RUSTFS_META_BUCKET,
@@ -2625,9 +2619,7 @@ mod test {
let disk = LocalDisk::new(&ep, false).await.unwrap();
let tmpp = disk
.resolve_abs_path(Path::new(super::super::RUSTFS_META_TMP_DELETED_BUCKET))
.unwrap();
let tmpp = disk.resolve_abs_path(Path::new(RUSTFS_META_TMP_DELETED_BUCKET)).unwrap();
println!("ppp :{:?}", &tmpp);
@@ -2655,9 +2647,7 @@ mod test {
let disk = LocalDisk::new(&ep, false).await.unwrap();
let tmpp = disk
.resolve_abs_path(Path::new(super::super::RUSTFS_META_TMP_DELETED_BUCKET))
.unwrap();
let tmpp = disk.resolve_abs_path(Path::new(RUSTFS_META_TMP_DELETED_BUCKET)).unwrap();
println!("ppp :{:?}", &tmpp);

View File

@@ -19,7 +19,7 @@ use std::{
use super::error::Result;
use crate::disk::error_conv::to_file_error;
use rustfs_utils::path::SLASH_SEPARATOR;
use rustfs_utils::path::SLASH_SEPARATOR_STR;
use tokio::fs;
use tracing::warn;
@@ -118,7 +118,7 @@ pub async fn read_dir(path: impl AsRef<Path>, count: i32) -> std::io::Result<Vec
if file_type.is_file() {
volumes.push(name);
} else if file_type.is_dir() {
volumes.push(format!("{name}{SLASH_SEPARATOR}"));
volumes.push(format!("{name}{SLASH_SEPARATOR_STR}"));
}
count -= 1;
if count == 0 {

View File

@@ -38,7 +38,7 @@ use rustfs_common::defer;
use rustfs_common::heal_channel::HealOpts;
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
use rustfs_rio::{HashReader, WarpReader};
use rustfs_utils::path::{SLASH_SEPARATOR, encode_dir_object, path_join};
use rustfs_utils::path::{SLASH_SEPARATOR_STR, encode_dir_object, path_join};
use rustfs_workers::workers::Workers;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@@ -451,7 +451,7 @@ pub fn path2_bucket_object_with_base_path(base_path: &str, path: &str) -> (Strin
let trimmed_path = path
.strip_prefix(base_path)
.unwrap_or(path)
.strip_prefix(SLASH_SEPARATOR)
.strip_prefix(SLASH_SEPARATOR_STR)
.unwrap_or(path);
// Find the position of the first '/'
let Some(pos) = trimmed_path.find(SLASH_SEPARATOR) else {

View File

@@ -108,14 +108,19 @@ pub fn verify_rpc_signature(url: &str, method: &Method, headers: &HeaderMap) ->
}
// Generate expected signature
let expected_signature = generate_signature(&secret, url, method, timestamp);
// Compare signatures
if signature != expected_signature {
error!(
"verify_rpc_signature: Invalid signature: secret {}, url {}, method {}, timestamp {}, signature {}, expected_signature {}",
secret, url, method, timestamp, signature, expected_signature
"verify_rpc_signature: Invalid signature: url {}, method {}, timestamp {}, signature {}, expected_signature: {}***{}|{}",
url,
method,
timestamp,
signature,
expected_signature.chars().next().unwrap_or('*'),
expected_signature.chars().last().unwrap_or('*'),
expected_signature.len()
);
return Err(std::io::Error::other("Invalid signature"));

View File

@@ -83,7 +83,7 @@ use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX,
use rustfs_utils::{
HashAlgorithm,
crypto::hex,
path::{SLASH_SEPARATOR, encode_dir_object, has_suffix, path_join_buf},
path::{SLASH_SEPARATOR_STR, encode_dir_object, has_suffix, path_join_buf},
};
use rustfs_workers::workers::Workers;
use s3s::header::X_AMZ_RESTORE;
@@ -4630,7 +4630,9 @@ impl StorageAPI for SetDisks {
.await
.map_err(|e| to_object_err(e, vec![bucket, object]))?;
Ok(ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended))
let mut obj_info = ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended);
obj_info.size = goi.size;
Ok(obj_info)
}
#[tracing::instrument(skip(self))]
@@ -5340,7 +5342,7 @@ impl StorageAPI for SetDisks {
&upload_id_path,
fi.data_dir.map(|v| v.to_string()).unwrap_or_default().as_str(),
]),
SLASH_SEPARATOR
SLASH_SEPARATOR_STR
);
let mut part_numbers = match Self::list_parts(&online_disks, &part_path, read_quorum).await {
@@ -5478,7 +5480,7 @@ impl StorageAPI for SetDisks {
let mut populated_upload_ids = HashSet::new();
for upload_id in upload_ids.iter() {
let upload_id = upload_id.trim_end_matches(SLASH_SEPARATOR).to_string();
let upload_id = upload_id.trim_end_matches(SLASH_SEPARATOR_STR).to_string();
if populated_upload_ids.contains(&upload_id) {
continue;
}
@@ -6238,7 +6240,7 @@ impl StorageAPI for SetDisks {
None
};
if has_suffix(object, SLASH_SEPARATOR) {
if has_suffix(object, SLASH_SEPARATOR_STR) {
let (result, err) = self.heal_object_dir_locked(bucket, object, opts.dry_run, opts.remove).await?;
return Ok((result, err.map(|e| e.into())));
}

View File

@@ -34,7 +34,7 @@ use rustfs_filemeta::{
MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
merge_file_meta_versions,
};
use rustfs_utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix};
use rustfs_utils::path::{self, SLASH_SEPARATOR_STR, base_dir_from_prefix};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::broadcast::{self};
@@ -132,7 +132,7 @@ impl ListPathOptions {
return;
}
let s = SLASH_SEPARATOR.chars().next().unwrap_or_default();
let s = SLASH_SEPARATOR_STR.chars().next().unwrap_or_default();
self.filter_prefix = {
let fp = self.prefix.trim_start_matches(&self.base_dir).trim_matches(s);
@@ -346,7 +346,7 @@ impl ECStore {
if let Some(delimiter) = &delimiter {
if obj.is_dir && obj.mod_time.is_none() {
let mut found = false;
if delimiter != SLASH_SEPARATOR {
if delimiter != SLASH_SEPARATOR_STR {
for p in prefixes.iter() {
if found {
break;
@@ -470,7 +470,7 @@ impl ECStore {
if let Some(delimiter) = &delimiter {
if obj.is_dir && obj.mod_time.is_none() {
let mut found = false;
if delimiter != SLASH_SEPARATOR {
if delimiter != SLASH_SEPARATOR_STR {
for p in prefixes.iter() {
if found {
break;
@@ -502,7 +502,7 @@ impl ECStore {
// warn!("list_path opt {:?}", &o);
check_list_objs_args(&o.bucket, &o.prefix, &o.marker)?;
// if opts.prefix.ends_with(SLASH_SEPARATOR) {
// if opts.prefix.ends_with(SLASH_SEPARATOR_STR) {
// return Err(Error::msg("eof"));
// }
@@ -520,11 +520,11 @@ impl ECStore {
return Err(Error::Unexpected);
}
if o.prefix.starts_with(SLASH_SEPARATOR) {
if o.prefix.starts_with(SLASH_SEPARATOR_STR) {
return Err(Error::Unexpected);
}
let slash_separator = Some(SLASH_SEPARATOR.to_owned());
let slash_separator = Some(SLASH_SEPARATOR_STR.to_owned());
o.include_directories = o.separator == slash_separator;
@@ -774,8 +774,8 @@ impl ECStore {
let mut filter_prefix = {
prefix
.trim_start_matches(&path)
.trim_start_matches(SLASH_SEPARATOR)
.trim_end_matches(SLASH_SEPARATOR)
.trim_start_matches(SLASH_SEPARATOR_STR)
.trim_end_matches(SLASH_SEPARATOR_STR)
.to_owned()
};
@@ -1130,7 +1130,7 @@ async fn merge_entry_channels(
if path::clean(&best_entry.name) == path::clean(&other_entry.name) {
let dir_matches = best_entry.is_dir() && other_entry.is_dir();
let suffix_matches =
best_entry.name.ends_with(SLASH_SEPARATOR) == other_entry.name.ends_with(SLASH_SEPARATOR);
best_entry.name.ends_with(SLASH_SEPARATOR_STR) == other_entry.name.ends_with(SLASH_SEPARATOR_STR);
if dir_matches && suffix_matches {
to_merge.push(other_idx);

View File

@@ -51,7 +51,7 @@ use crate::{
store_api::{ObjectOptions, PutObjReader},
};
use rustfs_rio::HashReader;
use rustfs_utils::path::{SLASH_SEPARATOR, path_join};
use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join};
use s3s::S3ErrorCode;
use super::{
@@ -403,7 +403,7 @@ impl TierConfigMgr {
pub async fn save_tiering_config<S: StorageAPI>(&self, api: Arc<S>) -> std::result::Result<(), std::io::Error> {
let data = self.marshal()?;
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, TIER_CONFIG_FILE);
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR_STR, TIER_CONFIG_FILE);
self.save_config(api, &config_file, data).await
}
@@ -483,7 +483,7 @@ async fn new_and_save_tiering_config<S: StorageAPI>(api: Arc<S>) -> Result<TierC
#[tracing::instrument(level = "debug", name = "load_tier_config", skip(api))]
async fn load_tier_config(api: Arc<ECStore>) -> std::result::Result<TierConfigMgr, std::io::Error> {
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, TIER_CONFIG_FILE);
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR_STR, TIER_CONFIG_FILE);
let data = read_config(api.clone(), config_file.as_str()).await;
if let Err(err) = data {
if is_err_config_not_found(&err) {

View File

@@ -30,13 +30,11 @@ use crate::client::{
transition_api::{Options, TransitionClient, TransitionCore},
transition_api::{ReadCloser, ReaderImpl},
};
use crate::error::ErrorResponse;
use crate::error::error_resp_to_object_err;
use crate::tier::{
tier_config::TierS3,
warm_backend::{WarmBackend, WarmBackendGetOpts},
};
use rustfs_utils::path::SLASH_SEPARATOR;
use rustfs_utils::path::SLASH_SEPARATOR_STR;
pub struct WarmBackendS3 {
pub client: Arc<TransitionClient>,
@@ -178,7 +176,7 @@ impl WarmBackend for WarmBackendS3 {
async fn in_use(&self) -> Result<bool, std::io::Error> {
let result = self
.core
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1)
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR_STR, 1)
.await?;
Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0)

View File

@@ -27,19 +27,11 @@ use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::primitives::ByteStream;
use crate::client::{
api_get_options::GetObjectOptions,
api_put_object::PutObjectOptions,
api_remove::RemoveObjectOptions,
transition_api::{ReadCloser, ReaderImpl},
};
use crate::error::ErrorResponse;
use crate::error::error_resp_to_object_err;
use crate::client::transition_api::{ReadCloser, ReaderImpl};
use crate::tier::{
tier_config::TierS3,
warm_backend::{WarmBackend, WarmBackendGetOpts},
};
use rustfs_utils::path::SLASH_SEPARATOR;
pub struct WarmBackendS3 {
pub client: Arc<Client>,

View File

@@ -32,7 +32,7 @@ use rustfs_ecstore::{
store_api::{ObjectInfo, ObjectOptions},
};
use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc};
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join_buf};
use serde::{Serialize, de::DeserializeOwned};
use std::sync::LazyLock;
use std::{collections::HashMap, sync::Arc};
@@ -182,7 +182,7 @@ impl ObjectStore {
} else {
info.name
};
let name = object_name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR);
let name = object_name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR_STR);
let _ = sender
.send(StringOrErr {
item: Some(name.to_owned()),

View File

@@ -37,11 +37,6 @@ impl TargetID {
Self { id, name }
}
/// Convert to string representation
pub fn to_id_string(&self) -> String {
format!("{}:{}", self.id, self.name)
}
/// Create an ARN
pub fn to_arn(&self, region: &str) -> ARN {
ARN {
@@ -80,7 +75,7 @@ impl Serialize for TargetID {
where
S: Serializer,
{
serializer.serialize_str(&self.to_id_string())
serializer.serialize_str(&self.to_string())
}
}
@@ -130,7 +125,7 @@ impl ARN {
if self.target_id.id.is_empty() && self.target_id.name.is_empty() && self.region.is_empty() {
return String::new();
}
format!("{}:{}:{}", ARN_PREFIX, self.region, self.target_id.to_id_string())
format!("{}:{}:{}", ARN_PREFIX, self.region, self.target_id)
}
/// Parsing ARN from string

View File

@@ -72,7 +72,7 @@ rand = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
[target.'cfg(windows)'.dependencies]
winapi = { workspace = true, optional = true, features = ["std", "fileapi", "minwindef", "ntdef", "winnt"] }
windows = { workspace = true, optional = true, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] }
[lints]
workspace = true
@@ -83,13 +83,13 @@ ip = ["dep:local-ip-address"] # ip characteristics and their dependencies
tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls characteristics and their dependencies
net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:thiserror", "dep:tokio"] # network features with DNS resolver
io = ["dep:tokio"]
path = []
path = [] # path manipulation features
notify = ["dep:hyper", "dep:s3s", "dep:hashbrown", "dep:thiserror", "dep:serde", "dep:libc", "dep:url", "dep:regex"] # file system notification features
compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"]
string = ["dep:regex"]
crypto = ["dep:base64-simd", "dep:hex-simd", "dep:hmac", "dep:hyper", "dep:sha1"]
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:crc-fast"]
os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities
os = ["dep:nix", "dep:tempfile", "dep:windows"] # operating system utilities
integration = [] # integration test features
sys = ["dep:sysinfo"] # system information features
http = ["dep:convert_case", "dep:http", "dep:regex"]

View File

@@ -14,8 +14,6 @@
use bytes::Bytes;
use futures::{Stream, StreamExt, pin_mut};
#[cfg(test)]
use std::sync::MutexGuard;
use std::{
collections::{HashMap, HashSet},
fmt::Display,
@@ -83,7 +81,7 @@ fn reset_dns_resolver_inner() {
#[cfg(test)]
pub struct MockResolverGuard {
_lock: MutexGuard<'static, ()>,
_lock: std::sync::MutexGuard<'static, ()>,
}
#[cfg(test)]

View File

@@ -1,4 +1,3 @@
#![allow(unsafe_code)] // TODO: audit unsafe code
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,149 +12,232 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{DiskInfo, IOStats};
#![allow(unsafe_code)] // TODO: audit unsafe code
use crate::os::{DiskInfo, IOStats};
use std::io::Error;
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::path::Path;
use winapi::shared::minwindef::{DWORD, MAX_PATH};
use winapi::shared::ntdef::ULARGE_INTEGER;
use winapi::um::fileapi::{GetDiskFreeSpaceExW, GetDiskFreeSpaceW, GetVolumeInformationW, GetVolumePathNameW};
use winapi::um::winnt::{LPCWSTR, WCHAR};
use windows::Win32::Foundation::MAX_PATH;
use windows::Win32::Storage::FileSystem::{GetDiskFreeSpaceExW, GetDiskFreeSpaceW, GetVolumeInformationW, GetVolumePathNameW};
/// Returns total and free bytes available in a directory, e.g. `C:\`.
pub fn get_info(p: impl AsRef<Path>) -> std::io::Result<DiskInfo> {
let path_display = p.as_ref().display();
let path_wide: Vec<WCHAR> = p
let path_wide = p
.as_ref()
.to_path_buf()
.into_os_string()
.encode_wide()
.chain(std::iter::once(0)) // Null-terminate the string
.collect();
.to_string_lossy()
.encode_utf16()
.chain(std::iter::once(0))
.collect::<Vec<u16>>();
let mut lp_free_bytes_available: ULARGE_INTEGER = unsafe { mem::zeroed() };
let mut lp_total_number_of_bytes: ULARGE_INTEGER = unsafe { mem::zeroed() };
let mut lp_total_number_of_free_bytes: ULARGE_INTEGER = unsafe { mem::zeroed() };
let mut free_bytes_available = 0u64;
let mut total_number_of_bytes = 0u64;
let mut total_number_of_free_bytes = 0u64;
let success = unsafe {
unsafe {
GetDiskFreeSpaceExW(
path_wide.as_ptr(),
&mut lp_free_bytes_available,
&mut lp_total_number_of_bytes,
&mut lp_total_number_of_free_bytes,
windows::core::PCWSTR::from_raw(path_wide.as_ptr()),
Some(&mut free_bytes_available),
Some(&mut total_number_of_bytes),
Some(&mut total_number_of_free_bytes),
)
};
if success == 0 {
return Err(Error::last_os_error());
.map_err(|e| Error::from_raw_os_error(e.code().0 as i32))?;
}
let total = unsafe { *lp_total_number_of_bytes.QuadPart() };
let free = unsafe { *lp_total_number_of_free_bytes.QuadPart() };
let total = total_number_of_bytes;
let free = total_number_of_free_bytes;
if free > total {
return Err(Error::other(format!(
"detected free space ({free}) > total drive space ({total}), fs corruption at ({path_display}). please run 'fsck'"
"detected free space ({free}) > total drive space ({total}), fs corruption at ({}). please run 'fsck'",
p.as_ref().display()
)));
}
let mut lp_sectors_per_cluster: DWORD = 0;
let mut lp_bytes_per_sector: DWORD = 0;
let mut lp_number_of_free_clusters: DWORD = 0;
let mut lp_total_number_of_clusters: DWORD = 0;
let mut sectors_per_cluster = 0u32;
let mut bytes_per_sector = 0u32;
let mut number_of_free_clusters = 0u32;
let mut total_number_of_clusters = 0u32;
let success = unsafe {
unsafe {
GetDiskFreeSpaceW(
path_wide.as_ptr(),
&mut lp_sectors_per_cluster,
&mut lp_bytes_per_sector,
&mut lp_number_of_free_clusters,
&mut lp_total_number_of_clusters,
windows::core::PCWSTR::from_raw(path_wide.as_ptr()),
Some(&mut sectors_per_cluster),
Some(&mut bytes_per_sector),
Some(&mut number_of_free_clusters),
Some(&mut total_number_of_clusters),
)
};
if success == 0 {
return Err(Error::last_os_error());
.map_err(|e| Error::from_raw_os_error(e.code().0 as i32))?;
}
Ok(DiskInfo {
total,
free,
used: total - free,
files: lp_total_number_of_clusters as u64,
ffree: lp_number_of_free_clusters as u64,
// TODO This field is currently unused, and since this logic causes a
// NotFound error during startup on Windows systems, it has been commented out here
//
// The error occurs in GetVolumeInformationW where the path parameter
// is of type [WCHAR; MAX_PATH]. For a drive letter, there are excessive
// trailing zeros, which causes the failure here.
//
// fstype: get_fs_type(&path_wide)?,
files: total_number_of_clusters as u64,
ffree: number_of_free_clusters as u64,
fstype: get_fs_type(&path_wide).unwrap_or_default(),
..Default::default()
})
}
/// Returns leading volume name.
///
/// # Arguments
/// * `v` - A slice of u16 representing the path in UTF-16 encoding
///
/// # Returns
/// * `Ok(Vec<u16>)` containing the volume name in UTF-16 encoding.
/// * `Err` if an error occurs during the operation.
#[allow(dead_code)]
fn get_volume_name(v: &[WCHAR]) -> std::io::Result<LPCWSTR> {
let volume_name_size: DWORD = MAX_PATH as _;
let mut lp_volume_name_buffer: [WCHAR; MAX_PATH] = [0; MAX_PATH];
fn get_volume_name(v: &[u16]) -> std::io::Result<Vec<u16>> {
let mut volume_name_buffer = [0u16; MAX_PATH as usize];
let success = unsafe { GetVolumePathNameW(v.as_ptr(), lp_volume_name_buffer.as_mut_ptr(), volume_name_size) };
if success == 0 {
return Err(Error::last_os_error());
unsafe {
GetVolumePathNameW(windows::core::PCWSTR::from_raw(v.as_ptr()), &mut volume_name_buffer)
.map_err(|e| Error::from_raw_os_error(e.code().0 as i32))?;
}
Ok(lp_volume_name_buffer.as_ptr())
let len = volume_name_buffer
.iter()
.position(|&x| x == 0)
.unwrap_or(volume_name_buffer.len());
Ok(volume_name_buffer[..len].to_vec())
}
#[allow(dead_code)]
fn utf16_to_string(v: &[WCHAR]) -> String {
fn utf16_to_string(v: &[u16]) -> String {
let len = v.iter().position(|&x| x == 0).unwrap_or(v.len());
String::from_utf16_lossy(&v[..len])
}
/// Returns the filesystem type of the underlying mounted filesystem
///
/// # Arguments
/// * `p` - A slice of u16 representing the path in UTF-16 encoding
///
/// # Returns
/// * `Ok(String)` containing the filesystem type (e.g., "NTFS", "FAT32").
/// * `Err` if an error occurs during the operation.
#[allow(dead_code)]
fn get_fs_type(p: &[WCHAR]) -> std::io::Result<String> {
fn get_fs_type(p: &[u16]) -> std::io::Result<String> {
let path = get_volume_name(p)?;
let volume_name_size: DWORD = MAX_PATH as _;
let n_file_system_name_size: DWORD = MAX_PATH as _;
let mut volume_serial_number = 0u32;
let mut maximum_component_length = 0u32;
let mut file_system_flags = 0u32;
let mut volume_name_buffer = [0u16; MAX_PATH as usize];
let mut file_system_name_buffer = [0u16; MAX_PATH as usize];
let mut lp_volume_serial_number: DWORD = 0;
let mut lp_maximum_component_length: DWORD = 0;
let mut lp_file_system_flags: DWORD = 0;
let mut lp_volume_name_buffer: [WCHAR; MAX_PATH] = [0; MAX_PATH];
let mut lp_file_system_name_buffer: [WCHAR; MAX_PATH] = [0; MAX_PATH];
let success = unsafe {
unsafe {
GetVolumeInformationW(
path,
lp_volume_name_buffer.as_mut_ptr(),
volume_name_size,
&mut lp_volume_serial_number,
&mut lp_maximum_component_length,
&mut lp_file_system_flags,
lp_file_system_name_buffer.as_mut_ptr(),
n_file_system_name_size,
windows::core::PCWSTR::from_raw(path.as_ptr()),
Some(&mut volume_name_buffer),
Some(&mut volume_serial_number),
Some(&mut maximum_component_length),
Some(&mut file_system_flags),
Some(&mut file_system_name_buffer),
)
};
if success == 0 {
return Err(Error::last_os_error());
.map_err(|e| Error::from_raw_os_error(e.code().0 as i32))?;
}
Ok(utf16_to_string(&lp_file_system_name_buffer))
Ok(utf16_to_string(&file_system_name_buffer))
}
pub fn same_disk(_disk1: &str, _disk2: &str) -> std::io::Result<bool> {
Ok(false)
/// Determines if two paths are on the same disk.
///
/// # Arguments
/// * `disk1` - The first disk path as a string slice.
/// * `disk2` - The second disk path as a string slice.
///
/// # Returns
/// * `Ok(true)` if both paths are on the same disk.
/// * `Ok(false)` if both paths are on different disks.
/// * `Err` if an error occurs during the operation.
pub fn same_disk(disk1: &str, disk2: &str) -> std::io::Result<bool> {
let path1_wide: Vec<u16> = disk1.encode_utf16().chain(std::iter::once(0)).collect();
let path2_wide: Vec<u16> = disk2.encode_utf16().chain(std::iter::once(0)).collect();
let volume1 = get_volume_name(&path1_wide)?;
let volume2 = get_volume_name(&path2_wide)?;
Ok(volume1 == volume2)
}
/// Retrieves I/O statistics for a drive identified by its major and minor numbers.
///
/// # Arguments
/// * `major` - The major number of the drive.
/// * `minor` - The minor number of the drive.
///
/// # Returns
/// * `Ok(IOStats)` containing the I/O statistics.
/// * `Err` if an error occurs during the operation.
pub fn get_drive_stats(_major: u32, _minor: u32) -> std::io::Result<IOStats> {
// Windows does not provide direct IO stats via simple API; this is a stub
// For full implementation, consider using PDH or WMI, but that adds complexity
Ok(IOStats::default())
}
#[cfg(test)]
mod tests {
use crate::os::{get_info, same_disk};
#[cfg(target_os = "windows")]
#[test]
fn test_get_info_valid_path() {
let temp_dir = tempfile::tempdir().unwrap();
let info = get_info(temp_dir.path()).unwrap();
// Verify disk info is valid
assert!(info.total > 0);
assert!(info.free > 0);
assert!(info.used > 0);
assert!(info.files > 0);
assert!(info.ffree > 0);
assert!(!info.fstype.is_empty());
}
#[cfg(target_os = "windows")]
#[test]
fn test_get_info_invalid_path() {
use std::path::PathBuf;
let invalid_path = PathBuf::from("Z:\\invalid\\path");
let result = get_info(&invalid_path);
assert!(result.is_err());
}
#[cfg(target_os = "windows")]
#[test]
fn test_same_disk_same_path() {
let temp_dir = tempfile::tempdir().unwrap();
let path = temp_dir.path().to_str().unwrap();
let result = same_disk(path, path).unwrap();
assert!(result);
}
#[cfg(target_os = "windows")]
#[test]
fn test_same_disk_different_paths() {
let temp_dir1 = tempfile::tempdir().unwrap();
let temp_dir2 = tempfile::tempdir().unwrap();
let path1 = temp_dir1.path().to_str().unwrap();
let path2 = temp_dir2.path().to_str().unwrap();
let _result = same_disk(path1, path2).unwrap();
// Since both temporary directories are created in the same file system,
// they should be on the same disk in most cases
// Test passes if the function doesn't panic - the actual result depends on test environment
}
#[cfg(target_os = "windows")]
#[test]
fn get_info_with_root_drive() {
let info = get_info("C:\\").unwrap();
assert!(info.total > 0);
assert!(info.free > 0);
assert!(info.used > 0);
assert!(info.files > 0);
assert!(info.ffree > 0);
assert!(!info.fstype.is_empty());
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -110,6 +110,10 @@ RustFS helm chart supports **standalone and distributed mode**. For standalone m
| storageclass.logStorageSize | string | `"256Mi"` | The storage size for logs PVC. |
| storageclass.name | string | `"local-path"` | The name for StorageClass. |
| tolerations | list | `[]` | |
| gatewayApi.enabled | bool | `false` | To enable/disable gateway api support. |
| gatewayApi.gatewayClass | string | `traefik` | Gateway class implementation. |
| gatewayApi.hostname | string | `example.rustfs.com` | Hostname to access RustFS via the gateway api. |
| gatewayApi.secretName | string | `secret-tls` | TLS secret used to access RustFS over HTTPS. |
---
@@ -207,6 +211,22 @@ You should use `--set-file` parameter when running `helm install` command, for e
helm install rustfs rustfs/rustfs -n rustfs --set tls.enabled=true,--set-file tls.crt=./tls.crt,--set-file tls.key=./tls.key
```
# Gateway API support (alpha)
Due to the [ingress nginx retirement](https://kubernetes.io/blog/2025/11/11/ingress-nginx-retirement/) in March 2026, RustFS adds support for the [gateway api](https://gateway-api.sigs.k8s.io/). Currently, RustFS only supports traefik as the gateway class; support for more gateway classes will be added once they have been tested. To enable the gateway api, set `gatewayApi.enabled` to `true` and set `ingress.enabled` to `false`. After installation, you can find the `Gateway` and `HTTPRoute` resources,
```
$ kubectl -n rustfs get gateway
NAME CLASS ADDRESS PROGRAMMED AGE
rustfs-gateway traefik True 169m
$ kubectl -n rustfs get httproute
NAME HOSTNAMES AGE
rustfs-route ["example.rustfs.com"] 172m
```
Then, access the RustFS instance via `https://example.rustfs.com` or `http://example.rustfs.com`.
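For reference, a minimal sketch of an install command that enables the gateway api (the release name and repo alias follow the earlier example in this README; the hostname and secret name are the chart defaults shown in `values.yaml` and would normally be replaced with your own values):
```
helm install rustfs rustfs/rustfs -n rustfs \
  --set ingress.enabled=false \
  --set gatewayApi.enabled=true \
  --set gatewayApi.gatewayClass=traefik \
  --set gatewayApi.hostname=example.rustfs.com \
  --set gatewayApi.secretName=secret-tls
```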
# Uninstall
Uninstall the rustfs installation with the command,

View File

@@ -10,6 +10,10 @@ metadata:
{{- end }}
spec:
replicas: 1
{{- with .Values.mode.standalone.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "rustfs.selectorLabels" . | nindent 6 }}

View File

@@ -0,0 +1,23 @@
{{- if .Values.gatewayApi.enabled }}
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: {{ include "rustfs.fullname" . }}-gateway
spec:
gatewayClassName: {{ .Values.gatewayApi.gatewayClass }}
listeners:
- name: http
port: 80
protocol: HTTP
allowedRoutes:
namespaces:
from: Same
- name: https
port: 443
protocol: HTTPS
tls:
mode: Terminate
certificateRefs:
- name: {{ .Values.gatewayApi.secretName }}
kind: Secret
{{- end }}

View File

@@ -0,0 +1,19 @@
{{- if .Values.gatewayApi.enabled -}}
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: {{ include "rustfs.fullname" . }}-route
spec:
parentRefs:
- name: {{ include "rustfs.fullname" . }}-gateway
hostnames:
- {{ .Values.gatewayApi.hostname }}
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- name: rustfs-svc
port: 9001
{{- end }}

View File

@@ -1,4 +1,4 @@
{{- if and .Values.ingress.tls.enabled (not .Values.ingress.tls.certManager.enabled) }}
{{- if and (or .Values.gatewayApi.enabled .Values.ingress.tls.enabled) (not .Values.ingress.tls.certManager.enabled) }}
apiVersion: v1
kind: Secret
metadata:

View File

@@ -11,7 +11,7 @@ image:
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
tag: ""
# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
@@ -30,6 +30,11 @@ fullnameOverride: ""
mode:
standalone:
enabled: false
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
distributed:
enabled: true
@@ -130,6 +135,12 @@ ingress:
crt: tls.crt
key: tls.key
gatewayApi:
enabled: false
gatewayClass: traefik
hostname: example.rustfs.com
secretName: secret-tls
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little

View File

@@ -83,6 +83,7 @@ pub mod kms_keys;
pub mod policies;
pub mod pools;
pub mod profile;
pub mod quota;
pub mod rebalance;
pub mod service_account;
pub mod sts;

View File

@@ -12,11 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{
collections::HashMap,
io::{Cursor, Read as _, Write as _},
};
use crate::{
admin::{auth::validate_admin_request, router::Operation},
auth::{check_key_valid, get_session_token},
@@ -49,7 +44,7 @@ use rustfs_policy::policy::{
BucketPolicy,
action::{Action, AdminAction},
};
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join_buf};
use s3s::{
Body, S3Request, S3Response, S3Result,
dto::{
@@ -61,6 +56,10 @@ use s3s::{
};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use std::{
collections::HashMap,
io::{Cursor, Read as _, Write as _},
};
use time::OffsetDateTime;
use tracing::warn;
use zip::{ZipArchive, ZipWriter, write::SimpleFileOptions};
@@ -424,7 +423,7 @@ impl Operation for ImportBucketMetadata {
// Extract bucket names
let mut bucket_names = Vec::new();
for (file_path, _) in &file_contents {
let file_path_split = file_path.split(SLASH_SEPARATOR).collect::<Vec<&str>>();
let file_path_split = file_path.split(SLASH_SEPARATOR_STR).collect::<Vec<&str>>();
if file_path_split.len() < 2 {
warn!("file path is invalid: {}", file_path);
@@ -463,7 +462,7 @@ impl Operation for ImportBucketMetadata {
// Second pass: process file contents
for (file_path, content) in file_contents {
let file_path_split = file_path.split(SLASH_SEPARATOR).collect::<Vec<&str>>();
let file_path_split = file_path.split(SLASH_SEPARATOR_STR).collect::<Vec<&str>>();
if file_path_split.len() < 2 {
warn!("file path is invalid: {}", file_path);

View File

@@ -0,0 +1,485 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Quota admin handlers for HTTP API
use super::Operation;
use crate::admin::auth::validate_admin_request;
use crate::auth::{check_key_valid, get_session_token};
use hyper::StatusCode;
use matchit::Params;
use rustfs_ecstore::bucket::quota::checker::QuotaChecker;
use rustfs_ecstore::bucket::quota::{BucketQuota, QuotaError, QuotaOperation};
use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::{Body, S3Request, S3Response, S3Result, s3_error};
use serde::{Deserialize, Serialize};
use serde_json;
use tracing::{debug, info, warn};
#[derive(Debug, Deserialize)]
pub struct SetBucketQuotaRequest {
pub quota: Option<u64>,
#[serde(default = "default_quota_type")]
pub quota_type: String,
}
fn default_quota_type() -> String {
rustfs_config::QUOTA_TYPE_HARD.to_string()
}
#[derive(Debug, Serialize)]
pub struct BucketQuotaResponse {
pub bucket: String,
pub quota: Option<u64>,
/// Current usage size in bytes
pub size: u64,
pub quota_type: String,
}
#[derive(Debug, Serialize)]
pub struct BucketQuotaStats {
pub bucket: String,
pub quota_limit: Option<u64>,
pub current_usage: u64,
pub remaining_quota: Option<u64>,
pub usage_percentage: Option<f64>,
}
#[derive(Debug, Deserialize)]
pub struct CheckQuotaRequest {
pub operation_type: String,
pub operation_size: u64,
}
#[derive(Debug, Serialize)]
pub struct CheckQuotaResponse {
pub bucket: String,
pub operation_type: String,
pub operation_size: u64,
pub allowed: bool,
pub current_usage: u64,
pub quota_limit: Option<u64>,
pub remaining_quota: Option<u64>,
}
/// Quota management handlers
pub struct SetBucketQuotaHandler;
pub struct GetBucketQuotaHandler;
pub struct ClearBucketQuotaHandler;
pub struct GetBucketQuotaStatsHandler;
pub struct CheckBucketQuotaHandler;
#[async_trait::async_trait]
impl Operation for SetBucketQuotaHandler {
#[tracing::instrument(skip_all)]
async fn call(&self, mut req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
warn!("handle SetBucketQuota");
let Some(ref cred) = req.credentials else {
return Err(s3_error!(InvalidRequest, "authentication required"));
};
let (cred, owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
validate_admin_request(
&req.headers,
&cred,
owner,
false,
vec![Action::AdminAction(AdminAction::SetBucketQuotaAdminAction)],
None,
)
.await?;
let bucket = params.get("bucket").unwrap_or("").to_string();
if bucket.is_empty() {
return Err(s3_error!(InvalidRequest, "bucket name is required"));
}
let body = req
.input
.store_all_limited(rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE)
.await
.map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
let request: SetBucketQuotaRequest = if body.is_empty() {
SetBucketQuotaRequest {
quota: None,
quota_type: default_quota_type(),
}
} else {
serde_json::from_slice(&body).map_err(|e| s3_error!(InvalidRequest, "invalid JSON: {}", e))?
};
if request.quota_type.to_uppercase() != rustfs_config::QUOTA_TYPE_HARD {
return Err(s3_error!(InvalidArgument, "{}", rustfs_config::QUOTA_INVALID_TYPE_ERROR_MSG));
}
let quota = BucketQuota::new(request.quota);
let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys
.get()
.ok_or_else(|| s3_error!(InternalError, "{}", rustfs_config::QUOTA_METADATA_SYSTEM_ERROR_MSG))?;
let mut quota_checker = QuotaChecker::new(metadata_sys_lock.clone());
quota_checker
.set_quota_config(&bucket, quota.clone())
.await
.map_err(|e| s3_error!(InternalError, "Failed to set quota: {}", e))?;
// Get real-time usage from data usage system
let current_usage = if let Some(store) = rustfs_ecstore::global::GLOBAL_OBJECT_API.get() {
match rustfs_ecstore::data_usage::load_data_usage_from_backend(store.clone()).await {
Ok(data_usage_info) => data_usage_info
.buckets_usage
.get(&bucket)
.map(|bucket_usage| bucket_usage.size)
.unwrap_or(0),
Err(_) => 0,
}
} else {
0
};
let response = BucketQuotaResponse {
bucket,
quota: quota.quota,
size: current_usage,
quota_type: rustfs_config::QUOTA_TYPE_HARD.to_string(),
};
let json =
serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?;
Ok(S3Response::new((StatusCode::OK, Body::from(json))))
}
}
#[async_trait::async_trait]
impl Operation for GetBucketQuotaHandler {
#[tracing::instrument(skip_all)]
async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
warn!("handle GetBucketQuota");
let Some(ref cred) = req.credentials else {
return Err(s3_error!(InvalidRequest, "authentication required"));
};
let (cred, owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
validate_admin_request(
&req.headers,
&cred,
owner,
false,
vec![Action::AdminAction(AdminAction::GetBucketQuotaAdminAction)],
None,
)
.await?;
let bucket = params.get("bucket").unwrap_or("").to_string();
if bucket.is_empty() {
return Err(s3_error!(InvalidRequest, "bucket name is required"));
}
let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys
.get()
.ok_or_else(|| s3_error!(InternalError, "Bucket metadata system not initialized"))?;
let quota_checker = QuotaChecker::new(metadata_sys_lock.clone());
let (quota, current_usage) = quota_checker.get_quota_stats(&bucket).await.map_err(|e| match e {
QuotaError::ConfigNotFound { .. } => {
s3_error!(NoSuchBucket, "Bucket not found: {}", bucket)
}
_ => s3_error!(InternalError, "Failed to get quota: {}", e),
})?;
let response = BucketQuotaResponse {
bucket,
quota: quota.quota,
size: current_usage.unwrap_or(0),
quota_type: rustfs_config::QUOTA_TYPE_HARD.to_string(),
};
let json =
serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?;
Ok(S3Response::new((StatusCode::OK, Body::from(json))))
}
}
#[async_trait::async_trait]
impl Operation for ClearBucketQuotaHandler {
#[tracing::instrument(skip_all)]
async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
warn!("handle ClearBucketQuota");
let Some(ref cred) = req.credentials else {
return Err(s3_error!(InvalidRequest, "authentication required"));
};
let (cred, owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
validate_admin_request(
&req.headers,
&cred,
owner,
false,
vec![Action::AdminAction(AdminAction::SetBucketQuotaAdminAction)],
None,
)
.await?;
let bucket = params.get("bucket").unwrap_or("").to_string();
if bucket.is_empty() {
return Err(s3_error!(InvalidRequest, "bucket name is required"));
}
info!("Clearing quota for bucket: {}", bucket);
let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys
.get()
.ok_or_else(|| s3_error!(InternalError, "Bucket metadata system not initialized"))?;
let mut quota_checker = QuotaChecker::new(metadata_sys_lock.clone());
// Clear quota (set to None)
let quota = BucketQuota::new(None);
quota_checker
.set_quota_config(&bucket, quota.clone())
.await
.map_err(|e| s3_error!(InternalError, "Failed to clear quota: {}", e))?;
info!("Successfully cleared quota for bucket: {}", bucket);
// Get real-time usage from data usage system
let current_usage = if let Some(store) = rustfs_ecstore::global::GLOBAL_OBJECT_API.get() {
match rustfs_ecstore::data_usage::load_data_usage_from_backend(store.clone()).await {
Ok(data_usage_info) => data_usage_info
.buckets_usage
.get(&bucket)
.map(|bucket_usage| bucket_usage.size)
.unwrap_or(0),
Err(_) => 0,
}
} else {
0
};
let response = BucketQuotaResponse {
bucket,
quota: None,
size: current_usage,
quota_type: rustfs_config::QUOTA_TYPE_HARD.to_string(),
};
let json =
serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?;
Ok(S3Response::new((StatusCode::OK, Body::from(json))))
}
}
#[async_trait::async_trait]
impl Operation for GetBucketQuotaStatsHandler {
#[tracing::instrument(skip_all)]
async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
warn!("handle GetBucketQuotaStats");
let Some(ref cred) = req.credentials else {
return Err(s3_error!(InvalidRequest, "authentication required"));
};
let (cred, owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
validate_admin_request(
&req.headers,
&cred,
owner,
false,
vec![Action::AdminAction(AdminAction::GetBucketQuotaAdminAction)],
None,
)
.await?;
let bucket = params.get("bucket").unwrap_or("").to_string();
if bucket.is_empty() {
return Err(s3_error!(InvalidRequest, "bucket name is required"));
}
let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys
.get()
.ok_or_else(|| s3_error!(InternalError, "Bucket metadata system not initialized"))?;
let quota_checker = QuotaChecker::new(metadata_sys_lock.clone());
let (quota, current_usage_opt) = quota_checker.get_quota_stats(&bucket).await.map_err(|e| match e {
QuotaError::ConfigNotFound { .. } => {
s3_error!(NoSuchBucket, "Bucket not found: {}", bucket)
}
_ => s3_error!(InternalError, "Failed to get quota stats: {}", e),
})?;
let current_usage = current_usage_opt.unwrap_or(0);
let usage_percentage = quota.quota.and_then(|limit| {
if limit == 0 {
None
} else {
Some((current_usage as f64 / limit as f64) * 100.0)
}
});
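// Worked example (illustrative, not executed here): with quota = Some(2_147_483_648)
// bytes (2 GiB) and current_usage = 1_073_741_824 bytes (1 GiB), usage_percentage
// evaluates to Some(50.0); with quota None or Some(0) it stays None, so the stats
// response never divides by zero.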
let remaining_quota = quota.get_remaining_quota(current_usage);
let response = BucketQuotaStats {
bucket,
quota_limit: quota.quota,
current_usage,
remaining_quota,
usage_percentage,
};
let json =
serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?;
Ok(S3Response::new((StatusCode::OK, Body::from(json))))
}
}
#[async_trait::async_trait]
impl Operation for CheckBucketQuotaHandler {
#[tracing::instrument(skip_all)]
async fn call(&self, mut req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
warn!("handle CheckBucketQuota");
let Some(ref cred) = req.credentials else {
return Err(s3_error!(InvalidRequest, "authentication required"));
};
let (cred, owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
validate_admin_request(
&req.headers,
&cred,
owner,
false,
vec![Action::AdminAction(AdminAction::GetBucketQuotaAdminAction)],
None,
)
.await?;
let bucket = params.get("bucket").unwrap_or("").to_string();
if bucket.is_empty() {
return Err(s3_error!(InvalidRequest, "bucket name is required"));
}
let body = req
.input
.store_all_limited(rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE)
.await
.map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
let request: CheckQuotaRequest = if body.is_empty() {
return Err(s3_error!(InvalidRequest, "request body cannot be empty"));
} else {
serde_json::from_slice(&body).map_err(|e| s3_error!(InvalidRequest, "invalid JSON: {}", e))?
};
debug!(
"Checking quota for bucket: {}, operation: {}, size: {}",
bucket, request.operation_type, request.operation_size
);
let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys
.get()
.ok_or_else(|| s3_error!(InternalError, "Bucket metadata system not initialized"))?;
let quota_checker = QuotaChecker::new(metadata_sys_lock.clone());
let operation: QuotaOperation = match request.operation_type.to_uppercase().as_str() {
"PUT" | "PUTOBJECT" => QuotaOperation::PutObject,
"COPY" | "COPYOBJECT" => QuotaOperation::CopyObject,
"DELETE" | "DELETEOBJECT" => QuotaOperation::DeleteObject,
_ => QuotaOperation::PutObject, // Default to PUT operation
};
let result = quota_checker
.check_quota(&bucket, operation, request.operation_size)
.await
.map_err(|e| s3_error!(InternalError, "Failed to check quota: {}", e))?;
let response = CheckQuotaResponse {
bucket,
operation_type: request.operation_type,
operation_size: request.operation_size,
allowed: result.allowed,
current_usage: result.current_usage,
quota_limit: result.quota_limit,
remaining_quota: result.remaining,
};
let json =
serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?;
Ok(S3Response::new((StatusCode::OK, Body::from(json))))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_default_quota_type() {
assert_eq!(default_quota_type(), "HARD");
}
#[test]
fn test_quota_operation_parsing() {
let parse_operation = |operation: &str| match operation.to_uppercase().as_str() {
"PUT" | "PUTOBJECT" => QuotaOperation::PutObject,
"COPY" | "COPYOBJECT" => QuotaOperation::CopyObject,
"DELETE" | "DELETEOBJECT" => QuotaOperation::DeleteObject,
_ => QuotaOperation::PutObject,
};
assert!(matches!(parse_operation("put"), QuotaOperation::PutObject));
assert!(matches!(parse_operation("PUT"), QuotaOperation::PutObject));
assert!(matches!(parse_operation("PutObject"), QuotaOperation::PutObject));
assert!(matches!(parse_operation("copy"), QuotaOperation::CopyObject));
assert!(matches!(parse_operation("DELETE"), QuotaOperation::DeleteObject));
assert!(matches!(parse_operation("unknown"), QuotaOperation::PutObject));
}
#[tokio::test]
async fn test_quota_response_serialization() {
let response = BucketQuotaResponse {
bucket: "test-bucket".to_string(),
quota: Some(2147483648),
size: 1073741824,
quota_type: rustfs_config::QUOTA_TYPE_HARD.to_string(),
};
let json = serde_json::to_string(&response).unwrap();
assert!(json.contains("test-bucket"));
assert!(json.contains("2147483648"));
assert!(json.contains("HARD"));
}
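// A minimal sketch of the JSON body accepted by CheckBucketQuotaHandler, assuming
// CheckQuotaRequest is defined in this module with the default serde field names the
// handler reads (operation_type, operation_size); adjust if the struct uses renames.
#[test]
fn test_check_quota_request_shape() {
let body = r#"{"operation_type":"PUT","operation_size":1048576}"#;
let request: CheckQuotaRequest = serde_json::from_str(body).expect("valid JSON body");
assert_eq!(request.operation_type, "PUT");
assert_eq!(request.operation_size, 1048576);
}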
}

View File

@@ -29,7 +29,7 @@ use handlers::{
event::{ListNotificationTargets, ListTargetsArns, NotificationTarget, RemoveNotificationTarget},
group, kms, kms_dynamic, kms_keys, policies, pools,
profile::{TriggerProfileCPU, TriggerProfileMemory},
rebalance,
quota, rebalance,
service_account::{AddServiceAccount, DeleteServiceAccount, InfoServiceAccount, ListServiceAccount, UpdateServiceAccount},
sts, tier, user,
};
@@ -202,6 +202,32 @@ pub fn make_admin_route(console_enabled: bool) -> std::io::Result<impl S3Route>
AdminOperation(&tier::ClearTier {}),
)?;
r.insert(
Method::PUT,
format!("{}{}", ADMIN_PREFIX, "/v3/quota/{bucket}").as_str(),
AdminOperation(&quota::SetBucketQuotaHandler {}),
)?;
r.insert(
Method::GET,
format!("{}{}", ADMIN_PREFIX, "/v3/quota/{bucket}").as_str(),
AdminOperation(&quota::GetBucketQuotaHandler {}),
)?;
r.insert(
Method::DELETE,
format!("{}{}", ADMIN_PREFIX, "/v3/quota/{bucket}").as_str(),
AdminOperation(&quota::ClearBucketQuotaHandler {}),
)?;
r.insert(
Method::GET,
format!("{}{}", ADMIN_PREFIX, "/v3/quota-stats/{bucket}").as_str(),
AdminOperation(&quota::GetBucketQuotaStatsHandler {}),
)?;
r.insert(
Method::POST,
format!("{}{}", ADMIN_PREFIX, "/v3/quota-check/{bucket}").as_str(),
AdminOperation(&quota::CheckBucketQuotaHandler {}),
)?;
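// Illustrative summary (comments only, not routing code) of the quota admin endpoints
// registered above, all rooted at ADMIN_PREFIX:
//   PUT    /v3/quota/{bucket}        set a hard quota for the bucket
//   GET    /v3/quota/{bucket}        read the configured quota and current usage
//   DELETE /v3/quota/{bucket}        clear the quota
//   GET    /v3/quota-stats/{bucket}  usage statistics (percentage, remaining quota)
//   POST   /v3/quota-check/{bucket}  dry-run check for a prospective operation size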
r.insert(
Method::GET,
format!("{}{}", ADMIN_PREFIX, "/export-bucket-metadata").as_str(),
@@ -239,14 +265,12 @@ pub fn make_admin_route(console_enabled: bool) -> std::io::Result<impl S3Route>
)?;
// Performance profiling endpoints (available on all platforms, with platform-specific responses)
#[cfg(not(target_os = "windows"))]
r.insert(
Method::GET,
format!("{}{}", ADMIN_PREFIX, "/debug/pprof/profile").as_str(),
AdminOperation(&handlers::ProfileHandler {}),
)?;
#[cfg(not(target_os = "windows"))]
r.insert(
Method::GET,
format!("{}{}", ADMIN_PREFIX, "/debug/pprof/status").as_str(),

View File

@@ -47,7 +47,7 @@ const LONG_VERSION: &str = concat!(
concat!("git status :\n", build::GIT_STATUS_FILE),
);
#[derive(Debug, Parser, Clone)]
#[derive(Parser, Clone)]
#[command(version = SHORT_VERSION, long_version = LONG_VERSION)]
pub struct Opt {
/// DIR points to a directory on a filesystem.
@@ -60,7 +60,11 @@ pub struct Opt {
pub volumes: Vec<String>,
/// Bind to a specific ADDRESS:PORT; ADDRESS can be an IP or a hostname
#[arg(long, default_value_t = rustfs_config::DEFAULT_ADDRESS.to_string(), env = "RUSTFS_ADDRESS")]
#[arg(
long,
default_value_t = rustfs_config::DEFAULT_ADDRESS.to_string(),
env = "RUSTFS_ADDRESS"
)]
pub address: String,
/// Domain name used for virtual-hosted-style requests.
@@ -73,23 +77,43 @@ pub struct Opt {
pub server_domains: Vec<String>,
/// Access key used for authentication.
#[arg(long, default_value_t = rustfs_credentials::DEFAULT_ACCESS_KEY.to_string(), env = "RUSTFS_ACCESS_KEY")]
#[arg(
long,
default_value_t = rustfs_credentials::DEFAULT_ACCESS_KEY.to_string(),
env = "RUSTFS_ACCESS_KEY"
)]
pub access_key: String,
/// Secret key used for authentication.
#[arg(long, default_value_t = rustfs_credentials::DEFAULT_SECRET_KEY.to_string(), env = "RUSTFS_SECRET_KEY")]
#[arg(
long,
default_value_t = rustfs_credentials::DEFAULT_SECRET_KEY.to_string(),
env = "RUSTFS_SECRET_KEY"
)]
pub secret_key: String,
/// Enable console server
#[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ENABLE, env = "RUSTFS_CONSOLE_ENABLE")]
#[arg(
long,
default_value_t = rustfs_config::DEFAULT_CONSOLE_ENABLE,
env = "RUSTFS_CONSOLE_ENABLE"
)]
pub console_enable: bool,
/// Console server bind address
#[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ADDRESS.to_string(), env = "RUSTFS_CONSOLE_ADDRESS")]
#[arg(
long,
default_value_t = rustfs_config::DEFAULT_CONSOLE_ADDRESS.to_string(),
env = "RUSTFS_CONSOLE_ADDRESS"
)]
pub console_address: String,
/// Observability endpoint for traces, metrics and logs; only gRPC mode is supported.
#[arg(long, default_value_t = rustfs_config::DEFAULT_OBS_ENDPOINT.to_string(), env = "RUSTFS_OBS_ENDPOINT")]
#[arg(
long,
default_value_t = rustfs_config::DEFAULT_OBS_ENDPOINT.to_string(),
env = "RUSTFS_OBS_ENDPOINT"
)]
pub obs_endpoint: String,
/// TLS path for the RustFS API and console.
@@ -137,6 +161,42 @@ pub struct Opt {
pub buffer_profile: String,
}
impl std::fmt::Debug for Opt {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Opt")
.field("volumes", &self.volumes)
.field("address", &self.address)
.field("server_domains", &self.server_domains)
.field("access_key", &self.access_key)
.field("secret_key", &Opt::mask_sensitive(Some(&self.secret_key))) // Hide sensitive values
.field("console_enable", &self.console_enable)
.field("console_address", &self.console_address)
.field("obs_endpoint", &self.obs_endpoint)
.field("tls_path", &self.tls_path)
.field("license", &Opt::mask_sensitive(self.license.as_ref()))
.field("region", &self.region)
.field("kms_enable", &self.kms_enable)
.field("kms_backend", &self.kms_backend)
.field("kms_key_dir", &self.kms_key_dir)
.field("kms_vault_address", &self.kms_vault_address)
.field("kms_vault_token", &Opt::mask_sensitive(self.kms_vault_token.as_ref()))
.field("kms_default_key_id", &self.kms_default_key_id)
.field("buffer_profile_disable", &self.buffer_profile_disable)
.field("buffer_profile", &self.buffer_profile)
.finish()
}
}
impl Opt {
/// Mask sensitive information in Option<String>
fn mask_sensitive(s: Option<&String>) -> String {
match s {
None => "".to_string(),
Some(s) => format!("{}***{}|{}", s.chars().next().unwrap_or('*'), s.chars().last().unwrap_or('*'), s.len()),
}
}
}
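// A minimal sketch (an assumed test, not part of the original change) showing the
// masking format produced by Opt::mask_sensitive: first character, "***", last
// character, and the value length, so secrets never appear verbatim in Debug output.
#[cfg(test)]
mod opt_debug_tests {
use super::*;
#[test]
fn mask_sensitive_keeps_only_first_last_and_length() {
assert_eq!(Opt::mask_sensitive(Some(&"supersecret".to_string())), "s***t|11");
assert_eq!(Opt::mask_sensitive(None), "");
}
}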
// lazy_static::lazy_static! {
// pub(crate) static ref OPT: OnceLock<Opt> = OnceLock::new();
// }

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_ecstore::bucket::quota::QuotaError;
use rustfs_ecstore::error::StorageError;
use s3s::{S3Error, S3ErrorCode};
@@ -284,6 +285,29 @@ impl From<rustfs_iam::error::Error> for ApiError {
}
}
impl From<QuotaError> for ApiError {
fn from(err: QuotaError) -> Self {
let code = match &err {
QuotaError::QuotaExceeded { .. } => S3ErrorCode::InvalidRequest,
QuotaError::ConfigNotFound { .. } => S3ErrorCode::NoSuchBucket,
QuotaError::InvalidConfig { .. } => S3ErrorCode::InvalidArgument,
QuotaError::StorageError(_) => S3ErrorCode::InternalError,
};
let message = if code == S3ErrorCode::InternalError {
err.to_string()
} else {
ApiError::error_code_to_message(&code)
};
ApiError {
code,
message,
source: Some(Box::new(err)),
}
}
}
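// Hypothetical call site (illustrative only): because quota failures convert into
// ApiError, a handler returning S3Result can surface them directly, e.g.
//   let stats = quota_checker.get_quota_stats(&bucket).await.map_err(ApiError::from)?;
// which yields NoSuchBucket for missing configs and InternalError for storage failures.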
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -250,7 +250,8 @@ pub async fn start_http_server(
b.set_access(store.clone());
b.set_route(admin::make_admin_route(opt.console_enable)?);
if !opt.server_domains.is_empty() {
// The console server does not need to set up virtual-hosted-style requests
if !opt.server_domains.is_empty() && !opt.console_enable {
MultiDomain::new(&opt.server_domains).map_err(Error::other)?; // validate domains
// add the default port number to the given server domains
@@ -378,20 +379,11 @@ pub async fn start_http_server(
// Enable TCP Keepalive to detect dead clients (e.g. power loss)
// Idle: 10s, Interval: 5s, Retries: 3
let ka = {
#[cfg(not(target_os = "openbsd"))]
let ka = TcpKeepalive::new()
.with_time(Duration::from_secs(10))
.with_interval(Duration::from_secs(5))
.with_retries(3);
// On OpenBSD socket2 only supports configuring the initial
// TCP keepalive timeout; intervals and retries cannot be set.
#[cfg(target_os = "openbsd")]
let ka = TcpKeepalive::new().with_time(Duration::from_secs(10));
ka
};
let mut ka = TcpKeepalive::new().with_time(Duration::from_secs(10));
#[cfg(not(target_os = "openbsd"))]
{
ka = ka.with_interval(Duration::from_secs(5)).with_retries(3);
}
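// On OpenBSD, socket2 only supports setting the initial keepalive timeout; the
// interval and retry count above are therefore applied on other platforms only.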
if let Err(err) = socket_ref.set_tcp_keepalive(&ka) {
warn!(?err, "Failed to set TCP_KEEPALIVE");

View File

@@ -68,6 +68,16 @@ pub async fn authorize_request<T>(req: &mut S3Request<T>, action: Action) -> S3R
deny_only: false,
})
.await
&& !PolicySys::is_allowed(&BucketPolicyArgs {
bucket: req_info.bucket.as_deref().unwrap_or(""),
action: Action::S3Action(S3Action::DeleteObjectVersionAction),
is_owner: req_info.is_owner,
account: &cred.access_key,
groups: &cred.groups,
conditions: &conditions,
object: req_info.object.as_deref().unwrap_or(""),
})
.await
{
return Err(s3_error!(AccessDenied, "Access Denied"));
}
@@ -89,8 +99,22 @@ pub async fn authorize_request<T>(req: &mut S3Request<T>, action: Action) -> S3R
return Ok(());
}
if action == Action::S3Action(S3Action::ListBucketVersionsAction)
&& iam_store
if PolicySys::is_allowed(&BucketPolicyArgs {
bucket: req_info.bucket.as_deref().unwrap_or(""),
action,
is_owner: req_info.is_owner,
account: &cred.access_key,
groups: &cred.groups,
conditions: &conditions,
object: req_info.object.as_deref().unwrap_or(""),
})
.await
{
return Ok(());
}
if action == Action::S3Action(S3Action::ListBucketVersionsAction) {
if iam_store
.is_allowed(&Args {
account: &cred.access_key,
groups: &cred.groups,
@@ -103,8 +127,23 @@ pub async fn authorize_request<T>(req: &mut S3Request<T>, action: Action) -> S3R
deny_only: false,
})
.await
{
return Ok(());
{
return Ok(());
}
if PolicySys::is_allowed(&BucketPolicyArgs {
bucket: req_info.bucket.as_deref().unwrap_or(""),
action: Action::S3Action(S3Action::ListBucketAction),
is_owner: req_info.is_owner,
account: &cred.access_key,
groups: &cred.groups,
conditions: &conditions,
object: req_info.object.as_deref().unwrap_or(""),
})
.await
{
return Ok(());
}
}
} else {
let conditions = get_condition_values(
@@ -817,11 +856,14 @@ impl S3Access for FS {
authorize_request(req, Action::S3Action(S3Action::ListBucketMultipartUploadsAction)).await
}
/// Checks whether the ListObjectVersions request has accesses to the resources.
/// Checks whether the `ListObjectVersions` request is authorized for the requested bucket.
///
/// This method returns `Ok(())` by default.
async fn list_object_versions(&self, _req: &mut S3Request<ListObjectVersionsInput>) -> S3Result<()> {
Ok(())
/// Returns `Ok(())` if the request is allowed, or an error if access is denied or another
/// authorization-related issue occurs.
async fn list_object_versions(&self, req: &mut S3Request<ListObjectVersionsInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::ListBucketVersionsAction)).await
}
/// Checks whether the ListObjects request has access to the resources.

View File

@@ -40,6 +40,7 @@ use datafusion::arrow::{
use futures::StreamExt;
use http::{HeaderMap, StatusCode};
use metrics::counter;
use rustfs_ecstore::bucket::quota::checker::QuotaChecker;
use rustfs_ecstore::{
bucket::{
lifecycle::{
@@ -54,6 +55,7 @@ use rustfs_ecstore::{
metadata_sys::get_replication_config,
object_lock::objectlock_sys::BucketObjectLockSys,
policy_sys::PolicySys,
quota::QuotaOperation,
replication::{
DeletedObjectReplicationInfo, ReplicationConfigurationExt, check_replicate_delete, get_must_replicate_options,
must_replicate, schedule_replication, schedule_replication_delete,
@@ -1067,11 +1069,42 @@ impl S3 for FS {
}
}
// check quota for copy operation
if let Some(metadata_sys) = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get() {
let quota_checker = QuotaChecker::new(metadata_sys.clone());
match quota_checker
.check_quota(&bucket, QuotaOperation::CopyObject, src_info.size as u64)
.await
{
Ok(check_result) => {
if !check_result.allowed {
return Err(S3Error::with_message(
S3ErrorCode::InvalidRequest,
format!(
"Bucket quota exceeded. Current usage: {} bytes, limit: {} bytes",
check_result.current_usage,
check_result.quota_limit.unwrap_or(0)
),
));
}
}
Err(e) => {
warn!("Quota check failed for bucket {}: {}, allowing operation", bucket, e);
}
}
}
let oi = store
.copy_object(&src_bucket, &src_key, &bucket, &key, &mut src_info, &src_opts, &dst_opts)
.await
.map_err(ApiError::from)?;
// Update quota tracking after successful copy
if rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get().is_some() {
rustfs_ecstore::data_usage::increment_bucket_usage_memory(&bucket, oi.size as u64).await;
}
// Invalidate cache for the destination object to prevent stale data
let manager = get_concurrency_manager();
let dest_bucket = bucket.clone();
@@ -1440,6 +1473,9 @@ impl S3 for FS {
}
};
// Fast in-memory update for immediate quota consistency
rustfs_ecstore::data_usage::decrement_bucket_usage_memory(&bucket, obj_info.size as u64).await;
// Invalidate cache for the deleted object
let manager = get_concurrency_manager();
let del_bucket = bucket.clone();
@@ -1534,8 +1570,6 @@ impl S3 for FS {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
let has_lock_enable = BucketObjectLockSys::get(&bucket).await.is_some();
let version_cfg = BucketVersioningSys::get(&bucket).await.unwrap_or_default();
#[derive(Default, Clone)]
@@ -1548,6 +1582,7 @@ impl S3 for FS {
let mut object_to_delete = Vec::new();
let mut object_to_delete_index = HashMap::new();
let mut object_sizes = HashMap::new();
for (idx, obj_id) in delete.objects.iter().enumerate() {
// Per S3 API spec, "null" string means non-versioned object
// Filter out "null" version_id to treat as unversioned
@@ -1606,15 +1641,14 @@ impl S3 for FS {
.await
.map_err(ApiError::from)?;
let mut goi = ObjectInfo::default();
let mut gerr = None;
// Get object info to collect size for quota tracking
let (goi, gerr) = match store.get_object_info(&bucket, &object.object_name, &opts).await {
Ok(res) => (res, None),
Err(e) => (ObjectInfo::default(), Some(e.to_string())),
};
if replicate_deletes || object.version_id.is_some() && has_lock_enable {
(goi, gerr) = match store.get_object_info(&bucket, &object.object_name, &opts).await {
Ok(res) => (res, None),
Err(e) => (ObjectInfo::default(), Some(e.to_string())),
};
}
// Store object size for quota tracking
object_sizes.insert(object.object_name.clone(), goi.size);
if is_dir_object(&object.object_name) && object.version_id.is_none() {
object.version_id = Some(Uuid::nil());
@@ -1716,6 +1750,10 @@ impl S3 for FS {
dobjs[i].replication_state = Some(object_to_delete[i].replication_state());
}
delete_results[*didx].delete_object = Some(dobjs[i].clone());
// Update quota tracking for successfully deleted objects
if let Some(&size) = object_sizes.get(&obj.object_name) {
rustfs_ecstore::data_usage::decrement_bucket_usage_memory(&bucket, size as u64).await;
}
continue;
}
@@ -3151,6 +3189,34 @@ impl S3 for FS {
// Validate object key
validate_object_key(&key, "PUT")?;
// check quota for put operation
if let Some(size) = content_length
&& let Some(metadata_sys) = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get()
{
let quota_checker = QuotaChecker::new(metadata_sys.clone());
match quota_checker
.check_quota(&bucket, QuotaOperation::PutObject, size as u64)
.await
{
Ok(check_result) => {
if !check_result.allowed {
return Err(S3Error::with_message(
S3ErrorCode::InvalidRequest,
format!(
"Bucket quota exceeded. Current usage: {} bytes, limit: {} bytes",
check_result.current_usage,
check_result.quota_limit.unwrap_or(0)
),
));
}
}
Err(e) => {
warn!("Quota check failed for bucket {}: {}, allowing operation", bucket, e);
}
}
}
if if_match.is_some() || if_none_match.is_some() {
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
@@ -3429,6 +3495,9 @@ impl S3 for FS {
.await
.map_err(ApiError::from)?;
// Fast in-memory update for immediate quota consistency
rustfs_ecstore::data_usage::increment_bucket_usage_memory(&bucket, obj_info.size as u64).await;
// Invalidate cache for the written object to prevent stale data
let manager = get_concurrency_manager();
let put_bucket = bucket.clone();
@@ -4356,6 +4425,38 @@ impl S3 for FS {
.await
.map_err(ApiError::from)?;
// check quota after completing multipart upload
if let Some(metadata_sys) = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get() {
let quota_checker = QuotaChecker::new(metadata_sys.clone());
match quota_checker
.check_quota(&bucket, QuotaOperation::PutObject, obj_info.size as u64)
.await
{
Ok(check_result) => {
if !check_result.allowed {
// Quota exceeded, delete the completed object
let _ = store.delete_object(&bucket, &key, ObjectOptions::default()).await;
return Err(S3Error::with_message(
S3ErrorCode::InvalidRequest,
format!(
"Bucket quota exceeded. Current usage: {} bytes, limit: {} bytes",
check_result.current_usage,
check_result.quota_limit.unwrap_or(0)
),
));
}
// Update quota tracking after successful multipart upload
if rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get().is_some() {
rustfs_ecstore::data_usage::increment_bucket_usage_memory(&bucket, obj_info.size as u64).await;
}
}
Err(e) => {
warn!("Quota check failed for bucket {}: {}, allowing operation", bucket, e);
}
}
}
// Invalidate cache for the completed multipart object
let manager = get_concurrency_manager();
let mpu_bucket = bucket.clone();

View File

@@ -162,9 +162,17 @@ impl OperationHelper {
.build();
let mut final_builder = builder.api(api_details.clone());
if let Ok(res) = result {
final_builder = final_builder.resp_header(extract_resp_elements(res));
}
if let Some(err) = error_msg {
final_builder = final_builder.error(err);
}
if let Some(sk) = rustfs_credentials::get_global_access_key_opt() {
final_builder = final_builder.access_key(&sk);
}
self.audit_builder = Some(final_builder);
self.api_builder = ApiDetailsBuilder(api_details); // Store final details for Drop use
}

View File

@@ -18,6 +18,7 @@ use rustfs_ecstore::error::Result;
use rustfs_ecstore::error::StorageError;
use rustfs_utils::http::AMZ_META_UNENCRYPTED_CONTENT_LENGTH;
use rustfs_utils::http::AMZ_META_UNENCRYPTED_CONTENT_MD5;
use rustfs_utils::http::RUSTFS_FORCE_DELETE;
use s3s::header::X_AMZ_OBJECT_LOCK_MODE;
use s3s::header::X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE;
@@ -77,6 +78,11 @@ pub async fn del_opts(
StorageError::InvalidArgument(bucket.to_owned(), object.to_owned(), err.to_string())
})?;
opts.delete_prefix = headers
.get(RUSTFS_FORCE_DELETE)
.map(|v| v.to_str().unwrap_or_default() == "true")
.unwrap_or_default();
opts.version_id = {
if is_dir_object(object) && vid.is_none() {
Some(Uuid::nil().to_string())
@@ -665,6 +671,39 @@ mod tests {
}
}
#[tokio::test]
async fn test_del_opts_with_delete_prefix() {
let mut headers = create_test_headers();
let metadata = create_test_metadata();
// Test without RUSTFS_FORCE_DELETE header - should default to false
let result = del_opts("test-bucket", "test-object", None, &headers, metadata.clone()).await;
assert!(result.is_ok());
let opts = result.unwrap();
assert!(!opts.delete_prefix);
// Test with RUSTFS_FORCE_DELETE header set to "true"
headers.insert(RUSTFS_FORCE_DELETE, HeaderValue::from_static("true"));
let result = del_opts("test-bucket", "test-object", None, &headers, metadata.clone()).await;
assert!(result.is_ok());
let opts = result.unwrap();
assert!(opts.delete_prefix);
// Test with RUSTFS_FORCE_DELETE header set to "false"
headers.insert(RUSTFS_FORCE_DELETE, HeaderValue::from_static("false"));
let result = del_opts("test-bucket", "test-object", None, &headers, metadata.clone()).await;
assert!(result.is_ok());
let opts = result.unwrap();
assert!(!opts.delete_prefix);
// Test with RUSTFS_FORCE_DELETE header set to other value
headers.insert(RUSTFS_FORCE_DELETE, HeaderValue::from_static("maybe"));
let result = del_opts("test-bucket", "test-object", None, &headers, metadata).await;
assert!(result.is_ok());
let opts = result.unwrap();
assert!(!opts.delete_prefix);
}
#[tokio::test]
async fn test_get_opts_basic() {
let headers = create_test_headers();