Fix: add support for additional AWS S3 storage classes and validation logic (#487)

* Fix: add pagination fields to S3 response

* Fix: add support for additional AWS S3 storage classes and validation logic

* Fix: improve handling of optional fields in S3 response

---------

Co-authored-by: DamonXue <damonxue2@gmail.com>
This commit is contained in:
0xdx2
2025-09-05 09:50:41 +08:00
committed by GitHub
parent 01b2623f66
commit 44f3eb7244
3 changed files with 130 additions and 3 deletions

View File

@@ -36,6 +36,17 @@ pub fn default_parity_count(drive: usize) -> usize {
// Legacy storage classes supported since the original implementation.
pub const RRS: &str = "REDUCED_REDUNDANCY";
pub const STANDARD: &str = "STANDARD";
// AWS S3 Storage Classes
// Additional class names accepted for S3 API compatibility. These are
// matched exactly (case-sensitive, as sent on the wire via the
// x-amz-storage-class header); per the parity lookup in Config, they all
// resolve to the standard parity configuration.
pub const DEEP_ARCHIVE: &str = "DEEP_ARCHIVE";
pub const EXPRESS_ONEZONE: &str = "EXPRESS_ONEZONE";
pub const GLACIER: &str = "GLACIER";
pub const GLACIER_IR: &str = "GLACIER_IR";
pub const INTELLIGENT_TIERING: &str = "INTELLIGENT_TIERING";
pub const ONEZONE_IA: &str = "ONEZONE_IA";
pub const OUTPOSTS: &str = "OUTPOSTS";
pub const SNOW: &str = "SNOW";
pub const STANDARD_IA: &str = "STANDARD_IA";
// Standard constants for config info storage class (lowercase variants
// used in configuration, distinct from the wire-format names above).
pub const CLASS_STANDARD: &str = "standard";
pub const CLASS_RRS: &str = "rrs";
@@ -115,6 +126,15 @@ impl Config {
None
}
}
// All these storage classes use standard parity configuration
STANDARD | DEEP_ARCHIVE | EXPRESS_ONEZONE | GLACIER | GLACIER_IR | INTELLIGENT_TIERING | ONEZONE_IA | OUTPOSTS
| SNOW | STANDARD_IA => {
if self.initialized {
Some(self.standard.parity)
} else {
None
}
}
_ => {
if self.initialized {
Some(self.standard.parity)

View File

@@ -4778,10 +4778,18 @@ impl StorageAPI for SetDisks {
let part_number_marker = part_number_marker.unwrap_or_default();
// Extract storage class from metadata, default to STANDARD if not found
let storage_class = fi
.metadata
.get(rustfs_filemeta::headers::AMZ_STORAGE_CLASS)
.cloned()
.unwrap_or_else(|| storageclass::STANDARD.to_string());
let mut ret = ListPartsInfo {
bucket: bucket.to_owned(),
object: object.to_owned(),
upload_id: upload_id.to_owned(),
storage_class,
max_parts,
part_number_marker,
user_defined: fi.metadata.clone(),
@@ -6039,6 +6047,40 @@ pub fn should_prevent_write(oi: &ObjectInfo, if_none_match: Option<String>, if_m
}
}
/// Validates if the given storage class is supported.
///
/// Accepts every storage class name this server recognizes: the legacy
/// `STANDARD`/`REDUCED_REDUNDANCY` pair plus the additional AWS S3
/// classes. The comparison is exact and case-sensitive, so lowercase
/// spellings such as "standard" are rejected.
pub fn is_valid_storage_class(storage_class: &str) -> bool {
    // Exhaustive list of accepted names, kept as data so adding a new
    // class is a one-line change.
    [
        storageclass::STANDARD,
        storageclass::RRS,
        storageclass::DEEP_ARCHIVE,
        storageclass::EXPRESS_ONEZONE,
        storageclass::GLACIER,
        storageclass::GLACIER_IR,
        storageclass::INTELLIGENT_TIERING,
        storageclass::ONEZONE_IA,
        storageclass::OUTPOSTS,
        storageclass::SNOW,
        storageclass::STANDARD_IA,
    ]
    .contains(&storage_class)
}
/// Returns true if the storage class is a cold storage tier that requires
/// special handling (the Glacier family and Deep Archive).
pub fn is_cold_storage_class(storage_class: &str) -> bool {
    storage_class == storageclass::DEEP_ARCHIVE
        || storage_class == storageclass::GLACIER
        || storage_class == storageclass::GLACIER_IR
}
/// Returns true if the storage class is an infrequent access tier
/// (One Zone-IA, Standard-IA, or Intelligent-Tiering).
pub fn is_infrequent_access_class(storage_class: &str) -> bool {
    storage_class == storageclass::ONEZONE_IA
        || storage_class == storageclass::STANDARD_IA
        || storage_class == storageclass::INTELLIGENT_TIERING
}
#[cfg(test)]
mod tests {
use super::*;
@@ -6528,4 +6570,53 @@ mod tests {
let if_match = None;
assert!(!should_prevent_write(&oi, if_none_match, if_match));
}
#[test]
fn test_is_valid_storage_class() {
    // Every class the server advertises must be accepted.
    let accepted = [
        storageclass::STANDARD,
        storageclass::RRS,
        storageclass::DEEP_ARCHIVE,
        storageclass::EXPRESS_ONEZONE,
        storageclass::GLACIER,
        storageclass::GLACIER_IR,
        storageclass::INTELLIGENT_TIERING,
        storageclass::ONEZONE_IA,
        storageclass::OUTPOSTS,
        storageclass::SNOW,
        storageclass::STANDARD_IA,
    ];
    for class in accepted {
        assert!(is_valid_storage_class(class), "expected {} to be valid", class);
    }
    // Unknown names, the empty string, and lowercase spellings are rejected.
    for class in ["INVALID", "", "standard"] {
        assert!(!is_valid_storage_class(class), "expected {} to be invalid", class);
    }
}
#[test]
fn test_is_cold_storage_class() {
    // Only the Glacier family and Deep Archive count as cold tiers.
    for class in [storageclass::DEEP_ARCHIVE, storageclass::GLACIER, storageclass::GLACIER_IR] {
        assert!(is_cold_storage_class(class), "expected {} to be cold", class);
    }
    // Everything else — including the IA tiers — is not cold.
    for class in [
        storageclass::STANDARD,
        storageclass::RRS,
        storageclass::STANDARD_IA,
        storageclass::EXPRESS_ONEZONE,
    ] {
        assert!(!is_cold_storage_class(class), "expected {} not to be cold", class);
    }
}
#[test]
fn test_is_infrequent_access_class() {
    // The three IA-style tiers are classified as infrequent access.
    for class in [
        storageclass::ONEZONE_IA,
        storageclass::STANDARD_IA,
        storageclass::INTELLIGENT_TIERING,
    ] {
        assert!(is_infrequent_access_class(class), "expected {} to be IA", class);
    }
    // Frequent-access and archive tiers are not.
    for class in [
        storageclass::STANDARD,
        storageclass::RRS,
        storageclass::DEEP_ARCHIVE,
        storageclass::EXPRESS_ONEZONE,
    ] {
        assert!(!is_infrequent_access_class(class), "expected {} not to be IA", class);
    }
}
}

View File

@@ -57,8 +57,8 @@ use rustfs_ecstore::compress::MIN_COMPRESSIBLE_SIZE;
use rustfs_ecstore::compress::is_compressible;
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::DEFAULT_READ_BUFFER_SIZE;
use rustfs_ecstore::set_disk::MAX_PARTS_COUNT;
use rustfs_ecstore::set_disk::{DEFAULT_READ_BUFFER_SIZE, is_valid_storage_class};
use rustfs_ecstore::store_api::BucketOptions;
use rustfs_ecstore::store_api::CompletePart;
use rustfs_ecstore::store_api::DeleteBucketOptions;
@@ -1385,8 +1385,7 @@ impl S3 for FS {
let input = req.input;
if let Some(ref storage_class) = input.storage_class {
let is_valid = ["STANDARD", "REDUCED_REDUNDANCY"].contains(&storage_class.as_str());
if !is_valid {
if !is_valid_storage_class(storage_class.as_str()) {
return Err(s3_error!(InvalidStorageClass));
}
}
@@ -1530,9 +1529,17 @@ impl S3 for FS {
key,
tagging,
version_id,
storage_class,
..
} = req.input.clone();
// Validate storage class if provided
if let Some(ref storage_class) = storage_class {
if !is_valid_storage_class(storage_class.as_str()) {
return Err(s3_error!(InvalidStorageClass));
}
}
// mc cp step 3
// debug!("create_multipart_upload meta {:?}", &metadata);
@@ -1900,6 +1907,15 @@ impl S3 for FS {
id: RUSTFS_OWNER.id.clone(),
display_name: RUSTFS_OWNER.display_name.clone(),
}),
is_truncated: Some(res.is_truncated),
next_part_number_marker: res.next_part_number_marker.try_into().ok(),
max_parts: res.max_parts.try_into().ok(),
part_number_marker: res.part_number_marker.try_into().ok(),
storage_class: if res.storage_class.is_empty() {
None
} else {
Some(res.storage_class.into())
},
..Default::default()
};
Ok(S3Response::new(output))